diff --git a/.librarian/state.yaml b/.librarian/state.yaml index 0d0a95f2f244..4a2631a0d9a7 100644 --- a/.librarian/state.yaml +++ b/.librarian/state.yaml @@ -2721,7 +2721,7 @@ libraries: tag_format: '{id}-v{version}' - id: google-cloud-oracledatabase version: 0.2.0 - last_generated_commit: 3322511885371d2b2253f209ccc3aa60d4100cfd + last_generated_commit: 55319b058f8a0e46bbeeff30e374e4b1f081f494 apis: - path: google/cloud/oracledatabase/v1 service_config: oracledatabase_v1.yaml @@ -2761,7 +2761,7 @@ libraries: tag_format: '{id}-v{version}' - id: google-cloud-org-policy version: 1.15.0 - last_generated_commit: d300b151a973ce0425ae4ad07b3de957ca31bec6 + last_generated_commit: 55319b058f8a0e46bbeeff30e374e4b1f081f494 apis: - path: google/cloud/orgpolicy/v1 service_config: "" @@ -4165,7 +4165,7 @@ libraries: tag_format: '{id}-v{version}' - id: google-maps-places version: 0.3.0 - last_generated_commit: 3322511885371d2b2253f209ccc3aa60d4100cfd + last_generated_commit: 55319b058f8a0e46bbeeff30e374e4b1f081f494 apis: - path: google/maps/places/v1 service_config: places_v1.yaml @@ -4617,7 +4617,7 @@ libraries: tag_format: '{id}-v{version}' - id: grpc-google-iam-v1 version: 0.14.2 - last_generated_commit: d300b151a973ce0425ae4ad07b3de957ca31bec6 + last_generated_commit: 55319b058f8a0e46bbeeff30e374e4b1f081f494 apis: - path: google/iam/v1 service_config: iam_meta_api.yaml diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase/__init__.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase/__init__.py index eea4ed9d9e62..aaf85b8b8029 100644 --- a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase/__init__.py +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase/__init__.py @@ -34,9 +34,12 @@ AutonomousDatabaseStandbySummary, DatabaseConnectionStringProfile, DBWorkload, + EncryptionKey, + EncryptionKeyHistoryEntry, GenerateType, OperationsInsightsState, ScheduledOperationDetails, + SourceConfig, State, ) from 
google.cloud.oracledatabase_v1.types.autonomous_database_character_set import ( @@ -49,10 +52,53 @@ from google.cloud.oracledatabase_v1.types.autonomous_db_version import ( AutonomousDbVersion, ) -from google.cloud.oracledatabase_v1.types.common import CustomerContact +from google.cloud.oracledatabase_v1.types.common import ( + ComputeModel, + CustomerContact, + DataCollectionOptionsCommon, + IdentityConnector, +) +from google.cloud.oracledatabase_v1.types.database import ( + Database, + DatabaseProperties, + DbBackupConfig, + GetDatabaseRequest, + ListDatabasesRequest, + ListDatabasesResponse, +) +from google.cloud.oracledatabase_v1.types.database_character_set import ( + DatabaseCharacterSet, + ListDatabaseCharacterSetsRequest, + ListDatabaseCharacterSetsResponse, +) from google.cloud.oracledatabase_v1.types.db_node import DbNode, DbNodeProperties from google.cloud.oracledatabase_v1.types.db_server import DbServer, DbServerProperties +from google.cloud.oracledatabase_v1.types.db_system import ( + CreateDbSystemRequest, + DataCollectionOptionsDbSystem, + DbHome, + DbSystem, + DbSystemOptions, + DbSystemProperties, + DeleteDbSystemRequest, + GetDbSystemRequest, + ListDbSystemsRequest, + ListDbSystemsResponse, +) +from google.cloud.oracledatabase_v1.types.db_system_initial_storage_size import ( + DbSystemInitialStorageSize, + DbSystemInitialStorageSizeProperties, + ListDbSystemInitialStorageSizesRequest, + ListDbSystemInitialStorageSizesResponse, + StorageSizeDetails, +) from google.cloud.oracledatabase_v1.types.db_system_shape import DbSystemShape +from google.cloud.oracledatabase_v1.types.db_version import ( + DbVersion, + DbVersionProperties, + ListDbVersionsRequest, + ListDbVersionsResponse, +) from google.cloud.oracledatabase_v1.types.entitlement import ( CloudAccountDetails, Entitlement, @@ -62,20 +108,60 @@ CloudExadataInfrastructureProperties, MaintenanceWindow, ) +from google.cloud.oracledatabase_v1.types.exadb_vm_cluster import ( + ExadbVmCluster, + 
ExadbVmClusterProperties, + ExadbVmClusterStorageDetails, +) +from google.cloud.oracledatabase_v1.types.exascale_db_storage_vault import ( + CreateExascaleDbStorageVaultRequest, + DeleteExascaleDbStorageVaultRequest, + ExascaleDbStorageDetails, + ExascaleDbStorageVault, + ExascaleDbStorageVaultProperties, + GetExascaleDbStorageVaultRequest, + ListExascaleDbStorageVaultsRequest, + ListExascaleDbStorageVaultsResponse, +) from google.cloud.oracledatabase_v1.types.gi_version import GiVersion from google.cloud.oracledatabase_v1.types.location_metadata import LocationMetadata +from google.cloud.oracledatabase_v1.types.minor_version import ( + ListMinorVersionsRequest, + ListMinorVersionsResponse, + MinorVersion, +) +from google.cloud.oracledatabase_v1.types.odb_network import ( + CreateOdbNetworkRequest, + DeleteOdbNetworkRequest, + GetOdbNetworkRequest, + ListOdbNetworksRequest, + ListOdbNetworksResponse, + OdbNetwork, +) +from google.cloud.oracledatabase_v1.types.odb_subnet import ( + CreateOdbSubnetRequest, + DeleteOdbSubnetRequest, + GetOdbSubnetRequest, + ListOdbSubnetsRequest, + ListOdbSubnetsResponse, + OdbSubnet, +) from google.cloud.oracledatabase_v1.types.oracledatabase import ( CreateAutonomousDatabaseRequest, CreateCloudExadataInfrastructureRequest, CreateCloudVmClusterRequest, + CreateExadbVmClusterRequest, DeleteAutonomousDatabaseRequest, DeleteCloudExadataInfrastructureRequest, DeleteCloudVmClusterRequest, + DeleteExadbVmClusterRequest, + FailoverAutonomousDatabaseRequest, GenerateAutonomousDatabaseWalletRequest, GenerateAutonomousDatabaseWalletResponse, GetAutonomousDatabaseRequest, GetCloudExadataInfrastructureRequest, GetCloudVmClusterRequest, + GetExadbVmClusterRequest, ListAutonomousDatabaseBackupsRequest, ListAutonomousDatabaseBackupsResponse, ListAutonomousDatabaseCharacterSetsRequest, @@ -96,13 +182,29 @@ ListDbSystemShapesResponse, ListEntitlementsRequest, ListEntitlementsResponse, + ListExadbVmClustersRequest, + ListExadbVmClustersResponse, 
ListGiVersionsRequest, ListGiVersionsResponse, OperationMetadata, + RemoveVirtualMachineExadbVmClusterRequest, RestartAutonomousDatabaseRequest, RestoreAutonomousDatabaseRequest, StartAutonomousDatabaseRequest, StopAutonomousDatabaseRequest, + SwitchoverAutonomousDatabaseRequest, + UpdateAutonomousDatabaseRequest, + UpdateExadbVmClusterRequest, +) +from google.cloud.oracledatabase_v1.types.pluggable_database import ( + DatabaseManagementConfig, + GetPluggableDatabaseRequest, + ListPluggableDatabasesRequest, + ListPluggableDatabasesResponse, + PluggableDatabase, + PluggableDatabaseConnectionStrings, + PluggableDatabaseNodeLevelDetails, + PluggableDatabaseProperties, ) from google.cloud.oracledatabase_v1.types.vm_cluster import ( CloudVmCluster, @@ -121,7 +223,10 @@ "AutonomousDatabaseProperties", "AutonomousDatabaseStandbySummary", "DatabaseConnectionStringProfile", + "EncryptionKey", + "EncryptionKeyHistoryEntry", "ScheduledOperationDetails", + "SourceConfig", "DBWorkload", "GenerateType", "OperationsInsightsState", @@ -131,29 +236,90 @@ "AutonomousDatabaseBackupProperties", "AutonomousDbVersion", "CustomerContact", + "DataCollectionOptionsCommon", + "IdentityConnector", + "ComputeModel", + "Database", + "DatabaseProperties", + "DbBackupConfig", + "GetDatabaseRequest", + "ListDatabasesRequest", + "ListDatabasesResponse", + "DatabaseCharacterSet", + "ListDatabaseCharacterSetsRequest", + "ListDatabaseCharacterSetsResponse", "DbNode", "DbNodeProperties", "DbServer", "DbServerProperties", + "CreateDbSystemRequest", + "DataCollectionOptionsDbSystem", + "DbHome", + "DbSystem", + "DbSystemOptions", + "DbSystemProperties", + "DeleteDbSystemRequest", + "GetDbSystemRequest", + "ListDbSystemsRequest", + "ListDbSystemsResponse", + "DbSystemInitialStorageSize", + "DbSystemInitialStorageSizeProperties", + "ListDbSystemInitialStorageSizesRequest", + "ListDbSystemInitialStorageSizesResponse", + "StorageSizeDetails", "DbSystemShape", + "DbVersion", + "DbVersionProperties", + 
"ListDbVersionsRequest", + "ListDbVersionsResponse", "CloudAccountDetails", "Entitlement", "CloudExadataInfrastructure", "CloudExadataInfrastructureProperties", "MaintenanceWindow", + "ExadbVmCluster", + "ExadbVmClusterProperties", + "ExadbVmClusterStorageDetails", + "CreateExascaleDbStorageVaultRequest", + "DeleteExascaleDbStorageVaultRequest", + "ExascaleDbStorageDetails", + "ExascaleDbStorageVault", + "ExascaleDbStorageVaultProperties", + "GetExascaleDbStorageVaultRequest", + "ListExascaleDbStorageVaultsRequest", + "ListExascaleDbStorageVaultsResponse", "GiVersion", "LocationMetadata", + "ListMinorVersionsRequest", + "ListMinorVersionsResponse", + "MinorVersion", + "CreateOdbNetworkRequest", + "DeleteOdbNetworkRequest", + "GetOdbNetworkRequest", + "ListOdbNetworksRequest", + "ListOdbNetworksResponse", + "OdbNetwork", + "CreateOdbSubnetRequest", + "DeleteOdbSubnetRequest", + "GetOdbSubnetRequest", + "ListOdbSubnetsRequest", + "ListOdbSubnetsResponse", + "OdbSubnet", "CreateAutonomousDatabaseRequest", "CreateCloudExadataInfrastructureRequest", "CreateCloudVmClusterRequest", + "CreateExadbVmClusterRequest", "DeleteAutonomousDatabaseRequest", "DeleteCloudExadataInfrastructureRequest", "DeleteCloudVmClusterRequest", + "DeleteExadbVmClusterRequest", + "FailoverAutonomousDatabaseRequest", "GenerateAutonomousDatabaseWalletRequest", "GenerateAutonomousDatabaseWalletResponse", "GetAutonomousDatabaseRequest", "GetCloudExadataInfrastructureRequest", "GetCloudVmClusterRequest", + "GetExadbVmClusterRequest", "ListAutonomousDatabaseBackupsRequest", "ListAutonomousDatabaseBackupsResponse", "ListAutonomousDatabaseCharacterSetsRequest", @@ -174,13 +340,27 @@ "ListDbSystemShapesResponse", "ListEntitlementsRequest", "ListEntitlementsResponse", + "ListExadbVmClustersRequest", + "ListExadbVmClustersResponse", "ListGiVersionsRequest", "ListGiVersionsResponse", "OperationMetadata", + "RemoveVirtualMachineExadbVmClusterRequest", "RestartAutonomousDatabaseRequest", 
"RestoreAutonomousDatabaseRequest", "StartAutonomousDatabaseRequest", "StopAutonomousDatabaseRequest", + "SwitchoverAutonomousDatabaseRequest", + "UpdateAutonomousDatabaseRequest", + "UpdateExadbVmClusterRequest", + "DatabaseManagementConfig", + "GetPluggableDatabaseRequest", + "ListPluggableDatabasesRequest", + "ListPluggableDatabasesResponse", + "PluggableDatabase", + "PluggableDatabaseConnectionStrings", + "PluggableDatabaseNodeLevelDetails", + "PluggableDatabaseProperties", "CloudVmCluster", "CloudVmClusterProperties", "DataCollectionOptions", diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/__init__.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/__init__.py index c435ba44898b..c680a86254c0 100644 --- a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/__init__.py +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/__init__.py @@ -29,9 +29,12 @@ AutonomousDatabaseStandbySummary, DatabaseConnectionStringProfile, DBWorkload, + EncryptionKey, + EncryptionKeyHistoryEntry, GenerateType, OperationsInsightsState, ScheduledOperationDetails, + SourceConfig, State, ) from .types.autonomous_database_character_set import AutonomousDatabaseCharacterSet @@ -40,30 +43,113 @@ AutonomousDatabaseBackupProperties, ) from .types.autonomous_db_version import AutonomousDbVersion -from .types.common import CustomerContact +from .types.common import ( + ComputeModel, + CustomerContact, + DataCollectionOptionsCommon, + IdentityConnector, +) +from .types.database import ( + Database, + DatabaseProperties, + DbBackupConfig, + GetDatabaseRequest, + ListDatabasesRequest, + ListDatabasesResponse, +) +from .types.database_character_set import ( + DatabaseCharacterSet, + ListDatabaseCharacterSetsRequest, + ListDatabaseCharacterSetsResponse, +) from .types.db_node import DbNode, DbNodeProperties from .types.db_server import DbServer, DbServerProperties +from .types.db_system import ( + 
CreateDbSystemRequest, + DataCollectionOptionsDbSystem, + DbHome, + DbSystem, + DbSystemOptions, + DbSystemProperties, + DeleteDbSystemRequest, + GetDbSystemRequest, + ListDbSystemsRequest, + ListDbSystemsResponse, +) +from .types.db_system_initial_storage_size import ( + DbSystemInitialStorageSize, + DbSystemInitialStorageSizeProperties, + ListDbSystemInitialStorageSizesRequest, + ListDbSystemInitialStorageSizesResponse, + StorageSizeDetails, +) from .types.db_system_shape import DbSystemShape +from .types.db_version import ( + DbVersion, + DbVersionProperties, + ListDbVersionsRequest, + ListDbVersionsResponse, +) from .types.entitlement import CloudAccountDetails, Entitlement from .types.exadata_infra import ( CloudExadataInfrastructure, CloudExadataInfrastructureProperties, MaintenanceWindow, ) +from .types.exadb_vm_cluster import ( + ExadbVmCluster, + ExadbVmClusterProperties, + ExadbVmClusterStorageDetails, +) +from .types.exascale_db_storage_vault import ( + CreateExascaleDbStorageVaultRequest, + DeleteExascaleDbStorageVaultRequest, + ExascaleDbStorageDetails, + ExascaleDbStorageVault, + ExascaleDbStorageVaultProperties, + GetExascaleDbStorageVaultRequest, + ListExascaleDbStorageVaultsRequest, + ListExascaleDbStorageVaultsResponse, +) from .types.gi_version import GiVersion from .types.location_metadata import LocationMetadata +from .types.minor_version import ( + ListMinorVersionsRequest, + ListMinorVersionsResponse, + MinorVersion, +) +from .types.odb_network import ( + CreateOdbNetworkRequest, + DeleteOdbNetworkRequest, + GetOdbNetworkRequest, + ListOdbNetworksRequest, + ListOdbNetworksResponse, + OdbNetwork, +) +from .types.odb_subnet import ( + CreateOdbSubnetRequest, + DeleteOdbSubnetRequest, + GetOdbSubnetRequest, + ListOdbSubnetsRequest, + ListOdbSubnetsResponse, + OdbSubnet, +) from .types.oracledatabase import ( CreateAutonomousDatabaseRequest, CreateCloudExadataInfrastructureRequest, CreateCloudVmClusterRequest, + CreateExadbVmClusterRequest, 
DeleteAutonomousDatabaseRequest, DeleteCloudExadataInfrastructureRequest, DeleteCloudVmClusterRequest, + DeleteExadbVmClusterRequest, + FailoverAutonomousDatabaseRequest, GenerateAutonomousDatabaseWalletRequest, GenerateAutonomousDatabaseWalletResponse, GetAutonomousDatabaseRequest, GetCloudExadataInfrastructureRequest, GetCloudVmClusterRequest, + GetExadbVmClusterRequest, ListAutonomousDatabaseBackupsRequest, ListAutonomousDatabaseBackupsResponse, ListAutonomousDatabaseCharacterSetsRequest, @@ -84,13 +170,29 @@ ListDbSystemShapesResponse, ListEntitlementsRequest, ListEntitlementsResponse, + ListExadbVmClustersRequest, + ListExadbVmClustersResponse, ListGiVersionsRequest, ListGiVersionsResponse, OperationMetadata, + RemoveVirtualMachineExadbVmClusterRequest, RestartAutonomousDatabaseRequest, RestoreAutonomousDatabaseRequest, StartAutonomousDatabaseRequest, StopAutonomousDatabaseRequest, + SwitchoverAutonomousDatabaseRequest, + UpdateAutonomousDatabaseRequest, + UpdateExadbVmClusterRequest, +) +from .types.pluggable_database import ( + DatabaseManagementConfig, + GetPluggableDatabaseRequest, + ListPluggableDatabasesRequest, + ListPluggableDatabasesResponse, + PluggableDatabase, + PluggableDatabaseConnectionStrings, + PluggableDatabaseNodeLevelDetails, + PluggableDatabaseProperties, ) from .types.vm_cluster import ( CloudVmCluster, @@ -116,29 +218,72 @@ "CloudExadataInfrastructureProperties", "CloudVmCluster", "CloudVmClusterProperties", + "ComputeModel", "CreateAutonomousDatabaseRequest", "CreateCloudExadataInfrastructureRequest", "CreateCloudVmClusterRequest", + "CreateDbSystemRequest", + "CreateExadbVmClusterRequest", + "CreateExascaleDbStorageVaultRequest", + "CreateOdbNetworkRequest", + "CreateOdbSubnetRequest", "CustomerContact", "DBWorkload", "DataCollectionOptions", + "DataCollectionOptionsCommon", + "DataCollectionOptionsDbSystem", + "Database", + "DatabaseCharacterSet", "DatabaseConnectionStringProfile", + "DatabaseManagementConfig", + "DatabaseProperties", 
+ "DbBackupConfig", + "DbHome", "DbNode", "DbNodeProperties", "DbServer", "DbServerProperties", + "DbSystem", + "DbSystemInitialStorageSize", + "DbSystemInitialStorageSizeProperties", + "DbSystemOptions", + "DbSystemProperties", "DbSystemShape", + "DbVersion", + "DbVersionProperties", "DeleteAutonomousDatabaseRequest", "DeleteCloudExadataInfrastructureRequest", "DeleteCloudVmClusterRequest", + "DeleteDbSystemRequest", + "DeleteExadbVmClusterRequest", + "DeleteExascaleDbStorageVaultRequest", + "DeleteOdbNetworkRequest", + "DeleteOdbSubnetRequest", + "EncryptionKey", + "EncryptionKeyHistoryEntry", "Entitlement", + "ExadbVmCluster", + "ExadbVmClusterProperties", + "ExadbVmClusterStorageDetails", + "ExascaleDbStorageDetails", + "ExascaleDbStorageVault", + "ExascaleDbStorageVaultProperties", + "FailoverAutonomousDatabaseRequest", "GenerateAutonomousDatabaseWalletRequest", "GenerateAutonomousDatabaseWalletResponse", "GenerateType", "GetAutonomousDatabaseRequest", "GetCloudExadataInfrastructureRequest", "GetCloudVmClusterRequest", + "GetDatabaseRequest", + "GetDbSystemRequest", + "GetExadbVmClusterRequest", + "GetExascaleDbStorageVaultRequest", + "GetOdbNetworkRequest", + "GetOdbSubnetRequest", + "GetPluggableDatabaseRequest", "GiVersion", + "IdentityConnector", "ListAutonomousDatabaseBackupsRequest", "ListAutonomousDatabaseBackupsResponse", "ListAutonomousDatabaseCharacterSetsRequest", @@ -151,25 +296,60 @@ "ListCloudExadataInfrastructuresResponse", "ListCloudVmClustersRequest", "ListCloudVmClustersResponse", + "ListDatabaseCharacterSetsRequest", + "ListDatabaseCharacterSetsResponse", + "ListDatabasesRequest", + "ListDatabasesResponse", "ListDbNodesRequest", "ListDbNodesResponse", "ListDbServersRequest", "ListDbServersResponse", + "ListDbSystemInitialStorageSizesRequest", + "ListDbSystemInitialStorageSizesResponse", "ListDbSystemShapesRequest", "ListDbSystemShapesResponse", + "ListDbSystemsRequest", + "ListDbSystemsResponse", + "ListDbVersionsRequest", + 
"ListDbVersionsResponse", "ListEntitlementsRequest", "ListEntitlementsResponse", + "ListExadbVmClustersRequest", + "ListExadbVmClustersResponse", + "ListExascaleDbStorageVaultsRequest", + "ListExascaleDbStorageVaultsResponse", "ListGiVersionsRequest", "ListGiVersionsResponse", + "ListMinorVersionsRequest", + "ListMinorVersionsResponse", + "ListOdbNetworksRequest", + "ListOdbNetworksResponse", + "ListOdbSubnetsRequest", + "ListOdbSubnetsResponse", + "ListPluggableDatabasesRequest", + "ListPluggableDatabasesResponse", "LocationMetadata", "MaintenanceWindow", + "MinorVersion", + "OdbNetwork", + "OdbSubnet", "OperationMetadata", "OperationsInsightsState", "OracleDatabaseClient", + "PluggableDatabase", + "PluggableDatabaseConnectionStrings", + "PluggableDatabaseNodeLevelDetails", + "PluggableDatabaseProperties", + "RemoveVirtualMachineExadbVmClusterRequest", "RestartAutonomousDatabaseRequest", "RestoreAutonomousDatabaseRequest", "ScheduledOperationDetails", + "SourceConfig", "StartAutonomousDatabaseRequest", "State", "StopAutonomousDatabaseRequest", + "StorageSizeDetails", + "SwitchoverAutonomousDatabaseRequest", + "UpdateAutonomousDatabaseRequest", + "UpdateExadbVmClusterRequest", ) diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/gapic_metadata.json b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/gapic_metadata.json index c7192db26aa5..ce858f41ca92 100644 --- a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/gapic_metadata.json +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/gapic_metadata.json @@ -25,6 +25,31 @@ "create_cloud_vm_cluster" ] }, + "CreateDbSystem": { + "methods": [ + "create_db_system" + ] + }, + "CreateExadbVmCluster": { + "methods": [ + "create_exadb_vm_cluster" + ] + }, + "CreateExascaleDbStorageVault": { + "methods": [ + "create_exascale_db_storage_vault" + ] + }, + "CreateOdbNetwork": { + "methods": [ + "create_odb_network" + ] + }, + 
"CreateOdbSubnet": { + "methods": [ + "create_odb_subnet" + ] + }, "DeleteAutonomousDatabase": { "methods": [ "delete_autonomous_database" @@ -40,6 +65,36 @@ "delete_cloud_vm_cluster" ] }, + "DeleteDbSystem": { + "methods": [ + "delete_db_system" + ] + }, + "DeleteExadbVmCluster": { + "methods": [ + "delete_exadb_vm_cluster" + ] + }, + "DeleteExascaleDbStorageVault": { + "methods": [ + "delete_exascale_db_storage_vault" + ] + }, + "DeleteOdbNetwork": { + "methods": [ + "delete_odb_network" + ] + }, + "DeleteOdbSubnet": { + "methods": [ + "delete_odb_subnet" + ] + }, + "FailoverAutonomousDatabase": { + "methods": [ + "failover_autonomous_database" + ] + }, "GenerateAutonomousDatabaseWallet": { "methods": [ "generate_autonomous_database_wallet" @@ -60,6 +115,41 @@ "get_cloud_vm_cluster" ] }, + "GetDatabase": { + "methods": [ + "get_database" + ] + }, + "GetDbSystem": { + "methods": [ + "get_db_system" + ] + }, + "GetExadbVmCluster": { + "methods": [ + "get_exadb_vm_cluster" + ] + }, + "GetExascaleDbStorageVault": { + "methods": [ + "get_exascale_db_storage_vault" + ] + }, + "GetOdbNetwork": { + "methods": [ + "get_odb_network" + ] + }, + "GetOdbSubnet": { + "methods": [ + "get_odb_subnet" + ] + }, + "GetPluggableDatabase": { + "methods": [ + "get_pluggable_database" + ] + }, "ListAutonomousDatabaseBackups": { "methods": [ "list_autonomous_database_backups" @@ -90,6 +180,16 @@ "list_cloud_vm_clusters" ] }, + "ListDatabaseCharacterSets": { + "methods": [ + "list_database_character_sets" + ] + }, + "ListDatabases": { + "methods": [ + "list_databases" + ] + }, "ListDbNodes": { "methods": [ "list_db_nodes" @@ -100,21 +200,71 @@ "list_db_servers" ] }, + "ListDbSystemInitialStorageSizes": { + "methods": [ + "list_db_system_initial_storage_sizes" + ] + }, "ListDbSystemShapes": { "methods": [ "list_db_system_shapes" ] }, + "ListDbSystems": { + "methods": [ + "list_db_systems" + ] + }, + "ListDbVersions": { + "methods": [ + "list_db_versions" + ] + }, "ListEntitlements": { 
"methods": [ "list_entitlements" ] }, + "ListExadbVmClusters": { + "methods": [ + "list_exadb_vm_clusters" + ] + }, + "ListExascaleDbStorageVaults": { + "methods": [ + "list_exascale_db_storage_vaults" + ] + }, "ListGiVersions": { "methods": [ "list_gi_versions" ] }, + "ListMinorVersions": { + "methods": [ + "list_minor_versions" + ] + }, + "ListOdbNetworks": { + "methods": [ + "list_odb_networks" + ] + }, + "ListOdbSubnets": { + "methods": [ + "list_odb_subnets" + ] + }, + "ListPluggableDatabases": { + "methods": [ + "list_pluggable_databases" + ] + }, + "RemoveVirtualMachineExadbVmCluster": { + "methods": [ + "remove_virtual_machine_exadb_vm_cluster" + ] + }, "RestartAutonomousDatabase": { "methods": [ "restart_autonomous_database" @@ -134,6 +284,21 @@ "methods": [ "stop_autonomous_database" ] + }, + "SwitchoverAutonomousDatabase": { + "methods": [ + "switchover_autonomous_database" + ] + }, + "UpdateAutonomousDatabase": { + "methods": [ + "update_autonomous_database" + ] + }, + "UpdateExadbVmCluster": { + "methods": [ + "update_exadb_vm_cluster" + ] } } }, @@ -155,6 +320,31 @@ "create_cloud_vm_cluster" ] }, + "CreateDbSystem": { + "methods": [ + "create_db_system" + ] + }, + "CreateExadbVmCluster": { + "methods": [ + "create_exadb_vm_cluster" + ] + }, + "CreateExascaleDbStorageVault": { + "methods": [ + "create_exascale_db_storage_vault" + ] + }, + "CreateOdbNetwork": { + "methods": [ + "create_odb_network" + ] + }, + "CreateOdbSubnet": { + "methods": [ + "create_odb_subnet" + ] + }, "DeleteAutonomousDatabase": { "methods": [ "delete_autonomous_database" @@ -170,6 +360,36 @@ "delete_cloud_vm_cluster" ] }, + "DeleteDbSystem": { + "methods": [ + "delete_db_system" + ] + }, + "DeleteExadbVmCluster": { + "methods": [ + "delete_exadb_vm_cluster" + ] + }, + "DeleteExascaleDbStorageVault": { + "methods": [ + "delete_exascale_db_storage_vault" + ] + }, + "DeleteOdbNetwork": { + "methods": [ + "delete_odb_network" + ] + }, + "DeleteOdbSubnet": { + "methods": [ + 
"delete_odb_subnet" + ] + }, + "FailoverAutonomousDatabase": { + "methods": [ + "failover_autonomous_database" + ] + }, "GenerateAutonomousDatabaseWallet": { "methods": [ "generate_autonomous_database_wallet" @@ -190,6 +410,41 @@ "get_cloud_vm_cluster" ] }, + "GetDatabase": { + "methods": [ + "get_database" + ] + }, + "GetDbSystem": { + "methods": [ + "get_db_system" + ] + }, + "GetExadbVmCluster": { + "methods": [ + "get_exadb_vm_cluster" + ] + }, + "GetExascaleDbStorageVault": { + "methods": [ + "get_exascale_db_storage_vault" + ] + }, + "GetOdbNetwork": { + "methods": [ + "get_odb_network" + ] + }, + "GetOdbSubnet": { + "methods": [ + "get_odb_subnet" + ] + }, + "GetPluggableDatabase": { + "methods": [ + "get_pluggable_database" + ] + }, "ListAutonomousDatabaseBackups": { "methods": [ "list_autonomous_database_backups" @@ -220,6 +475,16 @@ "list_cloud_vm_clusters" ] }, + "ListDatabaseCharacterSets": { + "methods": [ + "list_database_character_sets" + ] + }, + "ListDatabases": { + "methods": [ + "list_databases" + ] + }, "ListDbNodes": { "methods": [ "list_db_nodes" @@ -230,21 +495,71 @@ "list_db_servers" ] }, + "ListDbSystemInitialStorageSizes": { + "methods": [ + "list_db_system_initial_storage_sizes" + ] + }, "ListDbSystemShapes": { "methods": [ "list_db_system_shapes" ] }, + "ListDbSystems": { + "methods": [ + "list_db_systems" + ] + }, + "ListDbVersions": { + "methods": [ + "list_db_versions" + ] + }, "ListEntitlements": { "methods": [ "list_entitlements" ] }, + "ListExadbVmClusters": { + "methods": [ + "list_exadb_vm_clusters" + ] + }, + "ListExascaleDbStorageVaults": { + "methods": [ + "list_exascale_db_storage_vaults" + ] + }, "ListGiVersions": { "methods": [ "list_gi_versions" ] }, + "ListMinorVersions": { + "methods": [ + "list_minor_versions" + ] + }, + "ListOdbNetworks": { + "methods": [ + "list_odb_networks" + ] + }, + "ListOdbSubnets": { + "methods": [ + "list_odb_subnets" + ] + }, + "ListPluggableDatabases": { + "methods": [ + 
"list_pluggable_databases" + ] + }, + "RemoveVirtualMachineExadbVmCluster": { + "methods": [ + "remove_virtual_machine_exadb_vm_cluster" + ] + }, "RestartAutonomousDatabase": { "methods": [ "restart_autonomous_database" @@ -264,6 +579,21 @@ "methods": [ "stop_autonomous_database" ] + }, + "SwitchoverAutonomousDatabase": { + "methods": [ + "switchover_autonomous_database" + ] + }, + "UpdateAutonomousDatabase": { + "methods": [ + "update_autonomous_database" + ] + }, + "UpdateExadbVmCluster": { + "methods": [ + "update_exadb_vm_cluster" + ] } } }, @@ -285,6 +615,31 @@ "create_cloud_vm_cluster" ] }, + "CreateDbSystem": { + "methods": [ + "create_db_system" + ] + }, + "CreateExadbVmCluster": { + "methods": [ + "create_exadb_vm_cluster" + ] + }, + "CreateExascaleDbStorageVault": { + "methods": [ + "create_exascale_db_storage_vault" + ] + }, + "CreateOdbNetwork": { + "methods": [ + "create_odb_network" + ] + }, + "CreateOdbSubnet": { + "methods": [ + "create_odb_subnet" + ] + }, "DeleteAutonomousDatabase": { "methods": [ "delete_autonomous_database" @@ -300,6 +655,36 @@ "delete_cloud_vm_cluster" ] }, + "DeleteDbSystem": { + "methods": [ + "delete_db_system" + ] + }, + "DeleteExadbVmCluster": { + "methods": [ + "delete_exadb_vm_cluster" + ] + }, + "DeleteExascaleDbStorageVault": { + "methods": [ + "delete_exascale_db_storage_vault" + ] + }, + "DeleteOdbNetwork": { + "methods": [ + "delete_odb_network" + ] + }, + "DeleteOdbSubnet": { + "methods": [ + "delete_odb_subnet" + ] + }, + "FailoverAutonomousDatabase": { + "methods": [ + "failover_autonomous_database" + ] + }, "GenerateAutonomousDatabaseWallet": { "methods": [ "generate_autonomous_database_wallet" @@ -320,6 +705,41 @@ "get_cloud_vm_cluster" ] }, + "GetDatabase": { + "methods": [ + "get_database" + ] + }, + "GetDbSystem": { + "methods": [ + "get_db_system" + ] + }, + "GetExadbVmCluster": { + "methods": [ + "get_exadb_vm_cluster" + ] + }, + "GetExascaleDbStorageVault": { + "methods": [ + 
"get_exascale_db_storage_vault" + ] + }, + "GetOdbNetwork": { + "methods": [ + "get_odb_network" + ] + }, + "GetOdbSubnet": { + "methods": [ + "get_odb_subnet" + ] + }, + "GetPluggableDatabase": { + "methods": [ + "get_pluggable_database" + ] + }, "ListAutonomousDatabaseBackups": { "methods": [ "list_autonomous_database_backups" @@ -350,6 +770,16 @@ "list_cloud_vm_clusters" ] }, + "ListDatabaseCharacterSets": { + "methods": [ + "list_database_character_sets" + ] + }, + "ListDatabases": { + "methods": [ + "list_databases" + ] + }, "ListDbNodes": { "methods": [ "list_db_nodes" @@ -360,21 +790,71 @@ "list_db_servers" ] }, + "ListDbSystemInitialStorageSizes": { + "methods": [ + "list_db_system_initial_storage_sizes" + ] + }, "ListDbSystemShapes": { "methods": [ "list_db_system_shapes" ] }, + "ListDbSystems": { + "methods": [ + "list_db_systems" + ] + }, + "ListDbVersions": { + "methods": [ + "list_db_versions" + ] + }, "ListEntitlements": { "methods": [ "list_entitlements" ] }, + "ListExadbVmClusters": { + "methods": [ + "list_exadb_vm_clusters" + ] + }, + "ListExascaleDbStorageVaults": { + "methods": [ + "list_exascale_db_storage_vaults" + ] + }, "ListGiVersions": { "methods": [ "list_gi_versions" ] }, + "ListMinorVersions": { + "methods": [ + "list_minor_versions" + ] + }, + "ListOdbNetworks": { + "methods": [ + "list_odb_networks" + ] + }, + "ListOdbSubnets": { + "methods": [ + "list_odb_subnets" + ] + }, + "ListPluggableDatabases": { + "methods": [ + "list_pluggable_databases" + ] + }, + "RemoveVirtualMachineExadbVmCluster": { + "methods": [ + "remove_virtual_machine_exadb_vm_cluster" + ] + }, "RestartAutonomousDatabase": { "methods": [ "restart_autonomous_database" @@ -394,6 +874,21 @@ "methods": [ "stop_autonomous_database" ] + }, + "SwitchoverAutonomousDatabase": { + "methods": [ + "switchover_autonomous_database" + ] + }, + "UpdateAutonomousDatabase": { + "methods": [ + "update_autonomous_database" + ] + }, + "UpdateExadbVmCluster": { + "methods": [ + 
"update_exadb_vm_cluster" + ] } } } diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/async_client.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/async_client.py index fb9040e6ed40..6864aaf3cacf 100644 --- a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/async_client.py +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/async_client.py @@ -28,7 +28,6 @@ Type, Union, ) -import uuid from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 @@ -50,6 +49,7 @@ from google.cloud.location import locations_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from google.cloud.oracledatabase_v1.services.oracle_database import pagers @@ -57,19 +57,43 @@ autonomous_database_character_set, autonomous_db_backup, autonomous_db_version, + common, + database, + database_character_set, db_node, db_server, +) +from google.cloud.oracledatabase_v1.types import ( + db_system_initial_storage_size, db_system_shape, + db_version, entitlement, exadata_infra, - gi_version, +) +from google.cloud.oracledatabase_v1.types import ( oracledatabase, + pluggable_database, vm_cluster, ) from google.cloud.oracledatabase_v1.types import ( autonomous_database as gco_autonomous_database, ) +from google.cloud.oracledatabase_v1.types import ( + exadb_vm_cluster as gco_exadb_vm_cluster, +) +from google.cloud.oracledatabase_v1.types import ( + exascale_db_storage_vault as gco_exascale_db_storage_vault, +) from google.cloud.oracledatabase_v1.types import autonomous_database +from google.cloud.oracledatabase_v1.types import db_system +from google.cloud.oracledatabase_v1.types import 
db_system as gco_db_system +from google.cloud.oracledatabase_v1.types import exadb_vm_cluster +from google.cloud.oracledatabase_v1.types import exascale_db_storage_vault +from google.cloud.oracledatabase_v1.types import gi_version, minor_version +from google.cloud.oracledatabase_v1.types import odb_network +from google.cloud.oracledatabase_v1.types import odb_network as gco_odb_network +from google.cloud.oracledatabase_v1.types import odb_subnet +from google.cloud.oracledatabase_v1.types import odb_subnet as gco_odb_subnet from .client import OracleDatabaseClient from .transports.base import DEFAULT_CLIENT_INFO, OracleDatabaseTransport @@ -131,20 +155,62 @@ class OracleDatabaseAsyncClient: parse_cloud_vm_cluster_path = staticmethod( OracleDatabaseClient.parse_cloud_vm_cluster_path ) + crypto_key_path = staticmethod(OracleDatabaseClient.crypto_key_path) + parse_crypto_key_path = staticmethod(OracleDatabaseClient.parse_crypto_key_path) + database_path = staticmethod(OracleDatabaseClient.database_path) + parse_database_path = staticmethod(OracleDatabaseClient.parse_database_path) + database_character_set_path = staticmethod( + OracleDatabaseClient.database_character_set_path + ) + parse_database_character_set_path = staticmethod( + OracleDatabaseClient.parse_database_character_set_path + ) db_node_path = staticmethod(OracleDatabaseClient.db_node_path) parse_db_node_path = staticmethod(OracleDatabaseClient.parse_db_node_path) db_server_path = staticmethod(OracleDatabaseClient.db_server_path) parse_db_server_path = staticmethod(OracleDatabaseClient.parse_db_server_path) + db_system_path = staticmethod(OracleDatabaseClient.db_system_path) + parse_db_system_path = staticmethod(OracleDatabaseClient.parse_db_system_path) + db_system_initial_storage_size_path = staticmethod( + OracleDatabaseClient.db_system_initial_storage_size_path + ) + parse_db_system_initial_storage_size_path = staticmethod( + OracleDatabaseClient.parse_db_system_initial_storage_size_path + ) 
db_system_shape_path = staticmethod(OracleDatabaseClient.db_system_shape_path) parse_db_system_shape_path = staticmethod( OracleDatabaseClient.parse_db_system_shape_path ) + db_version_path = staticmethod(OracleDatabaseClient.db_version_path) + parse_db_version_path = staticmethod(OracleDatabaseClient.parse_db_version_path) entitlement_path = staticmethod(OracleDatabaseClient.entitlement_path) parse_entitlement_path = staticmethod(OracleDatabaseClient.parse_entitlement_path) + exadb_vm_cluster_path = staticmethod(OracleDatabaseClient.exadb_vm_cluster_path) + parse_exadb_vm_cluster_path = staticmethod( + OracleDatabaseClient.parse_exadb_vm_cluster_path + ) + exascale_db_storage_vault_path = staticmethod( + OracleDatabaseClient.exascale_db_storage_vault_path + ) + parse_exascale_db_storage_vault_path = staticmethod( + OracleDatabaseClient.parse_exascale_db_storage_vault_path + ) gi_version_path = staticmethod(OracleDatabaseClient.gi_version_path) parse_gi_version_path = staticmethod(OracleDatabaseClient.parse_gi_version_path) + minor_version_path = staticmethod(OracleDatabaseClient.minor_version_path) + parse_minor_version_path = staticmethod( + OracleDatabaseClient.parse_minor_version_path + ) network_path = staticmethod(OracleDatabaseClient.network_path) parse_network_path = staticmethod(OracleDatabaseClient.parse_network_path) + odb_network_path = staticmethod(OracleDatabaseClient.odb_network_path) + parse_odb_network_path = staticmethod(OracleDatabaseClient.parse_odb_network_path) + odb_subnet_path = staticmethod(OracleDatabaseClient.odb_subnet_path) + parse_odb_subnet_path = staticmethod(OracleDatabaseClient.parse_odb_subnet_path) + pluggable_database_path = staticmethod(OracleDatabaseClient.pluggable_database_path) + parse_pluggable_database_path = staticmethod( + OracleDatabaseClient.parse_pluggable_database_path + ) common_billing_account_path = staticmethod( OracleDatabaseClient.common_billing_account_path ) @@ -1174,9 +1240,6 @@ async def 
sample_create_cloud_vm_cluster(): # Initialize request argument(s) cloud_vm_cluster = oracledatabase_v1.CloudVmCluster() cloud_vm_cluster.exadata_infrastructure = "exadata_infrastructure_value" - cloud_vm_cluster.cidr = "cidr_value" - cloud_vm_cluster.backup_subnet_cidr = "backup_subnet_cidr_value" - cloud_vm_cluster.network = "network_value" request = oracledatabase_v1.CreateCloudVmClusterRequest( parent="parent_value", @@ -1728,6 +1791,7 @@ async def sample_list_db_nodes(): Required. The parent value for database node in the following format: projects/{project}/locations/{location}/cloudVmClusters/{cloudVmCluster}. + . This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -1935,6 +1999,132 @@ async def sample_list_gi_versions(): # Done; return the response. return response + async def list_minor_versions( + self, + request: Optional[Union[minor_version.ListMinorVersionsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListMinorVersionsAsyncPager: + r"""Lists all the valid minor versions for the given + project, location, gi version and shape family. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_list_minor_versions(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListMinorVersionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_minor_versions(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.ListMinorVersionsRequest, dict]]): + The request object. The request for ``MinorVersion.List``. + parent (:class:`str`): + Required. The parent value for the MinorVersion resource + with the format: + projects/{project}/locations/{location}/giVersions/{gi_version} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListMinorVersionsAsyncPager: + The response for MinorVersion.List. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, minor_version.ListMinorVersionsRequest): + request = minor_version.ListMinorVersionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_minor_versions + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListMinorVersionsAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + async def list_db_system_shapes( self, request: Optional[Union[oracledatabase.ListDbSystemShapesRequest, dict]] = None, @@ -2459,6 +2649,148 @@ async def sample_create_autonomous_database(): # Done; return the response. 
return response + async def update_autonomous_database( + self, + request: Optional[ + Union[oracledatabase.UpdateAutonomousDatabaseRequest, dict] + ] = None, + *, + autonomous_database: Optional[ + gco_autonomous_database.AutonomousDatabase + ] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates the parameters of a single Autonomous + Database. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_update_autonomous_database(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.UpdateAutonomousDatabaseRequest( + ) + + # Make the request + operation = client.update_autonomous_database(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.UpdateAutonomousDatabaseRequest, dict]]): + The request object. The request for ``AutonomousDatabase.Update``. + autonomous_database (:class:`google.cloud.oracledatabase_v1.types.AutonomousDatabase`): + Required. The resource being updated + This corresponds to the ``autonomous_database`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Optional. Field mask is used to specify the fields to be + overwritten in the Exadata resource by the update. The + fields specified in the update_mask are relative to the + resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then all fields will be overwritten. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.oracledatabase_v1.types.AutonomousDatabase` Details of the Autonomous Database resource. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/AutonomousDatabase/ + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [autonomous_database, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
+ if not isinstance(request, oracledatabase.UpdateAutonomousDatabaseRequest): + request = oracledatabase.UpdateAutonomousDatabaseRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if autonomous_database is not None: + request.autonomous_database = autonomous_database + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.update_autonomous_database + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("autonomous_database.name", request.autonomous_database.name),) + ), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gco_autonomous_database.AutonomousDatabase, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. + return response + async def delete_autonomous_database( self, request: Optional[ @@ -3650,6 +3982,4129 @@ async def sample_restart_autonomous_database(): # Done; return the response. 
return response + async def switchover_autonomous_database( + self, + request: Optional[ + Union[oracledatabase.SwitchoverAutonomousDatabaseRequest, dict] + ] = None, + *, + name: Optional[str] = None, + peer_autonomous_database: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Initiates a switchover of specified autonomous + database to the associated peer database. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_switchover_autonomous_database(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.SwitchoverAutonomousDatabaseRequest( + name="name_value", + peer_autonomous_database="peer_autonomous_database_value", + ) + + # Make the request + operation = client.switchover_autonomous_database(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.SwitchoverAutonomousDatabaseRequest, dict]]): + The request object. The request for + ``OracleDatabase.SwitchoverAutonomousDatabase``. + name (:class:`str`): + Required. The name of the Autonomous Database in the + following format: + projects/{project}/locations/{location}/autonomousDatabases/{autonomous_database}. 
+ + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + peer_autonomous_database (:class:`str`): + Required. The peer database name to + switch over to. + + This corresponds to the ``peer_autonomous_database`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.oracledatabase_v1.types.AutonomousDatabase` Details of the Autonomous Database resource. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/AutonomousDatabase/ + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name, peer_autonomous_database] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
+ if not isinstance(request, oracledatabase.SwitchoverAutonomousDatabaseRequest): + request = oracledatabase.SwitchoverAutonomousDatabaseRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if peer_autonomous_database is not None: + request.peer_autonomous_database = peer_autonomous_database + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.switchover_autonomous_database + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + autonomous_database.AutonomousDatabase, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. + return response + + async def failover_autonomous_database( + self, + request: Optional[ + Union[oracledatabase.FailoverAutonomousDatabaseRequest, dict] + ] = None, + *, + name: Optional[str] = None, + peer_autonomous_database: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Initiates a failover to target autonomous database + from the associated primary database. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_failover_autonomous_database(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.FailoverAutonomousDatabaseRequest( + name="name_value", + peer_autonomous_database="peer_autonomous_database_value", + ) + + # Make the request + operation = client.failover_autonomous_database(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.FailoverAutonomousDatabaseRequest, dict]]): + The request object. The request for + ``OracleDatabase.FailoverAutonomousDatabase``. + name (:class:`str`): + Required. The name of the Autonomous Database in the + following format: + projects/{project}/locations/{location}/autonomousDatabases/{autonomous_database}. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + peer_autonomous_database (:class:`str`): + Required. The peer database name to + fail over to. + + This corresponds to the ``peer_autonomous_database`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.oracledatabase_v1.types.AutonomousDatabase` Details of the Autonomous Database resource. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/AutonomousDatabase/ + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name, peer_autonomous_database] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, oracledatabase.FailoverAutonomousDatabaseRequest): + request = oracledatabase.FailoverAutonomousDatabaseRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if peer_autonomous_database is not None: + request.peer_autonomous_database = peer_autonomous_database + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.failover_autonomous_database + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. 
+ self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + autonomous_database.AutonomousDatabase, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. + return response + + async def list_odb_networks( + self, + request: Optional[Union[odb_network.ListOdbNetworksRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListOdbNetworksAsyncPager: + r"""Lists the ODB Networks in a given project and + location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_list_odb_networks(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListOdbNetworksRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_odb_networks(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.ListOdbNetworksRequest, dict]]): + The request object. The request for ``OdbNetwork.List``. + parent (:class:`str`): + Required. 
The parent value for the + ODB Network in the following format: + projects/{project}/locations/{location}. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListOdbNetworksAsyncPager: + The response for OdbNetwork.List. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, odb_network.ListOdbNetworksRequest): + request = odb_network.ListOdbNetworksRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_odb_networks + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListOdbNetworksAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_odb_network( + self, + request: Optional[Union[odb_network.GetOdbNetworkRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> odb_network.OdbNetwork: + r"""Gets details of a single ODB Network. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_get_odb_network(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetOdbNetworkRequest( + name="name_value", + ) + + # Make the request + response = await client.get_odb_network(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.GetOdbNetworkRequest, dict]]): + The request object. The request for ``OdbNetwork.Get``. + name (:class:`str`): + Required. The name of the OdbNetwork in the following + format: + projects/{project}/locations/{location}/odbNetworks/{odb_network}. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.types.OdbNetwork: + Represents OdbNetwork resource. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, odb_network.GetOdbNetworkRequest): + request = odb_network.GetOdbNetworkRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_odb_network + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_odb_network( + self, + request: Optional[Union[gco_odb_network.CreateOdbNetworkRequest, dict]] = None, + *, + parent: Optional[str] = None, + odb_network: Optional[gco_odb_network.OdbNetwork] = None, + odb_network_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new ODB Network in a given project and + location. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_create_odb_network(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + odb_network = oracledatabase_v1.OdbNetwork() + odb_network.network = "network_value" + + request = oracledatabase_v1.CreateOdbNetworkRequest( + parent="parent_value", + odb_network_id="odb_network_id_value", + odb_network=odb_network, + ) + + # Make the request + operation = client.create_odb_network(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.CreateOdbNetworkRequest, dict]]): + The request object. The request for ``OdbNetwork.Create``. + parent (:class:`str`): + Required. The parent value for the + OdbNetwork in the following format: + projects/{project}/locations/{location}. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + odb_network (:class:`google.cloud.oracledatabase_v1.types.OdbNetwork`): + Required. Details of the OdbNetwork + instance to create. + + This corresponds to the ``odb_network`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + odb_network_id (:class:`str`): + Required. The ID of the OdbNetwork to create. This value + is restricted to + (^\ `a-z <[a-z0-9-]{0,61}[a-z0-9]>`__?$) and must be a + maximum of 63 characters in length. 
The value must start + with a letter and end with a letter or a number. + + This corresponds to the ``odb_network_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.oracledatabase_v1.types.OdbNetwork` + Represents OdbNetwork resource. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, odb_network, odb_network_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, gco_odb_network.CreateOdbNetworkRequest): + request = gco_odb_network.CreateOdbNetworkRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if odb_network is not None: + request.odb_network = odb_network + if odb_network_id is not None: + request.odb_network_id = odb_network_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_odb_network + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gco_odb_network.OdbNetwork, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. + return response + + async def delete_odb_network( + self, + request: Optional[Union[odb_network.DeleteOdbNetworkRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single ODB Network. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_delete_odb_network(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.DeleteOdbNetworkRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_odb_network(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.DeleteOdbNetworkRequest, dict]]): + The request object. The request for ``OdbNetwork.Delete``. + name (:class:`str`): + Required. The name of the resource in the following + format: + projects/{project}/locations/{location}/odbNetworks/{odb_network}. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. 
For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, odb_network.DeleteOdbNetworkRequest): + request = odb_network.DeleteOdbNetworkRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_odb_network + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def list_odb_subnets( + self, + request: Optional[Union[odb_subnet.ListOdbSubnetsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListOdbSubnetsAsyncPager: + r"""Lists all the ODB Subnets in a given ODB Network. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_list_odb_subnets(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListOdbSubnetsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_odb_subnets(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.ListOdbSubnetsRequest, dict]]): + The request object. The request for ``OdbSubnet.List``. + parent (:class:`str`): + Required. The parent value for the OdbSubnet in the + following format: + projects/{project}/locations/{location}/odbNetworks/{odb_network}. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListOdbSubnetsAsyncPager: + The response for OdbSubnet.List. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, odb_subnet.ListOdbSubnetsRequest): + request = odb_subnet.ListOdbSubnetsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_odb_subnets + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListOdbSubnetsAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_odb_subnet( + self, + request: Optional[Union[odb_subnet.GetOdbSubnetRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> odb_subnet.OdbSubnet: + r"""Gets details of a single ODB Subnet. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_get_odb_subnet(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetOdbSubnetRequest( + name="name_value", + ) + + # Make the request + response = await client.get_odb_subnet(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.GetOdbSubnetRequest, dict]]): + The request object. The request for ``OdbSubnet.Get``. + name (:class:`str`): + Required. The name of the OdbSubnet in the following + format: + projects/{project}/locations/{location}/odbNetworks/{odb_network}/odbSubnets/{odb_subnet}. 
+ + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.types.OdbSubnet: + Represents OdbSubnet resource. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, odb_subnet.GetOdbSubnetRequest): + request = odb_subnet.GetOdbSubnetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_odb_subnet + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. 
+ self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_odb_subnet( + self, + request: Optional[Union[gco_odb_subnet.CreateOdbSubnetRequest, dict]] = None, + *, + parent: Optional[str] = None, + odb_subnet: Optional[gco_odb_subnet.OdbSubnet] = None, + odb_subnet_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new ODB Subnet in a given ODB Network. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_create_odb_subnet(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + odb_subnet = oracledatabase_v1.OdbSubnet() + odb_subnet.cidr_range = "cidr_range_value" + odb_subnet.purpose = "BACKUP_SUBNET" + + request = oracledatabase_v1.CreateOdbSubnetRequest( + parent="parent_value", + odb_subnet_id="odb_subnet_id_value", + odb_subnet=odb_subnet, + ) + + # Make the request + operation = client.create_odb_subnet(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.CreateOdbSubnetRequest, dict]]): + The request object. 
The request for ``OdbSubnet.Create``. + parent (:class:`str`): + Required. The parent value for the OdbSubnet in the + following format: + projects/{project}/locations/{location}/odbNetworks/{odb_network}. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + odb_subnet (:class:`google.cloud.oracledatabase_v1.types.OdbSubnet`): + Required. Details of the OdbSubnet + instance to create. + + This corresponds to the ``odb_subnet`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + odb_subnet_id (:class:`str`): + Required. The ID of the OdbSubnet to create. This value + is restricted to + (^\ `a-z <[a-z0-9-]{0,61}[a-z0-9]>`__?$) and must be a + maximum of 63 characters in length. The value must start + with a letter and end with a letter or a number. + + This corresponds to the ``odb_subnet_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.oracledatabase_v1.types.OdbSubnet` + Represents OdbSubnet resource. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ flattened_params = [parent, odb_subnet, odb_subnet_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, gco_odb_subnet.CreateOdbSubnetRequest): + request = gco_odb_subnet.CreateOdbSubnetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if odb_subnet is not None: + request.odb_subnet = odb_subnet + if odb_subnet_id is not None: + request.odb_subnet_id = odb_subnet_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_odb_subnet + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gco_odb_subnet.OdbSubnet, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def delete_odb_subnet( + self, + request: Optional[Union[odb_subnet.DeleteOdbSubnetRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single ODB Subnet. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_delete_odb_subnet(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.DeleteOdbSubnetRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_odb_subnet(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.DeleteOdbSubnetRequest, dict]]): + The request object. The request for ``OdbSubnet.Delete``. + name (:class:`str`): + Required. The name of the resource in the following + format: + projects/{project}/locations/{region}/odbNetworks/{odb_network}/odbSubnets/{odb_subnet}. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, odb_subnet.DeleteOdbSubnetRequest): + request = odb_subnet.DeleteOdbSubnetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_odb_subnet + ] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. + return response + + async def list_exadb_vm_clusters( + self, + request: Optional[ + Union[oracledatabase.ListExadbVmClustersRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListExadbVmClustersAsyncPager: + r"""Lists all the Exadb (Exascale) VM Clusters for the + given project and location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_list_exadb_vm_clusters(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListExadbVmClustersRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_exadb_vm_clusters(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.ListExadbVmClustersRequest, dict]]): + The request object. The request for ``ExadbVmCluster.List``. + parent (:class:`str`): + Required. The parent value for + ExadbVmClusters in the following format: + projects/{project}/locations/{location}. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListExadbVmClustersAsyncPager: + The response for ExadbVmCluster.List. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, oracledatabase.ListExadbVmClustersRequest): + request = oracledatabase.ListExadbVmClustersRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_exadb_vm_clusters + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListExadbVmClustersAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def get_exadb_vm_cluster( + self, + request: Optional[Union[oracledatabase.GetExadbVmClusterRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> exadb_vm_cluster.ExadbVmCluster: + r"""Gets details of a single Exadb (Exascale) VM Cluster. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_get_exadb_vm_cluster(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetExadbVmClusterRequest( + name="name_value", + ) + + # Make the request + response = await client.get_exadb_vm_cluster(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.GetExadbVmClusterRequest, dict]]): + The request object. The request for ``ExadbVmCluster.Get``. + name (:class:`str`): + Required. The name of the ExadbVmCluster in the + following format: + projects/{project}/locations/{location}/exadbVmClusters/{exadb_vm_cluster}. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.types.ExadbVmCluster: + ExadbVmCluster represents a cluster + of VMs that are used to run Exadata + workloads. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/ExadbVmCluster/ + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, oracledatabase.GetExadbVmClusterRequest): + request = oracledatabase.GetExadbVmClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_exadb_vm_cluster + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_exadb_vm_cluster( + self, + request: Optional[ + Union[oracledatabase.CreateExadbVmClusterRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + exadb_vm_cluster: Optional[gco_exadb_vm_cluster.ExadbVmCluster] = None, + exadb_vm_cluster_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new Exadb (Exascale) VM Cluster resource. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_create_exadb_vm_cluster(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + exadb_vm_cluster = oracledatabase_v1.ExadbVmCluster() + exadb_vm_cluster.properties.grid_image_id = "grid_image_id_value" + exadb_vm_cluster.properties.node_count = 1070 + exadb_vm_cluster.properties.enabled_ecpu_count_per_node = 2826 + exadb_vm_cluster.properties.vm_file_system_storage.size_in_gbs_per_node = 2103 + exadb_vm_cluster.properties.exascale_db_storage_vault = "exascale_db_storage_vault_value" + exadb_vm_cluster.properties.hostname_prefix = "hostname_prefix_value" + exadb_vm_cluster.properties.ssh_public_keys = ['ssh_public_keys_value1', 'ssh_public_keys_value2'] + exadb_vm_cluster.properties.shape_attribute = "BLOCK_STORAGE" + 
exadb_vm_cluster.odb_subnet = "odb_subnet_value" + exadb_vm_cluster.backup_odb_subnet = "backup_odb_subnet_value" + exadb_vm_cluster.display_name = "display_name_value" + + request = oracledatabase_v1.CreateExadbVmClusterRequest( + parent="parent_value", + exadb_vm_cluster_id="exadb_vm_cluster_id_value", + exadb_vm_cluster=exadb_vm_cluster, + ) + + # Make the request + operation = client.create_exadb_vm_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.CreateExadbVmClusterRequest, dict]]): + The request object. The request for ``ExadbVmCluster.Create``. + parent (:class:`str`): + Required. The value for parent of the + ExadbVmCluster in the following format: + projects/{project}/locations/{location}. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + exadb_vm_cluster (:class:`google.cloud.oracledatabase_v1.types.ExadbVmCluster`): + Required. The resource being created. + This corresponds to the ``exadb_vm_cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + exadb_vm_cluster_id (:class:`str`): + Required. The ID of the ExadbVmCluster to create. This + value is restricted to + (^\ `a-z <[a-z0-9-]{0,61}[a-z0-9]>`__?$) and must be a + maximum of 63 characters in length. The value must start + with a letter and end with a letter or a number. + + This corresponds to the ``exadb_vm_cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.oracledatabase_v1.types.ExadbVmCluster` ExadbVmCluster represents a cluster of VMs that are used to run Exadata + workloads. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/ExadbVmCluster/ + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, exadb_vm_cluster, exadb_vm_cluster_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, oracledatabase.CreateExadbVmClusterRequest): + request = oracledatabase.CreateExadbVmClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if exadb_vm_cluster is not None: + request.exadb_vm_cluster = exadb_vm_cluster + if exadb_vm_cluster_id is not None: + request.exadb_vm_cluster_id = exadb_vm_cluster_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_exadb_vm_cluster + ] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gco_exadb_vm_cluster.ExadbVmCluster, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. + return response + + async def delete_exadb_vm_cluster( + self, + request: Optional[ + Union[oracledatabase.DeleteExadbVmClusterRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single Exadb (Exascale) VM Cluster. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_delete_exadb_vm_cluster(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.DeleteExadbVmClusterRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_exadb_vm_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.DeleteExadbVmClusterRequest, dict]]): + The request object. The request for ``ExadbVmCluster.Delete``. + name (:class:`str`): + Required. The name of the ExadbVmCluster in the + following format: + projects/{project}/locations/{location}/exadbVmClusters/{exadb_vm_cluster}. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. 
For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, oracledatabase.DeleteExadbVmClusterRequest): + request = oracledatabase.DeleteExadbVmClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_exadb_vm_cluster + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def update_exadb_vm_cluster( + self, + request: Optional[ + Union[oracledatabase.UpdateExadbVmClusterRequest, dict] + ] = None, + *, + exadb_vm_cluster: Optional[gco_exadb_vm_cluster.ExadbVmCluster] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a single Exadb (Exascale) VM Cluster. To add + virtual machines to existing exadb vm cluster, only pass + the node count. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_update_exadb_vm_cluster(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + exadb_vm_cluster = oracledatabase_v1.ExadbVmCluster() + exadb_vm_cluster.properties.grid_image_id = "grid_image_id_value" + exadb_vm_cluster.properties.node_count = 1070 + exadb_vm_cluster.properties.enabled_ecpu_count_per_node = 2826 + exadb_vm_cluster.properties.vm_file_system_storage.size_in_gbs_per_node = 2103 + exadb_vm_cluster.properties.exascale_db_storage_vault = "exascale_db_storage_vault_value" + exadb_vm_cluster.properties.hostname_prefix = "hostname_prefix_value" + exadb_vm_cluster.properties.ssh_public_keys = ['ssh_public_keys_value1', 'ssh_public_keys_value2'] + exadb_vm_cluster.properties.shape_attribute = "BLOCK_STORAGE" + exadb_vm_cluster.odb_subnet = "odb_subnet_value" + 
exadb_vm_cluster.backup_odb_subnet = "backup_odb_subnet_value" + exadb_vm_cluster.display_name = "display_name_value" + + request = oracledatabase_v1.UpdateExadbVmClusterRequest( + exadb_vm_cluster=exadb_vm_cluster, + ) + + # Make the request + operation = client.update_exadb_vm_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.UpdateExadbVmClusterRequest, dict]]): + The request object. The request for ``ExadbVmCluster.Update``. We only + support adding the Virtual Machine to the + ExadbVmCluster. Rest of the fields in ExadbVmCluster are + immutable. + exadb_vm_cluster (:class:`google.cloud.oracledatabase_v1.types.ExadbVmCluster`): + Required. The resource being updated. + This corresponds to the ``exadb_vm_cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Optional. A mask specifying which + fields in th VM Cluster should be + updated. A field specified in the mask + is overwritten. If a mask isn't provided + then all the fields in the VM Cluster + are overwritten. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.cloud.oracledatabase_v1.types.ExadbVmCluster` ExadbVmCluster represents a cluster of VMs that are used to run Exadata + workloads. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/ExadbVmCluster/ + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [exadb_vm_cluster, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, oracledatabase.UpdateExadbVmClusterRequest): + request = oracledatabase.UpdateExadbVmClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if exadb_vm_cluster is not None: + request.exadb_vm_cluster = exadb_vm_cluster + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.update_exadb_vm_cluster + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("exadb_vm_cluster.name", request.exadb_vm_cluster.name),) + ), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gco_exadb_vm_cluster.ExadbVmCluster, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. + return response + + async def remove_virtual_machine_exadb_vm_cluster( + self, + request: Optional[ + Union[oracledatabase.RemoveVirtualMachineExadbVmClusterRequest, dict] + ] = None, + *, + name: Optional[str] = None, + hostnames: Optional[MutableSequence[str]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Removes virtual machines from an existing exadb vm + cluster. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_remove_virtual_machine_exadb_vm_cluster(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.RemoveVirtualMachineExadbVmClusterRequest( + name="name_value", + hostnames=['hostnames_value1', 'hostnames_value2'], + ) + + # Make the request + operation = client.remove_virtual_machine_exadb_vm_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.RemoveVirtualMachineExadbVmClusterRequest, dict]]): + The request object. 
The request for ``ExadbVmCluster.RemoveVirtualMachine``. + name (:class:`str`): + Required. The name of the ExadbVmCluster in the + following format: + projects/{project}/locations/{location}/exadbVmClusters/{exadb_vm_cluster}. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + hostnames (:class:`MutableSequence[str]`): + Required. The list of host names of + db nodes to be removed from the + ExadbVmCluster. + + This corresponds to the ``hostnames`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.oracledatabase_v1.types.ExadbVmCluster` ExadbVmCluster represents a cluster of VMs that are used to run Exadata + workloads. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/ExadbVmCluster/ + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name, hostnames] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, oracledatabase.RemoveVirtualMachineExadbVmClusterRequest + ): + request = oracledatabase.RemoveVirtualMachineExadbVmClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if hostnames: + request.hostnames.extend(hostnames) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.remove_virtual_machine_exadb_vm_cluster + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + exadb_vm_cluster.ExadbVmCluster, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. + return response + + async def list_exascale_db_storage_vaults( + self, + request: Optional[ + Union[exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListExascaleDbStorageVaultsAsyncPager: + r"""Lists all the ExascaleDB Storage Vaults for the given + project and location. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_list_exascale_db_storage_vaults(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListExascaleDbStorageVaultsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_exascale_db_storage_vaults(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.ListExascaleDbStorageVaultsRequest, dict]]): + The request object. The request for ``ExascaleDbStorageVault.List``. + parent (:class:`str`): + Required. The parent value for + ExascaleDbStorageVault in the following + format: + projects/{project}/locations/{location}. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListExascaleDbStorageVaultsAsyncPager: + The response for ExascaleDbStorageVault.List. 
+ + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest + ): + request = exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest( + request + ) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_exascale_db_storage_vaults + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListExascaleDbStorageVaultsAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def get_exascale_db_storage_vault( + self, + request: Optional[ + Union[exascale_db_storage_vault.GetExascaleDbStorageVaultRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> exascale_db_storage_vault.ExascaleDbStorageVault: + r"""Gets details of a single ExascaleDB Storage Vault. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_get_exascale_db_storage_vault(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetExascaleDbStorageVaultRequest( + name="name_value", + ) + + # Make the request + response = await client.get_exascale_db_storage_vault(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.GetExascaleDbStorageVaultRequest, dict]]): + The request object. The request for ``ExascaleDbStorageVault.Get``. + name (:class:`str`): + Required. The name of the ExascaleDbStorageVault in the + following format: + projects/{project}/locations/{location}/exascaleDbStorageVaults/{exascale_db_storage_vault}. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.types.ExascaleDbStorageVault: + ExascaleDbStorageVault represents a + storage vault exadb vm cluster resource. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/ExascaleDbStorageVault/ + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, exascale_db_storage_vault.GetExascaleDbStorageVaultRequest + ): + request = exascale_db_storage_vault.GetExascaleDbStorageVaultRequest( + request + ) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_exascale_db_storage_vault + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. 
+ self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_exascale_db_storage_vault( + self, + request: Optional[ + Union[ + gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest, dict + ] + ] = None, + *, + parent: Optional[str] = None, + exascale_db_storage_vault: Optional[ + gco_exascale_db_storage_vault.ExascaleDbStorageVault + ] = None, + exascale_db_storage_vault_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new ExascaleDB Storage Vault resource. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_create_exascale_db_storage_vault(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + exascale_db_storage_vault = oracledatabase_v1.ExascaleDbStorageVault() + exascale_db_storage_vault.display_name = "display_name_value" + exascale_db_storage_vault.properties.exascale_db_storage_details.total_size_gbs = 1497 + + request = oracledatabase_v1.CreateExascaleDbStorageVaultRequest( + parent="parent_value", + exascale_db_storage_vault_id="exascale_db_storage_vault_id_value", + exascale_db_storage_vault=exascale_db_storage_vault, + ) + + # Make the request + operation = client.create_exascale_db_storage_vault(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.CreateExascaleDbStorageVaultRequest, dict]]): + The request object. The request for ``ExascaleDbStorageVault.Create``. + parent (:class:`str`): + Required. The value for parent of the + ExascaleDbStorageVault in the following + format: + projects/{project}/locations/{location}. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + exascale_db_storage_vault (:class:`google.cloud.oracledatabase_v1.types.ExascaleDbStorageVault`): + Required. The resource being created. + This corresponds to the ``exascale_db_storage_vault`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + exascale_db_storage_vault_id (:class:`str`): + Required. The ID of the ExascaleDbStorageVault to + create. 
This value is restricted to +                ``^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`` and must be a +                maximum of 63 characters in length. The value must start +                with a letter and end with a letter or a number. + +                This corresponds to the ``exascale_db_storage_vault_id`` field +                on the ``request`` instance; if ``request`` is provided, this +                should not be set. +            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, +                should be retried. +            timeout (float): The timeout for this request. +            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be +                sent along with the request as metadata. Normally, each value must be of type `str`, +                but for metadata keys ending with the suffix `-bin`, the corresponding values must +                be of type `bytes`. + +        Returns: +            google.api_core.operation_async.AsyncOperation: +                An object representing a long-running operation. + +                The result type for the operation will be :class:`google.cloud.oracledatabase_v1.types.ExascaleDbStorageVault` ExascaleDbStorageVault represents a storage vault exadb vm cluster resource. +                https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/ExascaleDbStorageVault/ + +        """ +        # Create or coerce a protobuf request object. +        # - Quick check: If we got a request object, we should *not* have +        #   gotten any keyword arguments that map to the request. +        flattened_params = [ +            parent, +            exascale_db_storage_vault, +            exascale_db_storage_vault_id, +        ] +        has_flattened_params = ( +            len([param for param in flattened_params if param is not None]) > 0 +        ) +        if request is not None and has_flattened_params: +            raise ValueError( +                "If the `request` argument is set, then none of " +                "the individual field arguments should be set." +            ) + +        # - Use the request object if provided (there's no risk of modifying the input as +        #   there are no flattened fields), or create one. 
+ if not isinstance( + request, gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest + ): + request = gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest( + request + ) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if exascale_db_storage_vault is not None: + request.exascale_db_storage_vault = exascale_db_storage_vault + if exascale_db_storage_vault_id is not None: + request.exascale_db_storage_vault_id = exascale_db_storage_vault_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_exascale_db_storage_vault + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gco_exascale_db_storage_vault.ExascaleDbStorageVault, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. + return response + + async def delete_exascale_db_storage_vault( + self, + request: Optional[ + Union[exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single ExascaleDB Storage Vault. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_delete_exascale_db_storage_vault(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.DeleteExascaleDbStorageVaultRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_exascale_db_storage_vault(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.DeleteExascaleDbStorageVaultRequest, dict]]): + The request object. The request message for + ``ExascaleDbStorageVault.Delete``. + name (:class:`str`): + Required. The name of the ExascaleDbStorageVault in the + following format: + projects/{project}/locations/{location}/exascaleDbStorageVaults/{exascale_db_storage_vault}. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest + ): + request = exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest( + request + ) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_exascale_db_storage_vault + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. + return response + + async def list_db_system_initial_storage_sizes( + self, + request: Optional[ + Union[ + db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest, + dict, + ] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListDbSystemInitialStorageSizesAsyncPager: + r"""Lists all the DbSystemInitialStorageSizes for the + given project and location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_list_db_system_initial_storage_sizes(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListDbSystemInitialStorageSizesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_db_system_initial_storage_sizes(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.ListDbSystemInitialStorageSizesRequest, dict]]): + The request object. 
The request for ``DbSystemInitialStorageSizes.List``. + parent (:class:`str`): + Required. The parent value for the + DbSystemInitialStorageSize resource with + the format: + projects/{project}/locations/{location} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbSystemInitialStorageSizesAsyncPager: + The response for DbSystemInitialStorageSizes.List. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, + db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest, + ): + request = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest( + request + ) + ) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_db_system_initial_storage_sizes + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListDbSystemInitialStorageSizesAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_databases( + self, + request: Optional[Union[database.ListDatabasesRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListDatabasesAsyncPager: + r"""Lists all the Databases for the given project, + location and DbSystem. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_list_databases(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListDatabasesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_databases(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.ListDatabasesRequest, dict]]): + The request object. The request for ``Database.List``. + parent (:class:`str`): + Required. The parent resource name in + the following format: + projects/{project}/locations/{region} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDatabasesAsyncPager: + The response for Database.List. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, database.ListDatabasesRequest): + request = database.ListDatabasesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_databases + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListDatabasesAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_database( + self, + request: Optional[Union[database.GetDatabaseRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> database.Database: + r"""Gets details of a single Database. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_get_database(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetDatabaseRequest( + name="name_value", + ) + + # Make the request + response = await client.get_database(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.GetDatabaseRequest, dict]]): + The request object. The request for ``Database.Get``. + name (:class:`str`): + Required. The name of the Database + resource in the following format: + projects/{project}/locations/{region}/databases/{database} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.types.Database: + Details of the Database resource. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/Database/ + + """ + # Create or coerce a protobuf request object. 
+ # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, database.GetDatabaseRequest): + request = database.GetDatabaseRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_database + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_pluggable_databases( + self, + request: Optional[ + Union[pluggable_database.ListPluggableDatabasesRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListPluggableDatabasesAsyncPager: + r"""Lists all the PluggableDatabases for the given + project, location and Container Database. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_list_pluggable_databases(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListPluggableDatabasesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_pluggable_databases(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.ListPluggableDatabasesRequest, dict]]): + The request object. The request for ``PluggableDatabase.List``. + parent (:class:`str`): + Required. The parent, which owns this + collection of PluggableDatabases. + Format: + projects/{project}/locations/{location} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListPluggableDatabasesAsyncPager: + The response for PluggableDatabase.List. 
+ + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, pluggable_database.ListPluggableDatabasesRequest): + request = pluggable_database.ListPluggableDatabasesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_pluggable_databases + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListPluggableDatabasesAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def get_pluggable_database( + self, + request: Optional[ + Union[pluggable_database.GetPluggableDatabaseRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pluggable_database.PluggableDatabase: + r"""Gets details of a single PluggableDatabase. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_get_pluggable_database(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetPluggableDatabaseRequest( + name="name_value", + ) + + # Make the request + response = await client.get_pluggable_database(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.GetPluggableDatabaseRequest, dict]]): + The request object. The request for ``PluggableDatabase.Get``. + name (:class:`str`): + Required. The name of the PluggableDatabase resource in + the following format: + projects/{project}/locations/{region}/pluggableDatabases/{pluggable_database} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.types.PluggableDatabase: + The PluggableDatabase resource. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/PluggableDatabase/ + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, pluggable_database.GetPluggableDatabaseRequest): + request = pluggable_database.GetPluggableDatabaseRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_pluggable_database + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_db_systems( + self, + request: Optional[Union[db_system.ListDbSystemsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListDbSystemsAsyncPager: + r"""Lists all the DbSystems for the given project and + location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_list_db_systems(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListDbSystemsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_db_systems(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.ListDbSystemsRequest, dict]]): + The request object. The request for ``DbSystem.List``. + parent (:class:`str`): + Required. The parent value for + DbSystems in the following format: + projects/{project}/locations/{location}. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbSystemsAsyncPager: + The response for DbSystem.List. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, db_system.ListDbSystemsRequest): + request = db_system.ListDbSystemsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_db_systems + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListDbSystemsAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_db_system( + self, + request: Optional[Union[db_system.GetDbSystemRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> db_system.DbSystem: + r"""Gets details of a single DbSystem. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_get_db_system(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetDbSystemRequest( + name="name_value", + ) + + # Make the request + response = await client.get_db_system(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.GetDbSystemRequest, dict]]): + The request object. The request for ``DbSystem.Get``. + name (:class:`str`): + Required. The name of the DbSystem in the following + format: + projects/{project}/locations/{location}/dbSystems/{db_system}. 
+ + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.types.DbSystem: + Details of the DbSystem (BaseDB) + resource. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/DbSystem/ + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, db_system.GetDbSystemRequest): + request = db_system.GetDbSystemRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_db_system + ] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_db_system( + self, + request: Optional[Union[gco_db_system.CreateDbSystemRequest, dict]] = None, + *, + parent: Optional[str] = None, + db_system: Optional[gco_db_system.DbSystem] = None, + db_system_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new DbSystem in a given project and + location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_create_db_system(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + db_system = oracledatabase_v1.DbSystem() + db_system.odb_subnet = "odb_subnet_value" + db_system.display_name = "display_name_value" + + request = oracledatabase_v1.CreateDbSystemRequest( + parent="parent_value", + db_system_id="db_system_id_value", + db_system=db_system, + ) + + # Make the request + operation = client.create_db_system(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.CreateDbSystemRequest, dict]]): + The request object. The request for ``DbSystem.Create``. + parent (:class:`str`): + Required. The value for parent of the + DbSystem in the following format: + projects/{project}/locations/{location}. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + db_system (:class:`google.cloud.oracledatabase_v1.types.DbSystem`): + Required. The resource being created. + This corresponds to the ``db_system`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + db_system_id (:class:`str`): + Required. The ID of the DbSystem to create. This value + is restricted to + (^\ `a-z <[a-z0-9-]{0,61}[a-z0-9]>`__?$) and must be a + maximum of 63 characters in length. The value must start + with a letter and end with a letter or a number. + + This corresponds to the ``db_system_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.oracledatabase_v1.types.DbSystem` Details of the DbSystem (BaseDB) resource. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/DbSystem/ + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, db_system, db_system_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, gco_db_system.CreateDbSystemRequest): + request = gco_db_system.CreateDbSystemRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if db_system is not None: + request.db_system = db_system + if db_system_id is not None: + request.db_system_id = db_system_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_db_system + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gco_db_system.DbSystem, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. + return response + + async def delete_db_system( + self, + request: Optional[Union[db_system.DeleteDbSystemRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single DbSystem. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_delete_db_system(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.DeleteDbSystemRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_db_system(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.DeleteDbSystemRequest, dict]]): + The request object. The request for ``DbSystem.Delete``. + name (:class:`str`): + Required. The name of the DbSystem in the following + format: + projects/{project}/locations/{location}/dbSystems/{db_system}. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. 
For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, db_system.DeleteDbSystemRequest): + request = db_system.DeleteDbSystemRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_db_system + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def list_db_versions( + self, + request: Optional[Union[db_version.ListDbVersionsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListDbVersionsAsyncPager: + r"""List DbVersions for the given project and location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_list_db_versions(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListDbVersionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_db_versions(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.ListDbVersionsRequest, dict]]): + The request object. The request for ``DbVersions.List``. + parent (:class:`str`): + Required. The parent value for the + DbVersion resource with the format: + projects/{project}/locations/{location} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbVersionsAsyncPager: + The response for DbVersions.List. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, db_version.ListDbVersionsRequest): + request = db_version.ListDbVersionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_db_versions + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListDbVersionsAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_database_character_sets( + self, + request: Optional[ + Union[database_character_set.ListDatabaseCharacterSetsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListDatabaseCharacterSetsAsyncPager: + r"""List DatabaseCharacterSets for the given project and + location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + async def sample_list_database_character_sets(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListDatabaseCharacterSetsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_database_character_sets(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.oracledatabase_v1.types.ListDatabaseCharacterSetsRequest, dict]]): + The request object. The request for ``DatabaseCharacterSet.List``. 
+ parent (:class:`str`): + Required. The parent value for + DatabaseCharacterSets in the following + format: + projects/{project}/locations/{location}. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDatabaseCharacterSetsAsyncPager: + The response for DatabaseCharacterSet.List. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, database_character_set.ListDatabaseCharacterSetsRequest + ): + request = database_character_set.ListDatabaseCharacterSetsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_database_character_sets + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListDatabaseCharacterSetsAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + async def list_operations( self, request: Optional[operations_pb2.ListOperationsRequest] = None, diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/client.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/client.py index 96a203710ff6..4d271d54b665 100644 --- a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/client.py +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/client.py @@ -32,7 +32,6 @@ Union, cast, ) -import uuid import warnings from google.api_core import client_options as client_options_lib @@ -67,6 +66,7 @@ from google.cloud.location import locations_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: 
ignore from google.cloud.oracledatabase_v1.services.oracle_database import pagers @@ -74,19 +74,43 @@ autonomous_database_character_set, autonomous_db_backup, autonomous_db_version, + common, + database, + database_character_set, db_node, db_server, +) +from google.cloud.oracledatabase_v1.types import ( + db_system_initial_storage_size, db_system_shape, + db_version, entitlement, exadata_infra, - gi_version, +) +from google.cloud.oracledatabase_v1.types import ( oracledatabase, + pluggable_database, vm_cluster, ) from google.cloud.oracledatabase_v1.types import ( autonomous_database as gco_autonomous_database, ) +from google.cloud.oracledatabase_v1.types import ( + exadb_vm_cluster as gco_exadb_vm_cluster, +) +from google.cloud.oracledatabase_v1.types import ( + exascale_db_storage_vault as gco_exascale_db_storage_vault, +) from google.cloud.oracledatabase_v1.types import autonomous_database +from google.cloud.oracledatabase_v1.types import db_system +from google.cloud.oracledatabase_v1.types import db_system as gco_db_system +from google.cloud.oracledatabase_v1.types import exadb_vm_cluster +from google.cloud.oracledatabase_v1.types import exascale_db_storage_vault +from google.cloud.oracledatabase_v1.types import gi_version, minor_version +from google.cloud.oracledatabase_v1.types import odb_network +from google.cloud.oracledatabase_v1.types import odb_network as gco_odb_network +from google.cloud.oracledatabase_v1.types import odb_subnet +from google.cloud.oracledatabase_v1.types import odb_subnet as gco_odb_subnet from .transports.base import DEFAULT_CLIENT_INFO, OracleDatabaseTransport from .transports.grpc import OracleDatabaseGrpcTransport @@ -352,6 +376,74 @@ def parse_cloud_vm_cluster_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def crypto_key_path( + project: str, + location: str, + key_ring: str, + crypto_key: str, + ) -> str: + """Returns a fully-qualified crypto_key string.""" + return 
"projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}".format( + project=project, + location=location, + key_ring=key_ring, + crypto_key=crypto_key, + ) + + @staticmethod + def parse_crypto_key_path(path: str) -> Dict[str, str]: + """Parses a crypto_key path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/keyRings/(?P.+?)/cryptoKeys/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def database_path( + project: str, + location: str, + database: str, + ) -> str: + """Returns a fully-qualified database string.""" + return "projects/{project}/locations/{location}/databases/{database}".format( + project=project, + location=location, + database=database, + ) + + @staticmethod + def parse_database_path(path: str) -> Dict[str, str]: + """Parses a database path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/databases/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def database_character_set_path( + project: str, + location: str, + database_character_set: str, + ) -> str: + """Returns a fully-qualified database_character_set string.""" + return "projects/{project}/locations/{location}/databaseCharacterSets/{database_character_set}".format( + project=project, + location=location, + database_character_set=database_character_set, + ) + + @staticmethod + def parse_database_character_set_path(path: str) -> Dict[str, str]: + """Parses a database_character_set path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/databaseCharacterSets/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def db_node_path( project: str, @@ -400,6 +492,50 @@ def parse_db_server_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def db_system_path( + project: str, + location: str, + db_system: str, + ) -> str: + """Returns a 
fully-qualified db_system string.""" + return "projects/{project}/locations/{location}/dbSystems/{db_system}".format( + project=project, + location=location, + db_system=db_system, + ) + + @staticmethod + def parse_db_system_path(path: str) -> Dict[str, str]: + """Parses a db_system path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/dbSystems/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def db_system_initial_storage_size_path( + project: str, + location: str, + db_system_initial_storage_size: str, + ) -> str: + """Returns a fully-qualified db_system_initial_storage_size string.""" + return "projects/{project}/locations/{location}/dbSystemInitialStorageSizes/{db_system_initial_storage_size}".format( + project=project, + location=location, + db_system_initial_storage_size=db_system_initial_storage_size, + ) + + @staticmethod + def parse_db_system_initial_storage_size_path(path: str) -> Dict[str, str]: + """Parses a db_system_initial_storage_size path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/dbSystemInitialStorageSizes/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def db_system_shape_path( project: str, @@ -422,6 +558,28 @@ def parse_db_system_shape_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def db_version_path( + project: str, + location: str, + db_version: str, + ) -> str: + """Returns a fully-qualified db_version string.""" + return "projects/{project}/locations/{location}/dbVersions/{db_version}".format( + project=project, + location=location, + db_version=db_version, + ) + + @staticmethod + def parse_db_version_path(path: str) -> Dict[str, str]: + """Parses a db_version path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/dbVersions/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def entitlement_path( 
project: str, @@ -446,6 +604,50 @@ def parse_entitlement_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def exadb_vm_cluster_path( + project: str, + location: str, + exadb_vm_cluster: str, + ) -> str: + """Returns a fully-qualified exadb_vm_cluster string.""" + return "projects/{project}/locations/{location}/exadbVmClusters/{exadb_vm_cluster}".format( + project=project, + location=location, + exadb_vm_cluster=exadb_vm_cluster, + ) + + @staticmethod + def parse_exadb_vm_cluster_path(path: str) -> Dict[str, str]: + """Parses a exadb_vm_cluster path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/exadbVmClusters/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def exascale_db_storage_vault_path( + project: str, + location: str, + exascale_db_storage_vault: str, + ) -> str: + """Returns a fully-qualified exascale_db_storage_vault string.""" + return "projects/{project}/locations/{location}/exascaleDbStorageVaults/{exascale_db_storage_vault}".format( + project=project, + location=location, + exascale_db_storage_vault=exascale_db_storage_vault, + ) + + @staticmethod + def parse_exascale_db_storage_vault_path(path: str) -> Dict[str, str]: + """Parses a exascale_db_storage_vault path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/exascaleDbStorageVaults/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def gi_version_path( project: str, @@ -468,6 +670,30 @@ def parse_gi_version_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def minor_version_path( + project: str, + location: str, + gi_version: str, + minor_version: str, + ) -> str: + """Returns a fully-qualified minor_version string.""" + return "projects/{project}/locations/{location}/giVersions/{gi_version}/minorVersions/{minor_version}".format( + project=project, + location=location, + 
gi_version=gi_version, + minor_version=minor_version, + ) + + @staticmethod + def parse_minor_version_path(path: str) -> Dict[str, str]: + """Parses a minor_version path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/giVersions/(?P.+?)/minorVersions/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def network_path( project: str, @@ -487,6 +713,76 @@ def parse_network_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def odb_network_path( + project: str, + location: str, + odb_network: str, + ) -> str: + """Returns a fully-qualified odb_network string.""" + return ( + "projects/{project}/locations/{location}/odbNetworks/{odb_network}".format( + project=project, + location=location, + odb_network=odb_network, + ) + ) + + @staticmethod + def parse_odb_network_path(path: str) -> Dict[str, str]: + """Parses a odb_network path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/odbNetworks/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def odb_subnet_path( + project: str, + location: str, + odb_network: str, + odb_subnet: str, + ) -> str: + """Returns a fully-qualified odb_subnet string.""" + return "projects/{project}/locations/{location}/odbNetworks/{odb_network}/odbSubnets/{odb_subnet}".format( + project=project, + location=location, + odb_network=odb_network, + odb_subnet=odb_subnet, + ) + + @staticmethod + def parse_odb_subnet_path(path: str) -> Dict[str, str]: + """Parses a odb_subnet path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/odbNetworks/(?P.+?)/odbSubnets/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def pluggable_database_path( + project: str, + location: str, + pluggable_database: str, + ) -> str: + """Returns a fully-qualified pluggable_database string.""" + return 
"projects/{project}/locations/{location}/pluggableDatabases/{pluggable_database}".format( + project=project, + location=location, + pluggable_database=pluggable_database, + ) + + @staticmethod + def parse_pluggable_database_path(path: str) -> Dict[str, str]: + """Parses a pluggable_database path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/pluggableDatabases/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def common_billing_account_path( billing_account: str, @@ -1788,9 +2084,6 @@ def sample_create_cloud_vm_cluster(): # Initialize request argument(s) cloud_vm_cluster = oracledatabase_v1.CloudVmCluster() cloud_vm_cluster.exadata_infrastructure = "exadata_infrastructure_value" - cloud_vm_cluster.cidr = "cidr_value" - cloud_vm_cluster.backup_subnet_cidr = "backup_subnet_cidr_value" - cloud_vm_cluster.network = "network_value" request = oracledatabase_v1.CreateCloudVmClusterRequest( parent="parent_value", @@ -2330,6 +2623,7 @@ def sample_list_db_nodes(): Required. The parent value for database node in the following format: projects/{project}/locations/{location}/cloudVmClusters/{cloudVmCluster}. + . This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -2531,17 +2825,17 @@ def sample_list_gi_versions(): # Done; return the response. return response - def list_db_system_shapes( + def list_minor_versions( self, - request: Optional[Union[oracledatabase.ListDbSystemShapesRequest, dict]] = None, + request: Optional[Union[minor_version.ListMinorVersionsRequest, dict]] = None, *, parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> pagers.ListDbSystemShapesPager: - r"""Lists the database system shapes available for the - project and location. 
+ ) -> pagers.ListMinorVersionsPager: + r"""Lists all the valid minor versions for the given + project, location, gi version and shape family. .. code-block:: python @@ -2554,30 +2848,29 @@ def list_db_system_shapes( # https://googleapis.dev/python/google-api-core/latest/client_options.html from google.cloud import oracledatabase_v1 - def sample_list_db_system_shapes(): + def sample_list_minor_versions(): # Create a client client = oracledatabase_v1.OracleDatabaseClient() # Initialize request argument(s) - request = oracledatabase_v1.ListDbSystemShapesRequest( + request = oracledatabase_v1.ListMinorVersionsRequest( parent="parent_value", ) # Make the request - page_result = client.list_db_system_shapes(request=request) + page_result = client.list_minor_versions(request=request) # Handle the response for response in page_result: print(response) Args: - request (Union[google.cloud.oracledatabase_v1.types.ListDbSystemShapesRequest, dict]): - The request object. The request for ``DbSystemShape.List``. + request (Union[google.cloud.oracledatabase_v1.types.ListMinorVersionsRequest, dict]): + The request object. The request for ``MinorVersion.List``. parent (str): - Required. The parent value for - Database System Shapes in the following - format: - projects/{project}/locations/{location}. + Required. The parent value for the MinorVersion resource + with the format: + projects/{project}/locations/{location}/giVersions/{gi_version} This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -2591,8 +2884,8 @@ def sample_list_db_system_shapes(): be of type `bytes`. Returns: - google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbSystemShapesPager: - The response for DbSystemShape.List. + google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListMinorVersionsPager: + The response for MinorVersion.List. Iterating over this object will yield results and resolve additional pages automatically. 
@@ -2613,8 +2906,8 @@ def sample_list_db_system_shapes(): # - Use the request object if provided (there's no risk of modifying the input as # there are no flattened fields), or create one. - if not isinstance(request, oracledatabase.ListDbSystemShapesRequest): - request = oracledatabase.ListDbSystemShapesRequest(request) + if not isinstance(request, minor_version.ListMinorVersionsRequest): + request = minor_version.ListMinorVersionsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: @@ -2622,7 +2915,7 @@ def sample_list_db_system_shapes(): # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_db_system_shapes] + rpc = self._transport._wrapped_methods[self._transport.list_minor_versions] # Certain fields should be provided within the metadata header; # add these here. @@ -2643,7 +2936,7 @@ def sample_list_db_system_shapes(): # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. - response = pagers.ListDbSystemShapesPager( + response = pagers.ListMinorVersionsPager( method=rpc, request=request, response=response, @@ -2655,19 +2948,17 @@ def sample_list_db_system_shapes(): # Done; return the response. return response - def list_autonomous_databases( + def list_db_system_shapes( self, - request: Optional[ - Union[oracledatabase.ListAutonomousDatabasesRequest, dict] - ] = None, + request: Optional[Union[oracledatabase.ListDbSystemShapesRequest, dict]] = None, *, parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> pagers.ListAutonomousDatabasesPager: - r"""Lists the Autonomous Databases in a given project and - location. 
+ ) -> pagers.ListDbSystemShapesPager: + r"""Lists the database system shapes available for the + project and location. .. code-block:: python @@ -2680,28 +2971,28 @@ def list_autonomous_databases( # https://googleapis.dev/python/google-api-core/latest/client_options.html from google.cloud import oracledatabase_v1 - def sample_list_autonomous_databases(): + def sample_list_db_system_shapes(): # Create a client client = oracledatabase_v1.OracleDatabaseClient() # Initialize request argument(s) - request = oracledatabase_v1.ListAutonomousDatabasesRequest( + request = oracledatabase_v1.ListDbSystemShapesRequest( parent="parent_value", ) # Make the request - page_result = client.list_autonomous_databases(request=request) + page_result = client.list_db_system_shapes(request=request) # Handle the response for response in page_result: print(response) Args: - request (Union[google.cloud.oracledatabase_v1.types.ListAutonomousDatabasesRequest, dict]): - The request object. The request for ``AutonomousDatabase.List``. + request (Union[google.cloud.oracledatabase_v1.types.ListDbSystemShapesRequest, dict]): + The request object. The request for ``DbSystemShape.List``. parent (str): - Required. The parent value for the - Autonomous Database in the following + Required. The parent value for + Database System Shapes in the following format: projects/{project}/locations/{location}. @@ -2717,8 +3008,8 @@ def sample_list_autonomous_databases(): be of type `bytes`. Returns: - google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListAutonomousDatabasesPager: - The response for AutonomousDatabase.List. + google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbSystemShapesPager: + The response for DbSystemShape.List. Iterating over this object will yield results and resolve additional pages automatically. 
@@ -2739,8 +3030,8 @@ def sample_list_autonomous_databases(): # - Use the request object if provided (there's no risk of modifying the input as # there are no flattened fields), or create one. - if not isinstance(request, oracledatabase.ListAutonomousDatabasesRequest): - request = oracledatabase.ListAutonomousDatabasesRequest(request) + if not isinstance(request, oracledatabase.ListDbSystemShapesRequest): + request = oracledatabase.ListDbSystemShapesRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: @@ -2748,9 +3039,7 @@ def sample_list_autonomous_databases(): # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = self._transport._wrapped_methods[ - self._transport.list_autonomous_databases - ] + rpc = self._transport._wrapped_methods[self._transport.list_db_system_shapes] # Certain fields should be provided within the metadata header; # add these here. @@ -2771,7 +3060,7 @@ def sample_list_autonomous_databases(): # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. - response = pagers.ListAutonomousDatabasesPager( + response = pagers.ListDbSystemShapesPager( method=rpc, request=request, response=response, @@ -2783,18 +3072,19 @@ def sample_list_autonomous_databases(): # Done; return the response. return response - def get_autonomous_database( + def list_autonomous_databases( self, request: Optional[ - Union[oracledatabase.GetAutonomousDatabaseRequest, dict] + Union[oracledatabase.ListAutonomousDatabasesRequest, dict] ] = None, *, - name: Optional[str] = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> autonomous_database.AutonomousDatabase: - r"""Gets the details of a single Autonomous Database. 
+ ) -> pagers.ListAutonomousDatabasesPager: + r"""Lists the Autonomous Databases in a given project and + location. .. code-block:: python @@ -2807,14 +3097,141 @@ def get_autonomous_database( # https://googleapis.dev/python/google-api-core/latest/client_options.html from google.cloud import oracledatabase_v1 - def sample_get_autonomous_database(): + def sample_list_autonomous_databases(): # Create a client client = oracledatabase_v1.OracleDatabaseClient() # Initialize request argument(s) - request = oracledatabase_v1.GetAutonomousDatabaseRequest( - name="name_value", - ) + request = oracledatabase_v1.ListAutonomousDatabasesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_autonomous_databases(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.ListAutonomousDatabasesRequest, dict]): + The request object. The request for ``AutonomousDatabase.List``. + parent (str): + Required. The parent value for the + Autonomous Database in the following + format: + projects/{project}/locations/{location}. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListAutonomousDatabasesPager: + The response for AutonomousDatabase.List. + + Iterating over this object will yield results and + resolve additional pages automatically. 
+ + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, oracledatabase.ListAutonomousDatabasesRequest): + request = oracledatabase.ListAutonomousDatabasesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.list_autonomous_databases + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListAutonomousDatabasesPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get_autonomous_database( + self, + request: Optional[ + Union[oracledatabase.GetAutonomousDatabaseRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> autonomous_database.AutonomousDatabase: + r"""Gets the details of a single Autonomous Database. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_get_autonomous_database(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetAutonomousDatabaseRequest( + name="name_value", + ) # Make the request response = client.get_autonomous_database(request=request) @@ -3047,6 +3464,147 @@ def sample_create_autonomous_database(): # Done; return the response. return response + def update_autonomous_database( + self, + request: Optional[ + Union[oracledatabase.UpdateAutonomousDatabaseRequest, dict] + ] = None, + *, + autonomous_database: Optional[ + gco_autonomous_database.AutonomousDatabase + ] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Updates the parameters of a single Autonomous + Database. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_update_autonomous_database(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.UpdateAutonomousDatabaseRequest( + ) + + # Make the request + operation = client.update_autonomous_database(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.UpdateAutonomousDatabaseRequest, dict]): + The request object. The request for ``AutonomousDatabase.Update``. + autonomous_database (google.cloud.oracledatabase_v1.types.AutonomousDatabase): + Required. The resource being updated + This corresponds to the ``autonomous_database`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. Field mask is used to specify the fields to be + overwritten in the Exadata resource by the update. The + fields specified in the update_mask are relative to the + resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then all fields will be overwritten. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.oracledatabase_v1.types.AutonomousDatabase` Details of the Autonomous Database resource. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/AutonomousDatabase/ + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [autonomous_database, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, oracledatabase.UpdateAutonomousDatabaseRequest): + request = oracledatabase.UpdateAutonomousDatabaseRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if autonomous_database is not None: + request.autonomous_database = autonomous_database + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.update_autonomous_database + ] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("autonomous_database.name", request.autonomous_database.name),) + ), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + gco_autonomous_database.AutonomousDatabase, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. + return response + def delete_autonomous_database( self, request: Optional[ @@ -4227,6 +4785,4054 @@ def sample_restart_autonomous_database(): # Done; return the response. return response + def switchover_autonomous_database( + self, + request: Optional[ + Union[oracledatabase.SwitchoverAutonomousDatabaseRequest, dict] + ] = None, + *, + name: Optional[str] = None, + peer_autonomous_database: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Initiates a switchover of specified autonomous + database to the associated peer database. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_switchover_autonomous_database(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.SwitchoverAutonomousDatabaseRequest( + name="name_value", + peer_autonomous_database="peer_autonomous_database_value", + ) + + # Make the request + operation = client.switchover_autonomous_database(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.SwitchoverAutonomousDatabaseRequest, dict]): + The request object. The request for + ``OracleDatabase.SwitchoverAutonomousDatabase``. + name (str): + Required. The name of the Autonomous Database in the + following format: + projects/{project}/locations/{location}/autonomousDatabases/{autonomous_database}. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + peer_autonomous_database (str): + Required. The peer database name to + switch over to. + + This corresponds to the ``peer_autonomous_database`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.oracledatabase_v1.types.AutonomousDatabase` Details of the Autonomous Database resource. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/AutonomousDatabase/ + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name, peer_autonomous_database] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, oracledatabase.SwitchoverAutonomousDatabaseRequest): + request = oracledatabase.SwitchoverAutonomousDatabaseRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if peer_autonomous_database is not None: + request.peer_autonomous_database = peer_autonomous_database + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.switchover_autonomous_database + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + autonomous_database.AutonomousDatabase, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. + return response + + def failover_autonomous_database( + self, + request: Optional[ + Union[oracledatabase.FailoverAutonomousDatabaseRequest, dict] + ] = None, + *, + name: Optional[str] = None, + peer_autonomous_database: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Initiates a failover to target autonomous database + from the associated primary database. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_failover_autonomous_database(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.FailoverAutonomousDatabaseRequest( + name="name_value", + peer_autonomous_database="peer_autonomous_database_value", + ) + + # Make the request + operation = client.failover_autonomous_database(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.FailoverAutonomousDatabaseRequest, dict]): + The request object. The request for + ``OracleDatabase.FailoverAutonomousDatabase``. + name (str): + Required. The name of the Autonomous Database in the + following format: + projects/{project}/locations/{location}/autonomousDatabases/{autonomous_database}. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + peer_autonomous_database (str): + Required. The peer database name to + fail over to. + + This corresponds to the ``peer_autonomous_database`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.oracledatabase_v1.types.AutonomousDatabase` Details of the Autonomous Database resource. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/AutonomousDatabase/ + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name, peer_autonomous_database] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, oracledatabase.FailoverAutonomousDatabaseRequest): + request = oracledatabase.FailoverAutonomousDatabaseRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if peer_autonomous_database is not None: + request.peer_autonomous_database = peer_autonomous_database + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.failover_autonomous_database + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + autonomous_database.AutonomousDatabase, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. + return response + + def list_odb_networks( + self, + request: Optional[Union[odb_network.ListOdbNetworksRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListOdbNetworksPager: + r"""Lists the ODB Networks in a given project and + location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_list_odb_networks(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListOdbNetworksRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_odb_networks(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.ListOdbNetworksRequest, dict]): + The request object. The request for ``OdbNetwork.List``. + parent (str): + Required. The parent value for the + ODB Network in the following format: + projects/{project}/locations/{location}. 
+ + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListOdbNetworksPager: + The response for OdbNetwork.List. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, odb_network.ListOdbNetworksRequest): + request = odb_network.ListOdbNetworksRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_odb_networks] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListOdbNetworksPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_odb_network( + self, + request: Optional[Union[odb_network.GetOdbNetworkRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> odb_network.OdbNetwork: + r"""Gets details of a single ODB Network. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_get_odb_network(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetOdbNetworkRequest( + name="name_value", + ) + + # Make the request + response = client.get_odb_network(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.GetOdbNetworkRequest, dict]): + The request object. The request for ``OdbNetwork.Get``. + name (str): + Required. 
The name of the OdbNetwork in the following + format: + projects/{project}/locations/{location}/odbNetworks/{odb_network}. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.types.OdbNetwork: + Represents OdbNetwork resource. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, odb_network.GetOdbNetworkRequest): + request = odb_network.GetOdbNetworkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_odb_network] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def create_odb_network( + self, + request: Optional[Union[gco_odb_network.CreateOdbNetworkRequest, dict]] = None, + *, + parent: Optional[str] = None, + odb_network: Optional[gco_odb_network.OdbNetwork] = None, + odb_network_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Creates a new ODB Network in a given project and + location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_create_odb_network(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + odb_network = oracledatabase_v1.OdbNetwork() + odb_network.network = "network_value" + + request = oracledatabase_v1.CreateOdbNetworkRequest( + parent="parent_value", + odb_network_id="odb_network_id_value", + odb_network=odb_network, + ) + + # Make the request + operation = client.create_odb_network(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.CreateOdbNetworkRequest, dict]): + The request object. The request for ``OdbNetwork.Create``. + parent (str): + Required. The parent value for the + OdbNetwork in the following format: + projects/{project}/locations/{location}. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + odb_network (google.cloud.oracledatabase_v1.types.OdbNetwork): + Required. Details of the OdbNetwork + instance to create. + + This corresponds to the ``odb_network`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + odb_network_id (str): + Required. The ID of the OdbNetwork to create. This value + is restricted to + (^\ `a-z <[a-z0-9-]{0,61}[a-z0-9]>`__?$) and must be a + maximum of 63 characters in length. The value must start + with a letter and end with a letter or a number. + + This corresponds to the ``odb_network_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.oracledatabase_v1.types.OdbNetwork` + Represents OdbNetwork resource. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, odb_network, odb_network_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, gco_odb_network.CreateOdbNetworkRequest): + request = gco_odb_network.CreateOdbNetworkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if odb_network is not None: + request.odb_network = odb_network + if odb_network_id is not None: + request.odb_network_id = odb_network_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.create_odb_network] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + gco_odb_network.OdbNetwork, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. + return response + + def delete_odb_network( + self, + request: Optional[Union[odb_network.DeleteOdbNetworkRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Deletes a single ODB Network. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_delete_odb_network(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.DeleteOdbNetworkRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_odb_network(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.DeleteOdbNetworkRequest, dict]): + The request object. The request for ``OdbNetwork.Delete``. + name (str): + Required. The name of the resource in the following + format: + projects/{project}/locations/{location}/odbNetworks/{odb_network}. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. 
For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, odb_network.DeleteOdbNetworkRequest): + request = odb_network.DeleteOdbNetworkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_odb_network] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. 
+ return response + + def list_odb_subnets( + self, + request: Optional[Union[odb_subnet.ListOdbSubnetsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListOdbSubnetsPager: + r"""Lists all the ODB Subnets in a given ODB Network. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_list_odb_subnets(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListOdbSubnetsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_odb_subnets(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.ListOdbSubnetsRequest, dict]): + The request object. The request for ``OdbSubnet.List``. + parent (str): + Required. The parent value for the OdbSubnet in the + following format: + projects/{project}/locations/{location}/odbNetworks/{odb_network}. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListOdbSubnetsPager: + The response for OdbSubnet.List. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, odb_subnet.ListOdbSubnetsRequest): + request = odb_subnet.ListOdbSubnetsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_odb_subnets] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListOdbSubnetsPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_odb_subnet( + self, + request: Optional[Union[odb_subnet.GetOdbSubnetRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> odb_subnet.OdbSubnet: + r"""Gets details of a single ODB Subnet. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_get_odb_subnet(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetOdbSubnetRequest( + name="name_value", + ) + + # Make the request + response = client.get_odb_subnet(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.GetOdbSubnetRequest, dict]): + The request object. The request for ``OdbSubnet.Get``. + name (str): + Required. The name of the OdbSubnet in the following + format: + projects/{project}/locations/{location}/odbNetworks/{odb_network}/odbSubnets/{odb_subnet}. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.types.OdbSubnet: + Represents OdbSubnet resource. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, odb_subnet.GetOdbSubnetRequest): + request = odb_subnet.GetOdbSubnetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_odb_subnet] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def create_odb_subnet( + self, + request: Optional[Union[gco_odb_subnet.CreateOdbSubnetRequest, dict]] = None, + *, + parent: Optional[str] = None, + odb_subnet: Optional[gco_odb_subnet.OdbSubnet] = None, + odb_subnet_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Creates a new ODB Subnet in a given ODB Network. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_create_odb_subnet(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + odb_subnet = oracledatabase_v1.OdbSubnet() + odb_subnet.cidr_range = "cidr_range_value" + odb_subnet.purpose = "BACKUP_SUBNET" + + request = oracledatabase_v1.CreateOdbSubnetRequest( + parent="parent_value", + odb_subnet_id="odb_subnet_id_value", + odb_subnet=odb_subnet, + ) + + # Make the request + operation = client.create_odb_subnet(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.CreateOdbSubnetRequest, dict]): + The request object. The request for ``OdbSubnet.Create``. + parent (str): + Required. The parent value for the OdbSubnet in the + following format: + projects/{project}/locations/{location}/odbNetworks/{odb_network}. 
+ + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + odb_subnet (google.cloud.oracledatabase_v1.types.OdbSubnet): + Required. Details of the OdbSubnet + instance to create. + + This corresponds to the ``odb_subnet`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + odb_subnet_id (str): + Required. The ID of the OdbSubnet to create. This value + is restricted to + (^\ `a-z <[a-z0-9-]{0,61}[a-z0-9]>`__?$) and must be a + maximum of 63 characters in length. The value must start + with a letter and end with a letter or a number. + + This corresponds to the ``odb_subnet_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.oracledatabase_v1.types.OdbSubnet` + Represents OdbSubnet resource. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, odb_subnet, odb_subnet_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, gco_odb_subnet.CreateOdbSubnetRequest): + request = gco_odb_subnet.CreateOdbSubnetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if odb_subnet is not None: + request.odb_subnet = odb_subnet + if odb_subnet_id is not None: + request.odb_subnet_id = odb_subnet_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_odb_subnet] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + gco_odb_subnet.OdbSubnet, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. + return response + + def delete_odb_subnet( + self, + request: Optional[Union[odb_subnet.DeleteOdbSubnetRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Deletes a single ODB Subnet. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_delete_odb_subnet(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.DeleteOdbSubnetRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_odb_subnet(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.DeleteOdbSubnetRequest, dict]): + The request object. The request for ``OdbSubnet.Delete``. + name (str): + Required. The name of the resource in the following + format: + projects/{project}/locations/{region}/odbNetworks/{odb_network}/odbSubnets/{odb_subnet}. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. 
For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, odb_subnet.DeleteOdbSubnetRequest): + request = odb_subnet.DeleteOdbSubnetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_odb_subnet] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. 
+ return response + + def list_exadb_vm_clusters( + self, + request: Optional[ + Union[oracledatabase.ListExadbVmClustersRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListExadbVmClustersPager: + r"""Lists all the Exadb (Exascale) VM Clusters for the + given project and location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_list_exadb_vm_clusters(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListExadbVmClustersRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_exadb_vm_clusters(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.ListExadbVmClustersRequest, dict]): + The request object. The request for ``ExadbVmCluster.List``. + parent (str): + Required. The parent value for + ExadbVmClusters in the following format: + projects/{project}/locations/{location}. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListExadbVmClustersPager: + The response for ExadbVmCluster.List. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, oracledatabase.ListExadbVmClustersRequest): + request = oracledatabase.ListExadbVmClustersRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_exadb_vm_clusters] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListExadbVmClustersPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_exadb_vm_cluster( + self, + request: Optional[Union[oracledatabase.GetExadbVmClusterRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> exadb_vm_cluster.ExadbVmCluster: + r"""Gets details of a single Exadb (Exascale) VM Cluster. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_get_exadb_vm_cluster(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetExadbVmClusterRequest( + name="name_value", + ) + + # Make the request + response = client.get_exadb_vm_cluster(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.GetExadbVmClusterRequest, dict]): + The request object. The request for ``ExadbVmCluster.Get``. + name (str): + Required. The name of the ExadbVmCluster in the + following format: + projects/{project}/locations/{location}/exadbVmClusters/{exadb_vm_cluster}. 
+ + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.types.ExadbVmCluster: + ExadbVmCluster represents a cluster + of VMs that are used to run Exadata + workloads. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/ExadbVmCluster/ + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, oracledatabase.GetExadbVmClusterRequest): + request = oracledatabase.GetExadbVmClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_exadb_vm_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def create_exadb_vm_cluster( + self, + request: Optional[ + Union[oracledatabase.CreateExadbVmClusterRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + exadb_vm_cluster: Optional[gco_exadb_vm_cluster.ExadbVmCluster] = None, + exadb_vm_cluster_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Creates a new Exadb (Exascale) VM Cluster resource. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_create_exadb_vm_cluster(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + exadb_vm_cluster = oracledatabase_v1.ExadbVmCluster() + exadb_vm_cluster.properties.grid_image_id = "grid_image_id_value" + exadb_vm_cluster.properties.node_count = 1070 + exadb_vm_cluster.properties.enabled_ecpu_count_per_node = 2826 + exadb_vm_cluster.properties.vm_file_system_storage.size_in_gbs_per_node = 2103 + exadb_vm_cluster.properties.exascale_db_storage_vault = "exascale_db_storage_vault_value" + exadb_vm_cluster.properties.hostname_prefix = "hostname_prefix_value" + exadb_vm_cluster.properties.ssh_public_keys = ['ssh_public_keys_value1', 'ssh_public_keys_value2'] + exadb_vm_cluster.properties.shape_attribute = "BLOCK_STORAGE" + exadb_vm_cluster.odb_subnet = "odb_subnet_value" + exadb_vm_cluster.backup_odb_subnet = "backup_odb_subnet_value" + exadb_vm_cluster.display_name = "display_name_value" + + request = oracledatabase_v1.CreateExadbVmClusterRequest( + parent="parent_value", + exadb_vm_cluster_id="exadb_vm_cluster_id_value", + exadb_vm_cluster=exadb_vm_cluster, + ) + + # Make the request + operation = client.create_exadb_vm_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.CreateExadbVmClusterRequest, dict]): + The request object. The request for ``ExadbVmCluster.Create``. + parent (str): + Required. The value for parent of the + ExadbVmCluster in the following format: + projects/{project}/locations/{location}. 
+ + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + exadb_vm_cluster (google.cloud.oracledatabase_v1.types.ExadbVmCluster): + Required. The resource being created. + This corresponds to the ``exadb_vm_cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + exadb_vm_cluster_id (str): + Required. The ID of the ExadbVmCluster to create. This + value is restricted to + (^\ `a-z <[a-z0-9-]{0,61}[a-z0-9]>`__?$) and must be a + maximum of 63 characters in length. The value must start + with a letter and end with a letter or a number. + + This corresponds to the ``exadb_vm_cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.oracledatabase_v1.types.ExadbVmCluster` ExadbVmCluster represents a cluster of VMs that are used to run Exadata + workloads. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/ExadbVmCluster/ + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ flattened_params = [parent, exadb_vm_cluster, exadb_vm_cluster_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, oracledatabase.CreateExadbVmClusterRequest): + request = oracledatabase.CreateExadbVmClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if exadb_vm_cluster is not None: + request.exadb_vm_cluster = exadb_vm_cluster + if exadb_vm_cluster_id is not None: + request.exadb_vm_cluster_id = exadb_vm_cluster_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_exadb_vm_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + gco_exadb_vm_cluster.ExadbVmCluster, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. 
+ return response + + def delete_exadb_vm_cluster( + self, + request: Optional[ + Union[oracledatabase.DeleteExadbVmClusterRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Deletes a single Exadb (Exascale) VM Cluster. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_delete_exadb_vm_cluster(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.DeleteExadbVmClusterRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_exadb_vm_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.DeleteExadbVmClusterRequest, dict]): + The request object. The request for ``ExadbVmCluster.Delete``. + name (str): + Required. The name of the ExadbVmCluster in the + following format: + projects/{project}/locations/{location}/exadbVmClusters/{exadb_vm_cluster}. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, oracledatabase.DeleteExadbVmClusterRequest): + request = oracledatabase.DeleteExadbVmClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_exadb_vm_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. + return response + + def update_exadb_vm_cluster( + self, + request: Optional[ + Union[oracledatabase.UpdateExadbVmClusterRequest, dict] + ] = None, + *, + exadb_vm_cluster: Optional[gco_exadb_vm_cluster.ExadbVmCluster] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Updates a single Exadb (Exascale) VM Cluster. To add + virtual machines to existing exadb vm cluster, only pass + the node count. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_update_exadb_vm_cluster(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + exadb_vm_cluster = oracledatabase_v1.ExadbVmCluster() + exadb_vm_cluster.properties.grid_image_id = "grid_image_id_value" + exadb_vm_cluster.properties.node_count = 1070 + exadb_vm_cluster.properties.enabled_ecpu_count_per_node = 2826 + exadb_vm_cluster.properties.vm_file_system_storage.size_in_gbs_per_node = 2103 + exadb_vm_cluster.properties.exascale_db_storage_vault = "exascale_db_storage_vault_value" + exadb_vm_cluster.properties.hostname_prefix = "hostname_prefix_value" + exadb_vm_cluster.properties.ssh_public_keys = ['ssh_public_keys_value1', 'ssh_public_keys_value2'] + exadb_vm_cluster.properties.shape_attribute = "BLOCK_STORAGE" + exadb_vm_cluster.odb_subnet = "odb_subnet_value" + exadb_vm_cluster.backup_odb_subnet = "backup_odb_subnet_value" + exadb_vm_cluster.display_name = "display_name_value" + + request = oracledatabase_v1.UpdateExadbVmClusterRequest( + exadb_vm_cluster=exadb_vm_cluster, + ) + + # Make the request + operation = client.update_exadb_vm_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.UpdateExadbVmClusterRequest, dict]): + The request object. The request for ``ExadbVmCluster.Update``. We only + support adding the Virtual Machine to the + ExadbVmCluster. Rest of the fields in ExadbVmCluster are + immutable. + exadb_vm_cluster (google.cloud.oracledatabase_v1.types.ExadbVmCluster): + Required. The resource being updated. 
+ This corresponds to the ``exadb_vm_cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. A mask specifying which + fields in th VM Cluster should be + updated. A field specified in the mask + is overwritten. If a mask isn't provided + then all the fields in the VM Cluster + are overwritten. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.oracledatabase_v1.types.ExadbVmCluster` ExadbVmCluster represents a cluster of VMs that are used to run Exadata + workloads. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/ExadbVmCluster/ + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [exadb_vm_cluster, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
+ if not isinstance(request, oracledatabase.UpdateExadbVmClusterRequest): + request = oracledatabase.UpdateExadbVmClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if exadb_vm_cluster is not None: + request.exadb_vm_cluster = exadb_vm_cluster + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_exadb_vm_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("exadb_vm_cluster.name", request.exadb_vm_cluster.name),) + ), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + gco_exadb_vm_cluster.ExadbVmCluster, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. + return response + + def remove_virtual_machine_exadb_vm_cluster( + self, + request: Optional[ + Union[oracledatabase.RemoveVirtualMachineExadbVmClusterRequest, dict] + ] = None, + *, + name: Optional[str] = None, + hostnames: Optional[MutableSequence[str]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Removes virtual machines from an existing exadb vm + cluster. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_remove_virtual_machine_exadb_vm_cluster(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.RemoveVirtualMachineExadbVmClusterRequest( + name="name_value", + hostnames=['hostnames_value1', 'hostnames_value2'], + ) + + # Make the request + operation = client.remove_virtual_machine_exadb_vm_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.RemoveVirtualMachineExadbVmClusterRequest, dict]): + The request object. The request for ``ExadbVmCluster.RemoveVirtualMachine``. + name (str): + Required. The name of the ExadbVmCluster in the + following format: + projects/{project}/locations/{location}/exadbVmClusters/{exadb_vm_cluster}. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + hostnames (MutableSequence[str]): + Required. The list of host names of + db nodes to be removed from the + ExadbVmCluster. + + This corresponds to the ``hostnames`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.oracledatabase_v1.types.ExadbVmCluster` ExadbVmCluster represents a cluster of VMs that are used to run Exadata + workloads. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/ExadbVmCluster/ + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name, hostnames] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, oracledatabase.RemoveVirtualMachineExadbVmClusterRequest + ): + request = oracledatabase.RemoveVirtualMachineExadbVmClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if hostnames is not None: + request.hostnames = hostnames + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.remove_virtual_machine_exadb_vm_cluster + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. 
+ self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + exadb_vm_cluster.ExadbVmCluster, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. + return response + + def list_exascale_db_storage_vaults( + self, + request: Optional[ + Union[exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListExascaleDbStorageVaultsPager: + r"""Lists all the ExascaleDB Storage Vaults for the given + project and location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_list_exascale_db_storage_vaults(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListExascaleDbStorageVaultsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_exascale_db_storage_vaults(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.ListExascaleDbStorageVaultsRequest, dict]): + The request object. The request for ``ExascaleDbStorageVault.List``. 
+ parent (str): + Required. The parent value for + ExascaleDbStorageVault in the following + format: + projects/{project}/locations/{location}. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListExascaleDbStorageVaultsPager: + The response for ExascaleDbStorageVault.List. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest + ): + request = exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest( + request + ) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.list_exascale_db_storage_vaults + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListExascaleDbStorageVaultsPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_exascale_db_storage_vault( + self, + request: Optional[ + Union[exascale_db_storage_vault.GetExascaleDbStorageVaultRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> exascale_db_storage_vault.ExascaleDbStorageVault: + r"""Gets details of a single ExascaleDB Storage Vault. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_get_exascale_db_storage_vault(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetExascaleDbStorageVaultRequest( + name="name_value", + ) + + # Make the request + response = client.get_exascale_db_storage_vault(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.GetExascaleDbStorageVaultRequest, dict]): + The request object. The request for ``ExascaleDbStorageVault.Get``. + name (str): + Required. The name of the ExascaleDbStorageVault in the + following format: + projects/{project}/locations/{location}/exascaleDbStorageVaults/{exascale_db_storage_vault}. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.types.ExascaleDbStorageVault: + ExascaleDbStorageVault represents a + storage vault exadb vm cluster resource. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/ExascaleDbStorageVault/ + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, exascale_db_storage_vault.GetExascaleDbStorageVaultRequest + ): + request = exascale_db_storage_vault.GetExascaleDbStorageVaultRequest( + request + ) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.get_exascale_db_storage_vault + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def create_exascale_db_storage_vault( + self, + request: Optional[ + Union[ + gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest, dict + ] + ] = None, + *, + parent: Optional[str] = None, + exascale_db_storage_vault: Optional[ + gco_exascale_db_storage_vault.ExascaleDbStorageVault + ] = None, + exascale_db_storage_vault_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Creates a new ExascaleDB Storage Vault resource. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_create_exascale_db_storage_vault(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + exascale_db_storage_vault = oracledatabase_v1.ExascaleDbStorageVault() + exascale_db_storage_vault.display_name = "display_name_value" + exascale_db_storage_vault.properties.exascale_db_storage_details.total_size_gbs = 1497 + + request = oracledatabase_v1.CreateExascaleDbStorageVaultRequest( + parent="parent_value", + exascale_db_storage_vault_id="exascale_db_storage_vault_id_value", + exascale_db_storage_vault=exascale_db_storage_vault, + ) + + # Make the request + operation = client.create_exascale_db_storage_vault(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request 
(Union[google.cloud.oracledatabase_v1.types.CreateExascaleDbStorageVaultRequest, dict]): + The request object. The request for ``ExascaleDbStorageVault.Create``. + parent (str): + Required. The value for parent of the + ExascaleDbStorageVault in the following + format: + projects/{project}/locations/{location}. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + exascale_db_storage_vault (google.cloud.oracledatabase_v1.types.ExascaleDbStorageVault): + Required. The resource being created. + This corresponds to the ``exascale_db_storage_vault`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + exascale_db_storage_vault_id (str): + Required. The ID of the ExascaleDbStorageVault to + create. This value is restricted to + (^\ `a-z <[a-z0-9-]{0,61}[a-z0-9]>`__?$) and must be a + maximum of 63 characters in length. The value must start + with a letter and end with a letter or a number. + + This corresponds to the ``exascale_db_storage_vault_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.oracledatabase_v1.types.ExascaleDbStorageVault` ExascaleDbStorageVault represents a storage vault exadb vm cluster resource. 
+ https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/ExascaleDbStorageVault/ + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [ + parent, + exascale_db_storage_vault, + exascale_db_storage_vault_id, + ] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest + ): + request = gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest( + request + ) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if exascale_db_storage_vault is not None: + request.exascale_db_storage_vault = exascale_db_storage_vault + if exascale_db_storage_vault_id is not None: + request.exascale_db_storage_vault_id = exascale_db_storage_vault_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.create_exascale_db_storage_vault + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation.from_gapic( + response, + self._transport.operations_client, + gco_exascale_db_storage_vault.ExascaleDbStorageVault, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. + return response + + def delete_exascale_db_storage_vault( + self, + request: Optional[ + Union[exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Deletes a single ExascaleDB Storage Vault. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_delete_exascale_db_storage_vault(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.DeleteExascaleDbStorageVaultRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_exascale_db_storage_vault(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.DeleteExascaleDbStorageVaultRequest, dict]): + The request object. The request message for + ``ExascaleDbStorageVault.Delete``. + name (str): + Required. 
The name of the ExascaleDbStorageVault in the + following format: + projects/{project}/locations/{location}/exascaleDbStorageVaults/{exascale_db_storage_vault}. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
+ if not isinstance( + request, exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest + ): + request = exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest( + request + ) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.delete_exascale_db_storage_vault + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. + return response + + def list_db_system_initial_storage_sizes( + self, + request: Optional[ + Union[ + db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest, + dict, + ] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListDbSystemInitialStorageSizesPager: + r"""Lists all the DbSystemInitialStorageSizes for the + given project and location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_list_db_system_initial_storage_sizes(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListDbSystemInitialStorageSizesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_db_system_initial_storage_sizes(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.ListDbSystemInitialStorageSizesRequest, dict]): + The request object. The request for ``DbSystemInitialStorageSizes.List``. + parent (str): + Required. The parent value for the + DbSystemInitialStorageSize resource with + the format: + projects/{project}/locations/{location} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbSystemInitialStorageSizesPager: + The response for DbSystemInitialStorageSizes.List. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, + db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest, + ): + request = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest( + request + ) + ) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.list_db_system_initial_storage_sizes + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListDbSystemInitialStorageSizesPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_databases( + self, + request: Optional[Union[database.ListDatabasesRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListDatabasesPager: + r"""Lists all the Databases for the given project, + location and DbSystem. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_list_databases(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListDatabasesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_databases(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.ListDatabasesRequest, dict]): + The request object. The request for ``Database.List``. + parent (str): + Required. The parent resource name in + the following format: + projects/{project}/locations/{region} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDatabasesPager: + The response for Database.List. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, database.ListDatabasesRequest): + request = database.ListDatabasesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_databases] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListDatabasesPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_database( + self, + request: Optional[Union[database.GetDatabaseRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> database.Database: + r"""Gets details of a single Database. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_get_database(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetDatabaseRequest( + name="name_value", + ) + + # Make the request + response = client.get_database(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.GetDatabaseRequest, dict]): + The request object. The request for ``Database.Get``. + name (str): + Required. The name of the Database + resource in the following format: + projects/{project}/locations/{region}/databases/{database} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.types.Database: + Details of the Database resource. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/Database/ + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, database.GetDatabaseRequest): + request = database.GetDatabaseRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_database] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_pluggable_databases( + self, + request: Optional[ + Union[pluggable_database.ListPluggableDatabasesRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListPluggableDatabasesPager: + r"""Lists all the PluggableDatabases for the given + project, location and Container Database. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_list_pluggable_databases(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListPluggableDatabasesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_pluggable_databases(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.ListPluggableDatabasesRequest, dict]): + The request object. The request for ``PluggableDatabase.List``. + parent (str): + Required. The parent, which owns this + collection of PluggableDatabases. + Format: + projects/{project}/locations/{location} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListPluggableDatabasesPager: + The response for PluggableDatabase.List. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, pluggable_database.ListPluggableDatabasesRequest): + request = pluggable_database.ListPluggableDatabasesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_pluggable_databases] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPluggableDatabasesPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_pluggable_database( + self, + request: Optional[ + Union[pluggable_database.GetPluggableDatabaseRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pluggable_database.PluggableDatabase: + r"""Gets details of a single PluggableDatabase. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_get_pluggable_database(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetPluggableDatabaseRequest( + name="name_value", + ) + + # Make the request + response = client.get_pluggable_database(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.GetPluggableDatabaseRequest, dict]): + The request object. The request for ``PluggableDatabase.Get``. + name (str): + Required. 
The name of the PluggableDatabase resource in + the following format: + projects/{project}/locations/{region}/pluggableDatabases/{pluggable_database} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.types.PluggableDatabase: + The PluggableDatabase resource. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/PluggableDatabase/ + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, pluggable_database.GetPluggableDatabaseRequest): + request = pluggable_database.GetPluggableDatabaseRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.get_pluggable_database] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_db_systems( + self, + request: Optional[Union[db_system.ListDbSystemsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListDbSystemsPager: + r"""Lists all the DbSystems for the given project and + location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_list_db_systems(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListDbSystemsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_db_systems(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.ListDbSystemsRequest, dict]): + The request object. The request for ``DbSystem.List``. + parent (str): + Required. 
The parent value for + DbSystems in the following format: + projects/{project}/locations/{location}. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbSystemsPager: + The response for DbSystem.List. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, db_system.ListDbSystemsRequest): + request = db_system.ListDbSystemsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.list_db_systems] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListDbSystemsPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_db_system( + self, + request: Optional[Union[db_system.GetDbSystemRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> db_system.DbSystem: + r"""Gets details of a single DbSystem. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_get_db_system(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetDbSystemRequest( + name="name_value", + ) + + # Make the request + response = client.get_db_system(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.GetDbSystemRequest, dict]): + The request object. The request for ``DbSystem.Get``. + name (str): + Required. The name of the DbSystem in the following + format: + projects/{project}/locations/{location}/dbSystems/{db_system}. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.types.DbSystem: + Details of the DbSystem (BaseDB) + resource. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/DbSystem/ + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, db_system.GetDbSystemRequest): + request = db_system.GetDbSystemRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_db_system] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def create_db_system( + self, + request: Optional[Union[gco_db_system.CreateDbSystemRequest, dict]] = None, + *, + parent: Optional[str] = None, + db_system: Optional[gco_db_system.DbSystem] = None, + db_system_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Creates a new DbSystem in a given project and + location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_create_db_system(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + db_system = oracledatabase_v1.DbSystem() + db_system.odb_subnet = "odb_subnet_value" + db_system.display_name = "display_name_value" + + request = oracledatabase_v1.CreateDbSystemRequest( + parent="parent_value", + db_system_id="db_system_id_value", + db_system=db_system, + ) + + # Make the request + operation = client.create_db_system(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.CreateDbSystemRequest, dict]): + The request object. The request for ``DbSystem.Create``. + parent (str): + Required. The value for parent of the + DbSystem in the following format: + projects/{project}/locations/{location}. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + db_system (google.cloud.oracledatabase_v1.types.DbSystem): + Required. The resource being created. + This corresponds to the ``db_system`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + db_system_id (str): + Required. The ID of the DbSystem to create. This value + is restricted to + (^\ `a-z <[a-z0-9-]{0,61}[a-z0-9]>`__?$) and must be a + maximum of 63 characters in length. The value must start + with a letter and end with a letter or a number. 
+ + This corresponds to the ``db_system_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.oracledatabase_v1.types.DbSystem` Details of the DbSystem (BaseDB) resource. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/DbSystem/ + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, db_system, db_system_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, gco_db_system.CreateDbSystemRequest): + request = gco_db_system.CreateDbSystemRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if db_system is not None: + request.db_system = db_system + if db_system_id is not None: + request.db_system_id = db_system_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_db_system] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + gco_db_system.DbSystem, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. + return response + + def delete_db_system( + self, + request: Optional[Union[db_system.DeleteDbSystemRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Deletes a single DbSystem. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_delete_db_system(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.DeleteDbSystemRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_db_system(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.DeleteDbSystemRequest, dict]): + The request object. The request for ``DbSystem.Delete``. + name (str): + Required. The name of the DbSystem in the following + format: + projects/{project}/locations/{location}/dbSystems/{db_system}. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. 
For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, db_system.DeleteDbSystemRequest): + request = db_system.DeleteDbSystemRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_db_system] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=oracledatabase.OperationMetadata, + ) + + # Done; return the response. 
+ return response + + def list_db_versions( + self, + request: Optional[Union[db_version.ListDbVersionsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListDbVersionsPager: + r"""List DbVersions for the given project and location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_list_db_versions(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListDbVersionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_db_versions(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.ListDbVersionsRequest, dict]): + The request object. The request for ``DbVersions.List``. + parent (str): + Required. The parent value for the + DbVersion resource with the format: + projects/{project}/locations/{location} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbVersionsPager: + The response for DbVersions.List. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, db_version.ListDbVersionsRequest): + request = db_version.ListDbVersionsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_db_versions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListDbVersionsPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_database_character_sets( + self, + request: Optional[ + Union[database_character_set.ListDatabaseCharacterSetsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListDatabaseCharacterSetsPager: + r"""List DatabaseCharacterSets for the given project and + location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import oracledatabase_v1 + + def sample_list_database_character_sets(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListDatabaseCharacterSetsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_database_character_sets(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.oracledatabase_v1.types.ListDatabaseCharacterSetsRequest, dict]): + The request object. The request for ``DatabaseCharacterSet.List``. + parent (str): + Required. The parent value for + DatabaseCharacterSets in the following + format: + projects/{project}/locations/{location}. 
+ + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDatabaseCharacterSetsPager: + The response for DatabaseCharacterSet.List. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, database_character_set.ListDatabaseCharacterSetsRequest + ): + request = database_character_set.ListDatabaseCharacterSetsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[ + self._transport.list_database_character_sets + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListDatabaseCharacterSetsPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + def __enter__(self) -> "OracleDatabaseClient": return self diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/pagers.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/pagers.py index 3f983ec983ca..51c55b2109a0 100644 --- a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/pagers.py +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/pagers.py @@ -43,13 +43,24 @@ autonomous_database_character_set, autonomous_db_backup, autonomous_db_version, + database, + database_character_set, db_node, db_server, + db_system, + db_system_initial_storage_size, db_system_shape, + db_version, entitlement, exadata_infra, + exadb_vm_cluster, + exascale_db_storage_vault, gi_version, + minor_version, + odb_network, + odb_subnet, oracledatabase, + pluggable_database, vm_cluster, ) @@ -994,6 +1005,162 @@ def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) +class ListMinorVersionsPager: + """A pager for iterating through ``list_minor_versions`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.oracledatabase_v1.types.ListMinorVersionsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``minor_versions`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListMinorVersions`` requests and continue to iterate + through the ``minor_versions`` field on the + corresponding responses. + + All the usual :class:`google.cloud.oracledatabase_v1.types.ListMinorVersionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., minor_version.ListMinorVersionsResponse], + request: minor_version.ListMinorVersionsRequest, + response: minor_version.ListMinorVersionsResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.oracledatabase_v1.types.ListMinorVersionsRequest): + The initial request object. + response (google.cloud.oracledatabase_v1.types.ListMinorVersionsResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + self._method = method + self._request = minor_version.ListMinorVersionsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[minor_version.ListMinorVersionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__(self) -> Iterator[minor_version.MinorVersion]: + for page in self.pages: + yield from page.minor_versions + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListMinorVersionsAsyncPager: + """A pager for iterating through ``list_minor_versions`` requests. + + This class thinly wraps an initial + :class:`google.cloud.oracledatabase_v1.types.ListMinorVersionsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``minor_versions`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListMinorVersions`` requests and continue to iterate + through the ``minor_versions`` field on the + corresponding responses. + + All the usual :class:`google.cloud.oracledatabase_v1.types.ListMinorVersionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., Awaitable[minor_version.ListMinorVersionsResponse]], + request: minor_version.ListMinorVersionsRequest, + response: minor_version.ListMinorVersionsResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.oracledatabase_v1.types.ListMinorVersionsRequest): + The initial request object. + response (google.cloud.oracledatabase_v1.types.ListMinorVersionsResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + self._method = method + self._request = minor_version.ListMinorVersionsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[minor_version.ListMinorVersionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__(self) -> AsyncIterator[minor_version.MinorVersion]: + async def async_generator(): + async for page in self.pages: + for response in page.minor_versions: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + class ListDbSystemShapesPager: """A pager for iterating through ``list_db_system_shapes`` requests. @@ -1802,3 +1969,1615 @@ async def async_generator(): def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListOdbNetworksPager: + """A pager for iterating through ``list_odb_networks`` requests. + + This class thinly wraps an initial + :class:`google.cloud.oracledatabase_v1.types.ListOdbNetworksResponse` object, and + provides an ``__iter__`` method to iterate through its + ``odb_networks`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListOdbNetworks`` requests and continue to iterate + through the ``odb_networks`` field on the + corresponding responses. + + All the usual :class:`google.cloud.oracledatabase_v1.types.ListOdbNetworksResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., odb_network.ListOdbNetworksResponse], + request: odb_network.ListOdbNetworksRequest, + response: odb_network.ListOdbNetworksResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.oracledatabase_v1.types.ListOdbNetworksRequest): + The initial request object. + response (google.cloud.oracledatabase_v1.types.ListOdbNetworksResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + self._method = method + self._request = odb_network.ListOdbNetworksRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[odb_network.ListOdbNetworksResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__(self) -> Iterator[odb_network.OdbNetwork]: + for page in self.pages: + yield from page.odb_networks + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListOdbNetworksAsyncPager: + """A pager for iterating through ``list_odb_networks`` requests. + + This class thinly wraps an initial + :class:`google.cloud.oracledatabase_v1.types.ListOdbNetworksResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``odb_networks`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListOdbNetworks`` requests and continue to iterate + through the ``odb_networks`` field on the + corresponding responses. + + All the usual :class:`google.cloud.oracledatabase_v1.types.ListOdbNetworksResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., Awaitable[odb_network.ListOdbNetworksResponse]], + request: odb_network.ListOdbNetworksRequest, + response: odb_network.ListOdbNetworksResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.oracledatabase_v1.types.ListOdbNetworksRequest): + The initial request object. + response (google.cloud.oracledatabase_v1.types.ListOdbNetworksResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + self._method = method + self._request = odb_network.ListOdbNetworksRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[odb_network.ListOdbNetworksResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__(self) -> AsyncIterator[odb_network.OdbNetwork]: + async def async_generator(): + async for page in self.pages: + for response in page.odb_networks: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListOdbSubnetsPager: + """A pager for iterating through ``list_odb_subnets`` requests. + + This class thinly wraps an initial + :class:`google.cloud.oracledatabase_v1.types.ListOdbSubnetsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``odb_subnets`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListOdbSubnets`` requests and continue to iterate + through the ``odb_subnets`` field on the + corresponding responses. + + All the usual :class:`google.cloud.oracledatabase_v1.types.ListOdbSubnetsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., odb_subnet.ListOdbSubnetsResponse], + request: odb_subnet.ListOdbSubnetsRequest, + response: odb_subnet.ListOdbSubnetsResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.oracledatabase_v1.types.ListOdbSubnetsRequest): + The initial request object. + response (google.cloud.oracledatabase_v1.types.ListOdbSubnetsResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + self._method = method + self._request = odb_subnet.ListOdbSubnetsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[odb_subnet.ListOdbSubnetsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__(self) -> Iterator[odb_subnet.OdbSubnet]: + for page in self.pages: + yield from page.odb_subnets + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListOdbSubnetsAsyncPager: + """A pager for iterating through ``list_odb_subnets`` requests. + + This class thinly wraps an initial + :class:`google.cloud.oracledatabase_v1.types.ListOdbSubnetsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``odb_subnets`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListOdbSubnets`` requests and continue to iterate + through the ``odb_subnets`` field on the + corresponding responses. + + All the usual :class:`google.cloud.oracledatabase_v1.types.ListOdbSubnetsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[odb_subnet.ListOdbSubnetsResponse]], + request: odb_subnet.ListOdbSubnetsRequest, + response: odb_subnet.ListOdbSubnetsResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiates the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.oracledatabase_v1.types.ListOdbSubnetsRequest): + The initial request object. + response (google.cloud.oracledatabase_v1.types.ListOdbSubnetsResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = odb_subnet.ListOdbSubnetsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[odb_subnet.ListOdbSubnetsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__(self) -> AsyncIterator[odb_subnet.OdbSubnet]: + async def async_generator(): + async for page in self.pages: + for response in page.odb_subnets: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListExadbVmClustersPager: + """A pager for iterating through ``list_exadb_vm_clusters`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.oracledatabase_v1.types.ListExadbVmClustersResponse` object, and + provides an ``__iter__`` method to iterate through its + ``exadb_vm_clusters`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListExadbVmClusters`` requests and continue to iterate + through the ``exadb_vm_clusters`` field on the + corresponding responses. + + All the usual :class:`google.cloud.oracledatabase_v1.types.ListExadbVmClustersResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., oracledatabase.ListExadbVmClustersResponse], + request: oracledatabase.ListExadbVmClustersRequest, + response: oracledatabase.ListExadbVmClustersResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.oracledatabase_v1.types.ListExadbVmClustersRequest): + The initial request object. + response (google.cloud.oracledatabase_v1.types.ListExadbVmClustersResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + self._method = method + self._request = oracledatabase.ListExadbVmClustersRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[oracledatabase.ListExadbVmClustersResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__(self) -> Iterator[exadb_vm_cluster.ExadbVmCluster]: + for page in self.pages: + yield from page.exadb_vm_clusters + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListExadbVmClustersAsyncPager: + """A pager for iterating through ``list_exadb_vm_clusters`` requests. + + This class thinly wraps an initial + :class:`google.cloud.oracledatabase_v1.types.ListExadbVmClustersResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``exadb_vm_clusters`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListExadbVmClusters`` requests and continue to iterate + through the ``exadb_vm_clusters`` field on the + corresponding responses. + + All the usual :class:`google.cloud.oracledatabase_v1.types.ListExadbVmClustersResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., Awaitable[oracledatabase.ListExadbVmClustersResponse]], + request: oracledatabase.ListExadbVmClustersRequest, + response: oracledatabase.ListExadbVmClustersResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.oracledatabase_v1.types.ListExadbVmClustersRequest): + The initial request object. + response (google.cloud.oracledatabase_v1.types.ListExadbVmClustersResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + self._method = method + self._request = oracledatabase.ListExadbVmClustersRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[oracledatabase.ListExadbVmClustersResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__(self) -> AsyncIterator[exadb_vm_cluster.ExadbVmCluster]: + async def async_generator(): + async for page in self.pages: + for response in page.exadb_vm_clusters: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListExascaleDbStorageVaultsPager: + """A pager for iterating through ``list_exascale_db_storage_vaults`` requests. + + This class thinly wraps an initial + :class:`google.cloud.oracledatabase_v1.types.ListExascaleDbStorageVaultsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``exascale_db_storage_vaults`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListExascaleDbStorageVaults`` requests and continue to iterate + through the ``exascale_db_storage_vaults`` field on the + corresponding responses. + + All the usual :class:`google.cloud.oracledatabase_v1.types.ListExascaleDbStorageVaultsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[ + ..., exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse + ], + request: exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest, + response: exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.oracledatabase_v1.types.ListExascaleDbStorageVaultsRequest): + The initial request object. + response (google.cloud.oracledatabase_v1.types.ListExascaleDbStorageVaultsResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + self._method = method + self._request = exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest( + request + ) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages( + self, + ) -> Iterator[exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__(self) -> Iterator[exascale_db_storage_vault.ExascaleDbStorageVault]: + for page in self.pages: + yield from page.exascale_db_storage_vaults + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListExascaleDbStorageVaultsAsyncPager: + """A pager for iterating through ``list_exascale_db_storage_vaults`` requests. + + This class thinly wraps an initial + :class:`google.cloud.oracledatabase_v1.types.ListExascaleDbStorageVaultsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``exascale_db_storage_vaults`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListExascaleDbStorageVaults`` requests and continue to iterate + through the ``exascale_db_storage_vaults`` field on the + corresponding responses. + + All the usual :class:`google.cloud.oracledatabase_v1.types.ListExascaleDbStorageVaultsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[ + ..., + Awaitable[exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse], + ], + request: exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest, + response: exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.oracledatabase_v1.types.ListExascaleDbStorageVaultsRequest): + The initial request object. + response (google.cloud.oracledatabase_v1.types.ListExascaleDbStorageVaultsResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + self._method = method + self._request = exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest( + request + ) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__( + self, + ) -> AsyncIterator[exascale_db_storage_vault.ExascaleDbStorageVault]: + async def async_generator(): + async for page in self.pages: + for response in page.exascale_db_storage_vaults: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListDbSystemInitialStorageSizesPager: + """A pager for iterating through ``list_db_system_initial_storage_sizes`` requests. + + This class thinly wraps an initial + :class:`google.cloud.oracledatabase_v1.types.ListDbSystemInitialStorageSizesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``db_system_initial_storage_sizes`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListDbSystemInitialStorageSizes`` requests and continue to iterate + through the ``db_system_initial_storage_sizes`` field on the + corresponding responses. + + All the usual :class:`google.cloud.oracledatabase_v1.types.ListDbSystemInitialStorageSizesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[ + ..., db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse + ], + request: db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest, + response: db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.oracledatabase_v1.types.ListDbSystemInitialStorageSizesRequest): + The initial request object. + response (google.cloud.oracledatabase_v1.types.ListDbSystemInitialStorageSizesResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + self._method = method + self._request = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest( + request + ) + ) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages( + self, + ) -> Iterator[ + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse + ]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__( + self, + ) -> Iterator[db_system_initial_storage_size.DbSystemInitialStorageSize]: + for page in self.pages: + yield from page.db_system_initial_storage_sizes + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListDbSystemInitialStorageSizesAsyncPager: + """A pager for iterating through ``list_db_system_initial_storage_sizes`` requests. + + This class thinly wraps an initial + :class:`google.cloud.oracledatabase_v1.types.ListDbSystemInitialStorageSizesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``db_system_initial_storage_sizes`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListDbSystemInitialStorageSizes`` requests and continue to iterate + through the ``db_system_initial_storage_sizes`` field on the + corresponding responses. + + All the usual :class:`google.cloud.oracledatabase_v1.types.ListDbSystemInitialStorageSizesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[ + ..., + Awaitable[ + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse + ], + ], + request: db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest, + response: db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.oracledatabase_v1.types.ListDbSystemInitialStorageSizesRequest): + The initial request object. + response (google.cloud.oracledatabase_v1.types.ListDbSystemInitialStorageSizesResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + self._method = method + self._request = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest( + request + ) + ) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[ + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse + ]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__( + self, + ) -> AsyncIterator[db_system_initial_storage_size.DbSystemInitialStorageSize]: + async def async_generator(): + async for page in self.pages: + for response in page.db_system_initial_storage_sizes: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListDatabasesPager: + """A pager for iterating through ``list_databases`` requests. + + This class thinly wraps an initial + :class:`google.cloud.oracledatabase_v1.types.ListDatabasesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``databases`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListDatabases`` requests and continue to iterate + through the ``databases`` field on the + corresponding responses. + + All the usual :class:`google.cloud.oracledatabase_v1.types.ListDatabasesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., database.ListDatabasesResponse], + request: database.ListDatabasesRequest, + response: database.ListDatabasesResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.oracledatabase_v1.types.ListDatabasesRequest): + The initial request object. + response (google.cloud.oracledatabase_v1.types.ListDatabasesResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + self._method = method + self._request = database.ListDatabasesRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[database.ListDatabasesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__(self) -> Iterator[database.Database]: + for page in self.pages: + yield from page.databases + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListDatabasesAsyncPager: + """A pager for iterating through ``list_databases`` requests. + + This class thinly wraps an initial + :class:`google.cloud.oracledatabase_v1.types.ListDatabasesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``databases`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListDatabases`` requests and continue to iterate + through the ``databases`` field on the + corresponding responses. + + All the usual :class:`google.cloud.oracledatabase_v1.types.ListDatabasesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[database.ListDatabasesResponse]], + request: database.ListDatabasesRequest, + response: database.ListDatabasesResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiates the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.oracledatabase_v1.types.ListDatabasesRequest): + The initial request object. + response (google.cloud.oracledatabase_v1.types.ListDatabasesResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = database.ListDatabasesRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[database.ListDatabasesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__(self) -> AsyncIterator[database.Database]: + async def async_generator(): + async for page in self.pages: + for response in page.databases: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListPluggableDatabasesPager: + """A pager for iterating through ``list_pluggable_databases`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.oracledatabase_v1.types.ListPluggableDatabasesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``pluggable_databases`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListPluggableDatabases`` requests and continue to iterate + through the ``pluggable_databases`` field on the + corresponding responses. + + All the usual :class:`google.cloud.oracledatabase_v1.types.ListPluggableDatabasesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., pluggable_database.ListPluggableDatabasesResponse], + request: pluggable_database.ListPluggableDatabasesRequest, + response: pluggable_database.ListPluggableDatabasesResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.oracledatabase_v1.types.ListPluggableDatabasesRequest): + The initial request object. + response (google.cloud.oracledatabase_v1.types.ListPluggableDatabasesResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + self._method = method + self._request = pluggable_database.ListPluggableDatabasesRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[pluggable_database.ListPluggableDatabasesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__(self) -> Iterator[pluggable_database.PluggableDatabase]: + for page in self.pages: + yield from page.pluggable_databases + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListPluggableDatabasesAsyncPager: + """A pager for iterating through ``list_pluggable_databases`` requests. + + This class thinly wraps an initial + :class:`google.cloud.oracledatabase_v1.types.ListPluggableDatabasesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``pluggable_databases`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListPluggableDatabases`` requests and continue to iterate + through the ``pluggable_databases`` field on the + corresponding responses. + + All the usual :class:`google.cloud.oracledatabase_v1.types.ListPluggableDatabasesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[pluggable_database.ListPluggableDatabasesResponse] + ], + request: pluggable_database.ListPluggableDatabasesRequest, + response: pluggable_database.ListPluggableDatabasesResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.oracledatabase_v1.types.ListPluggableDatabasesRequest): + The initial request object. + response (google.cloud.oracledatabase_v1.types.ListPluggableDatabasesResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + self._method = method + self._request = pluggable_database.ListPluggableDatabasesRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[pluggable_database.ListPluggableDatabasesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__(self) -> AsyncIterator[pluggable_database.PluggableDatabase]: + async def async_generator(): + async for page in self.pages: + for response in page.pluggable_databases: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListDbSystemsPager: + """A pager for iterating through ``list_db_systems`` requests. + + This class thinly wraps an initial + :class:`google.cloud.oracledatabase_v1.types.ListDbSystemsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``db_systems`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListDbSystems`` requests and continue to iterate + through the ``db_systems`` field on the + corresponding responses. + + All the usual :class:`google.cloud.oracledatabase_v1.types.ListDbSystemsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., db_system.ListDbSystemsResponse], + request: db_system.ListDbSystemsRequest, + response: db_system.ListDbSystemsResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.oracledatabase_v1.types.ListDbSystemsRequest): + The initial request object. + response (google.cloud.oracledatabase_v1.types.ListDbSystemsResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + self._method = method + self._request = db_system.ListDbSystemsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[db_system.ListDbSystemsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__(self) -> Iterator[db_system.DbSystem]: + for page in self.pages: + yield from page.db_systems + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListDbSystemsAsyncPager: + """A pager for iterating through ``list_db_systems`` requests. + + This class thinly wraps an initial + :class:`google.cloud.oracledatabase_v1.types.ListDbSystemsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``db_systems`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListDbSystems`` requests and continue to iterate + through the ``db_systems`` field on the + corresponding responses. + + All the usual :class:`google.cloud.oracledatabase_v1.types.ListDbSystemsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[db_system.ListDbSystemsResponse]], + request: db_system.ListDbSystemsRequest, + response: db_system.ListDbSystemsResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiates the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.oracledatabase_v1.types.ListDbSystemsRequest): + The initial request object. + response (google.cloud.oracledatabase_v1.types.ListDbSystemsResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = db_system.ListDbSystemsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[db_system.ListDbSystemsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__(self) -> AsyncIterator[db_system.DbSystem]: + async def async_generator(): + async for page in self.pages: + for response in page.db_systems: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListDbVersionsPager: + """A pager for iterating through ``list_db_versions`` requests. + + This class thinly wraps an initial + :class:`google.cloud.oracledatabase_v1.types.ListDbVersionsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``db_versions`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListDbVersions`` requests and continue to iterate + through the ``db_versions`` field on the + corresponding responses. + + All the usual :class:`google.cloud.oracledatabase_v1.types.ListDbVersionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., db_version.ListDbVersionsResponse], + request: db_version.ListDbVersionsRequest, + response: db_version.ListDbVersionsResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.oracledatabase_v1.types.ListDbVersionsRequest): + The initial request object. + response (google.cloud.oracledatabase_v1.types.ListDbVersionsResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + self._method = method + self._request = db_version.ListDbVersionsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[db_version.ListDbVersionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__(self) -> Iterator[db_version.DbVersion]: + for page in self.pages: + yield from page.db_versions + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListDbVersionsAsyncPager: + """A pager for iterating through ``list_db_versions`` requests. + + This class thinly wraps an initial + :class:`google.cloud.oracledatabase_v1.types.ListDbVersionsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``db_versions`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListDbVersions`` requests and continue to iterate + through the ``db_versions`` field on the + corresponding responses. + + All the usual :class:`google.cloud.oracledatabase_v1.types.ListDbVersionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[db_version.ListDbVersionsResponse]], + request: db_version.ListDbVersionsRequest, + response: db_version.ListDbVersionsResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiates the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.oracledatabase_v1.types.ListDbVersionsRequest): + The initial request object. + response (google.cloud.oracledatabase_v1.types.ListDbVersionsResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = db_version.ListDbVersionsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[db_version.ListDbVersionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__(self) -> AsyncIterator[db_version.DbVersion]: + async def async_generator(): + async for page in self.pages: + for response in page.db_versions: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListDatabaseCharacterSetsPager: + """A pager for iterating through ``list_database_character_sets`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.oracledatabase_v1.types.ListDatabaseCharacterSetsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``database_character_sets`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListDatabaseCharacterSets`` requests and continue to iterate + through the ``database_character_sets`` field on the + corresponding responses. + + All the usual :class:`google.cloud.oracledatabase_v1.types.ListDatabaseCharacterSetsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., database_character_set.ListDatabaseCharacterSetsResponse], + request: database_character_set.ListDatabaseCharacterSetsRequest, + response: database_character_set.ListDatabaseCharacterSetsResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.oracledatabase_v1.types.ListDatabaseCharacterSetsRequest): + The initial request object. + response (google.cloud.oracledatabase_v1.types.ListDatabaseCharacterSetsResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + self._method = method + self._request = database_character_set.ListDatabaseCharacterSetsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages( + self, + ) -> Iterator[database_character_set.ListDatabaseCharacterSetsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__(self) -> Iterator[database_character_set.DatabaseCharacterSet]: + for page in self.pages: + yield from page.database_character_sets + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListDatabaseCharacterSetsAsyncPager: + """A pager for iterating through ``list_database_character_sets`` requests. + + This class thinly wraps an initial + :class:`google.cloud.oracledatabase_v1.types.ListDatabaseCharacterSetsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``database_character_sets`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListDatabaseCharacterSets`` requests and continue to iterate + through the ``database_character_sets`` field on the + corresponding responses. + + All the usual :class:`google.cloud.oracledatabase_v1.types.ListDatabaseCharacterSetsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[database_character_set.ListDatabaseCharacterSetsResponse] + ], + request: database_character_set.ListDatabaseCharacterSetsRequest, + response: database_character_set.ListDatabaseCharacterSetsResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.oracledatabase_v1.types.ListDatabaseCharacterSetsRequest): + The initial request object. + response (google.cloud.oracledatabase_v1.types.ListDatabaseCharacterSetsResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + self._method = method + self._request = database_character_set.ListDatabaseCharacterSetsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[database_character_set.ListDatabaseCharacterSetsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__(self) -> AsyncIterator[database_character_set.DatabaseCharacterSet]: + async def async_generator(): + async for page in self.pages: + for response in page.database_character_sets: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/transports/base.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/transports/base.py index 855e849bbeb5..cde5176115c4 100644 --- a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/transports/base.py +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/transports/base.py @@ -29,11 +29,32 @@ from google.cloud.oracledatabase_v1 import gapic_version as package_version from google.cloud.oracledatabase_v1.types import ( - autonomous_database, + db_system_initial_storage_size, + db_version, exadata_infra, + exadb_vm_cluster, +) +from google.cloud.oracledatabase_v1.types import ( + autonomous_database, + database, + database_character_set, +) +from google.cloud.oracledatabase_v1.types import ( 
oracledatabase, + pluggable_database, vm_cluster, ) +from google.cloud.oracledatabase_v1.types import ( + exascale_db_storage_vault as gco_exascale_db_storage_vault, +) +from google.cloud.oracledatabase_v1.types import db_system +from google.cloud.oracledatabase_v1.types import db_system as gco_db_system +from google.cloud.oracledatabase_v1.types import exascale_db_storage_vault +from google.cloud.oracledatabase_v1.types import minor_version +from google.cloud.oracledatabase_v1.types import odb_network +from google.cloud.oracledatabase_v1.types import odb_network as gco_odb_network +from google.cloud.oracledatabase_v1.types import odb_subnet +from google.cloud.oracledatabase_v1.types import odb_subnet as gco_odb_subnet DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ @@ -148,8 +169,6 @@ def _prep_wrapped_messages(self, client_info): multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, core_exceptions.ServiceUnavailable, ), deadline=60.0, @@ -165,8 +184,6 @@ def _prep_wrapped_messages(self, client_info): multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, core_exceptions.ServiceUnavailable, ), deadline=60.0, @@ -192,8 +209,6 @@ def _prep_wrapped_messages(self, client_info): multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, core_exceptions.ServiceUnavailable, ), deadline=60.0, @@ -209,8 +224,6 @@ def _prep_wrapped_messages(self, client_info): multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, core_exceptions.ServiceUnavailable, ), deadline=60.0, @@ -236,8 +249,6 @@ def 
_prep_wrapped_messages(self, client_info): multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, core_exceptions.ServiceUnavailable, ), deadline=60.0, @@ -253,8 +264,6 @@ def _prep_wrapped_messages(self, client_info): multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, core_exceptions.ServiceUnavailable, ), deadline=60.0, @@ -270,8 +279,6 @@ def _prep_wrapped_messages(self, client_info): multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, core_exceptions.ServiceUnavailable, ), deadline=60.0, @@ -287,8 +294,21 @@ def _prep_wrapped_messages(self, client_info): multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_minor_versions: gapic_v1.method.wrap_method( + self.list_minor_versions, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable, ), deadline=60.0, @@ -304,8 +324,6 @@ def _prep_wrapped_messages(self, client_info): multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, core_exceptions.ServiceUnavailable, ), deadline=60.0, @@ -321,8 +339,6 @@ def _prep_wrapped_messages(self, client_info): multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, 
core_exceptions.ServiceUnavailable, ), deadline=60.0, @@ -338,8 +354,6 @@ def _prep_wrapped_messages(self, client_info): multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, core_exceptions.ServiceUnavailable, ), deadline=60.0, @@ -352,6 +366,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.update_autonomous_database: gapic_v1.method.wrap_method( + self.update_autonomous_database, + default_timeout=None, + client_info=client_info, + ), self.delete_autonomous_database: gapic_v1.method.wrap_method( self.delete_autonomous_database, default_timeout=None, @@ -375,8 +394,6 @@ def _prep_wrapped_messages(self, client_info): multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, core_exceptions.ServiceUnavailable, ), deadline=60.0, @@ -392,8 +409,6 @@ def _prep_wrapped_messages(self, client_info): multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, core_exceptions.ServiceUnavailable, ), deadline=60.0, @@ -409,8 +424,6 @@ def _prep_wrapped_messages(self, client_info): multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, core_exceptions.ServiceUnavailable, ), deadline=60.0, @@ -433,204 +446,541 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), - self.get_location: gapic_v1.method.wrap_method( - self.get_location, + self.switchover_autonomous_database: gapic_v1.method.wrap_method( + self.switchover_autonomous_database, default_timeout=None, client_info=client_info, ), - self.list_locations: gapic_v1.method.wrap_method( - 
self.list_locations, + self.failover_autonomous_database: gapic_v1.method.wrap_method( + self.failover_autonomous_database, default_timeout=None, client_info=client_info, ), - self.cancel_operation: gapic_v1.method.wrap_method( - self.cancel_operation, + self.list_odb_networks: gapic_v1.method.wrap_method( + self.list_odb_networks, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_odb_network: gapic_v1.method.wrap_method( + self.get_odb_network, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.create_odb_network: gapic_v1.method.wrap_method( + self.create_odb_network, default_timeout=None, client_info=client_info, ), - self.delete_operation: gapic_v1.method.wrap_method( - self.delete_operation, + self.delete_odb_network: gapic_v1.method.wrap_method( + self.delete_odb_network, default_timeout=None, client_info=client_info, ), - self.get_operation: gapic_v1.method.wrap_method( - self.get_operation, + self.list_odb_subnets: gapic_v1.method.wrap_method( + self.list_odb_subnets, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_odb_subnet: gapic_v1.method.wrap_method( + self.get_odb_subnet, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + 
core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.create_odb_subnet: gapic_v1.method.wrap_method( + self.create_odb_subnet, default_timeout=None, client_info=client_info, ), - self.list_operations: gapic_v1.method.wrap_method( - self.list_operations, + self.delete_odb_subnet: gapic_v1.method.wrap_method( + self.delete_odb_subnet, default_timeout=None, client_info=client_info, ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! - """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def list_cloud_exadata_infrastructures( - self, - ) -> Callable[ - [oracledatabase.ListCloudExadataInfrastructuresRequest], - Union[ - oracledatabase.ListCloudExadataInfrastructuresResponse, - Awaitable[oracledatabase.ListCloudExadataInfrastructuresResponse], - ], - ]: - raise NotImplementedError() - - @property - def get_cloud_exadata_infrastructure( - self, - ) -> Callable[ - [oracledatabase.GetCloudExadataInfrastructureRequest], - Union[ - exadata_infra.CloudExadataInfrastructure, - Awaitable[exadata_infra.CloudExadataInfrastructure], - ], - ]: - raise NotImplementedError() - - @property - def create_cloud_exadata_infrastructure( - self, - ) -> Callable[ - [oracledatabase.CreateCloudExadataInfrastructureRequest], - Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def delete_cloud_exadata_infrastructure( - self, - ) -> Callable[ - [oracledatabase.DeleteCloudExadataInfrastructureRequest], - Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def list_cloud_vm_clusters( - 
self, - ) -> Callable[ - [oracledatabase.ListCloudVmClustersRequest], - Union[ - oracledatabase.ListCloudVmClustersResponse, - Awaitable[oracledatabase.ListCloudVmClustersResponse], - ], - ]: - raise NotImplementedError() - - @property - def get_cloud_vm_cluster( - self, - ) -> Callable[ - [oracledatabase.GetCloudVmClusterRequest], - Union[vm_cluster.CloudVmCluster, Awaitable[vm_cluster.CloudVmCluster]], - ]: - raise NotImplementedError() - - @property - def create_cloud_vm_cluster( - self, - ) -> Callable[ - [oracledatabase.CreateCloudVmClusterRequest], - Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def delete_cloud_vm_cluster( - self, - ) -> Callable[ - [oracledatabase.DeleteCloudVmClusterRequest], - Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def list_entitlements( - self, - ) -> Callable[ - [oracledatabase.ListEntitlementsRequest], - Union[ - oracledatabase.ListEntitlementsResponse, - Awaitable[oracledatabase.ListEntitlementsResponse], - ], - ]: - raise NotImplementedError() - - @property - def list_db_servers( - self, - ) -> Callable[ - [oracledatabase.ListDbServersRequest], - Union[ - oracledatabase.ListDbServersResponse, - Awaitable[oracledatabase.ListDbServersResponse], - ], - ]: - raise NotImplementedError() - - @property - def list_db_nodes( - self, - ) -> Callable[ - [oracledatabase.ListDbNodesRequest], - Union[ - oracledatabase.ListDbNodesResponse, - Awaitable[oracledatabase.ListDbNodesResponse], - ], - ]: - raise NotImplementedError() - - @property - def list_gi_versions( - self, - ) -> Callable[ - [oracledatabase.ListGiVersionsRequest], - Union[ - oracledatabase.ListGiVersionsResponse, - Awaitable[oracledatabase.ListGiVersionsResponse], - ], - ]: - raise NotImplementedError() - - @property - def list_db_system_shapes( - self, - ) -> Callable[ - [oracledatabase.ListDbSystemShapesRequest], - Union[ - 
oracledatabase.ListDbSystemShapesResponse, - Awaitable[oracledatabase.ListDbSystemShapesResponse], - ], - ]: - raise NotImplementedError() - - @property - def list_autonomous_databases( - self, - ) -> Callable[ - [oracledatabase.ListAutonomousDatabasesRequest], - Union[ - oracledatabase.ListAutonomousDatabasesResponse, - Awaitable[oracledatabase.ListAutonomousDatabasesResponse], - ], - ]: - raise NotImplementedError() + self.list_exadb_vm_clusters: gapic_v1.method.wrap_method( + self.list_exadb_vm_clusters, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_exadb_vm_cluster: gapic_v1.method.wrap_method( + self.get_exadb_vm_cluster, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.create_exadb_vm_cluster: gapic_v1.method.wrap_method( + self.create_exadb_vm_cluster, + default_timeout=None, + client_info=client_info, + ), + self.delete_exadb_vm_cluster: gapic_v1.method.wrap_method( + self.delete_exadb_vm_cluster, + default_timeout=None, + client_info=client_info, + ), + self.update_exadb_vm_cluster: gapic_v1.method.wrap_method( + self.update_exadb_vm_cluster, + default_timeout=None, + client_info=client_info, + ), + self.remove_virtual_machine_exadb_vm_cluster: gapic_v1.method.wrap_method( + self.remove_virtual_machine_exadb_vm_cluster, + default_timeout=None, + client_info=client_info, + ), + self.list_exascale_db_storage_vaults: gapic_v1.method.wrap_method( + self.list_exascale_db_storage_vaults, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + 
predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_exascale_db_storage_vault: gapic_v1.method.wrap_method( + self.get_exascale_db_storage_vault, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.create_exascale_db_storage_vault: gapic_v1.method.wrap_method( + self.create_exascale_db_storage_vault, + default_timeout=None, + client_info=client_info, + ), + self.delete_exascale_db_storage_vault: gapic_v1.method.wrap_method( + self.delete_exascale_db_storage_vault, + default_timeout=None, + client_info=client_info, + ), + self.list_db_system_initial_storage_sizes: gapic_v1.method.wrap_method( + self.list_db_system_initial_storage_sizes, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_databases: gapic_v1.method.wrap_method( + self.list_databases, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_database: gapic_v1.method.wrap_method( + self.get_database, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + 
), + self.list_pluggable_databases: gapic_v1.method.wrap_method( + self.list_pluggable_databases, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_pluggable_database: gapic_v1.method.wrap_method( + self.get_pluggable_database, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_db_systems: gapic_v1.method.wrap_method( + self.list_db_systems, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_db_system: gapic_v1.method.wrap_method( + self.get_db_system, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.create_db_system: gapic_v1.method.wrap_method( + self.create_db_system, + default_timeout=None, + client_info=client_info, + ), + self.delete_db_system: gapic_v1.method.wrap_method( + self.delete_db_system, + default_timeout=None, + client_info=client_info, + ), + self.list_db_versions: gapic_v1.method.wrap_method( + self.list_db_versions, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + 
deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_database_character_sets: gapic_v1.method.wrap_method( + self.list_database_character_sets, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_location: gapic_v1.method.wrap_method( + self.get_location, + default_timeout=None, + client_info=client_info, + ), + self.list_locations: gapic_v1.method.wrap_method( + self.list_locations, + default_timeout=None, + client_info=client_info, + ), + self.cancel_operation: gapic_v1.method.wrap_method( + self.cancel_operation, + default_timeout=None, + client_info=client_info, + ), + self.delete_operation: gapic_v1.method.wrap_method( + self.delete_operation, + default_timeout=None, + client_info=client_info, + ), + self.get_operation: gapic_v1.method.wrap_method( + self.get_operation, + default_timeout=None, + client_info=client_info, + ), + self.list_operations: gapic_v1.method.wrap_method( + self.list_operations, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def list_cloud_exadata_infrastructures( + self, + ) -> Callable[ + [oracledatabase.ListCloudExadataInfrastructuresRequest], + Union[ + oracledatabase.ListCloudExadataInfrastructuresResponse, + Awaitable[oracledatabase.ListCloudExadataInfrastructuresResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_cloud_exadata_infrastructure( + self, + ) -> Callable[ + [oracledatabase.GetCloudExadataInfrastructureRequest], + Union[ + exadata_infra.CloudExadataInfrastructure, + Awaitable[exadata_infra.CloudExadataInfrastructure], + ], + ]: + raise NotImplementedError() + + @property + def create_cloud_exadata_infrastructure( + self, + ) -> Callable[ + [oracledatabase.CreateCloudExadataInfrastructureRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_cloud_exadata_infrastructure( + self, + ) -> Callable[ + [oracledatabase.DeleteCloudExadataInfrastructureRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def list_cloud_vm_clusters( + self, + ) -> Callable[ + [oracledatabase.ListCloudVmClustersRequest], + Union[ + oracledatabase.ListCloudVmClustersResponse, + Awaitable[oracledatabase.ListCloudVmClustersResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_cloud_vm_cluster( + self, + ) -> Callable[ + [oracledatabase.GetCloudVmClusterRequest], + Union[vm_cluster.CloudVmCluster, Awaitable[vm_cluster.CloudVmCluster]], + ]: + raise NotImplementedError() + + @property + def create_cloud_vm_cluster( + self, + ) -> Callable[ + [oracledatabase.CreateCloudVmClusterRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + 
@property + def delete_cloud_vm_cluster( + self, + ) -> Callable[ + [oracledatabase.DeleteCloudVmClusterRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def list_entitlements( + self, + ) -> Callable[ + [oracledatabase.ListEntitlementsRequest], + Union[ + oracledatabase.ListEntitlementsResponse, + Awaitable[oracledatabase.ListEntitlementsResponse], + ], + ]: + raise NotImplementedError() + + @property + def list_db_servers( + self, + ) -> Callable[ + [oracledatabase.ListDbServersRequest], + Union[ + oracledatabase.ListDbServersResponse, + Awaitable[oracledatabase.ListDbServersResponse], + ], + ]: + raise NotImplementedError() + + @property + def list_db_nodes( + self, + ) -> Callable[ + [oracledatabase.ListDbNodesRequest], + Union[ + oracledatabase.ListDbNodesResponse, + Awaitable[oracledatabase.ListDbNodesResponse], + ], + ]: + raise NotImplementedError() + + @property + def list_gi_versions( + self, + ) -> Callable[ + [oracledatabase.ListGiVersionsRequest], + Union[ + oracledatabase.ListGiVersionsResponse, + Awaitable[oracledatabase.ListGiVersionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def list_minor_versions( + self, + ) -> Callable[ + [minor_version.ListMinorVersionsRequest], + Union[ + minor_version.ListMinorVersionsResponse, + Awaitable[minor_version.ListMinorVersionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def list_db_system_shapes( + self, + ) -> Callable[ + [oracledatabase.ListDbSystemShapesRequest], + Union[ + oracledatabase.ListDbSystemShapesResponse, + Awaitable[oracledatabase.ListDbSystemShapesResponse], + ], + ]: + raise NotImplementedError() + + @property + def list_autonomous_databases( + self, + ) -> Callable[ + [oracledatabase.ListAutonomousDatabasesRequest], + Union[ + oracledatabase.ListAutonomousDatabasesResponse, + Awaitable[oracledatabase.ListAutonomousDatabasesResponse], + ], + ]: + raise 
NotImplementedError() @property def get_autonomous_database( @@ -653,6 +1003,15 @@ def create_autonomous_database( ]: raise NotImplementedError() + @property + def update_autonomous_database( + self, + ) -> Callable[ + [oracledatabase.UpdateAutonomousDatabaseRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + @property def delete_autonomous_database( self, @@ -746,6 +1105,323 @@ def restart_autonomous_database( ]: raise NotImplementedError() + @property + def switchover_autonomous_database( + self, + ) -> Callable[ + [oracledatabase.SwitchoverAutonomousDatabaseRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def failover_autonomous_database( + self, + ) -> Callable[ + [oracledatabase.FailoverAutonomousDatabaseRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def list_odb_networks( + self, + ) -> Callable[ + [odb_network.ListOdbNetworksRequest], + Union[ + odb_network.ListOdbNetworksResponse, + Awaitable[odb_network.ListOdbNetworksResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_odb_network( + self, + ) -> Callable[ + [odb_network.GetOdbNetworkRequest], + Union[odb_network.OdbNetwork, Awaitable[odb_network.OdbNetwork]], + ]: + raise NotImplementedError() + + @property + def create_odb_network( + self, + ) -> Callable[ + [gco_odb_network.CreateOdbNetworkRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_odb_network( + self, + ) -> Callable[ + [odb_network.DeleteOdbNetworkRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def list_odb_subnets( + self, + ) -> Callable[ + [odb_subnet.ListOdbSubnetsRequest], + Union[ + 
odb_subnet.ListOdbSubnetsResponse, + Awaitable[odb_subnet.ListOdbSubnetsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_odb_subnet( + self, + ) -> Callable[ + [odb_subnet.GetOdbSubnetRequest], + Union[odb_subnet.OdbSubnet, Awaitable[odb_subnet.OdbSubnet]], + ]: + raise NotImplementedError() + + @property + def create_odb_subnet( + self, + ) -> Callable[ + [gco_odb_subnet.CreateOdbSubnetRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_odb_subnet( + self, + ) -> Callable[ + [odb_subnet.DeleteOdbSubnetRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def list_exadb_vm_clusters( + self, + ) -> Callable[ + [oracledatabase.ListExadbVmClustersRequest], + Union[ + oracledatabase.ListExadbVmClustersResponse, + Awaitable[oracledatabase.ListExadbVmClustersResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_exadb_vm_cluster( + self, + ) -> Callable[ + [oracledatabase.GetExadbVmClusterRequest], + Union[ + exadb_vm_cluster.ExadbVmCluster, Awaitable[exadb_vm_cluster.ExadbVmCluster] + ], + ]: + raise NotImplementedError() + + @property + def create_exadb_vm_cluster( + self, + ) -> Callable[ + [oracledatabase.CreateExadbVmClusterRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_exadb_vm_cluster( + self, + ) -> Callable[ + [oracledatabase.DeleteExadbVmClusterRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def update_exadb_vm_cluster( + self, + ) -> Callable[ + [oracledatabase.UpdateExadbVmClusterRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def remove_virtual_machine_exadb_vm_cluster( + self, + ) 
-> Callable[ + [oracledatabase.RemoveVirtualMachineExadbVmClusterRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def list_exascale_db_storage_vaults( + self, + ) -> Callable[ + [exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest], + Union[ + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse, + Awaitable[exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_exascale_db_storage_vault( + self, + ) -> Callable[ + [exascale_db_storage_vault.GetExascaleDbStorageVaultRequest], + Union[ + exascale_db_storage_vault.ExascaleDbStorageVault, + Awaitable[exascale_db_storage_vault.ExascaleDbStorageVault], + ], + ]: + raise NotImplementedError() + + @property + def create_exascale_db_storage_vault( + self, + ) -> Callable[ + [gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_exascale_db_storage_vault( + self, + ) -> Callable[ + [exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def list_db_system_initial_storage_sizes( + self, + ) -> Callable[ + [db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest], + Union[ + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse, + Awaitable[ + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse + ], + ], + ]: + raise NotImplementedError() + + @property + def list_databases( + self, + ) -> Callable[ + [database.ListDatabasesRequest], + Union[ + database.ListDatabasesResponse, Awaitable[database.ListDatabasesResponse] + ], + ]: + raise NotImplementedError() + + @property + def get_database( + self, + ) -> Callable[ + 
[database.GetDatabaseRequest], + Union[database.Database, Awaitable[database.Database]], + ]: + raise NotImplementedError() + + @property + def list_pluggable_databases( + self, + ) -> Callable[ + [pluggable_database.ListPluggableDatabasesRequest], + Union[ + pluggable_database.ListPluggableDatabasesResponse, + Awaitable[pluggable_database.ListPluggableDatabasesResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_pluggable_database( + self, + ) -> Callable[ + [pluggable_database.GetPluggableDatabaseRequest], + Union[ + pluggable_database.PluggableDatabase, + Awaitable[pluggable_database.PluggableDatabase], + ], + ]: + raise NotImplementedError() + + @property + def list_db_systems( + self, + ) -> Callable[ + [db_system.ListDbSystemsRequest], + Union[ + db_system.ListDbSystemsResponse, Awaitable[db_system.ListDbSystemsResponse] + ], + ]: + raise NotImplementedError() + + @property + def get_db_system( + self, + ) -> Callable[ + [db_system.GetDbSystemRequest], + Union[db_system.DbSystem, Awaitable[db_system.DbSystem]], + ]: + raise NotImplementedError() + + @property + def create_db_system( + self, + ) -> Callable[ + [gco_db_system.CreateDbSystemRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_db_system( + self, + ) -> Callable[ + [db_system.DeleteDbSystemRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def list_db_versions( + self, + ) -> Callable[ + [db_version.ListDbVersionsRequest], + Union[ + db_version.ListDbVersionsResponse, + Awaitable[db_version.ListDbVersionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def list_database_character_sets( + self, + ) -> Callable[ + [database_character_set.ListDatabaseCharacterSetsRequest], + Union[ + database_character_set.ListDatabaseCharacterSetsResponse, + 
Awaitable[database_character_set.ListDatabaseCharacterSetsResponse], + ], + ]: + raise NotImplementedError() + @property def list_operations( self, diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/transports/grpc.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/transports/grpc.py index 54da5520416f..804b87b72872 100644 --- a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/transports/grpc.py +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/transports/grpc.py @@ -31,11 +31,32 @@ import proto # type: ignore from google.cloud.oracledatabase_v1.types import ( - autonomous_database, + db_system_initial_storage_size, + db_version, exadata_infra, + exadb_vm_cluster, +) +from google.cloud.oracledatabase_v1.types import ( + autonomous_database, + database, + database_character_set, +) +from google.cloud.oracledatabase_v1.types import ( oracledatabase, + pluggable_database, vm_cluster, ) +from google.cloud.oracledatabase_v1.types import ( + exascale_db_storage_vault as gco_exascale_db_storage_vault, +) +from google.cloud.oracledatabase_v1.types import db_system +from google.cloud.oracledatabase_v1.types import db_system as gco_db_system +from google.cloud.oracledatabase_v1.types import exascale_db_storage_vault +from google.cloud.oracledatabase_v1.types import minor_version +from google.cloud.oracledatabase_v1.types import odb_network +from google.cloud.oracledatabase_v1.types import odb_network as gco_odb_network +from google.cloud.oracledatabase_v1.types import odb_subnet +from google.cloud.oracledatabase_v1.types import odb_subnet as gco_odb_subnet from .base import DEFAULT_CLIENT_INFO, OracleDatabaseTransport @@ -703,6 +724,36 @@ def list_gi_versions( ) return self._stubs["list_gi_versions"] + @property + def list_minor_versions( + self, + ) -> Callable[ + 
[minor_version.ListMinorVersionsRequest], + minor_version.ListMinorVersionsResponse, + ]: + r"""Return a callable for the list minor versions method over gRPC. + + Lists all the valid minor versions for the given + project, location, gi version and shape family. + + Returns: + Callable[[~.ListMinorVersionsRequest], + ~.ListMinorVersionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_minor_versions" not in self._stubs: + self._stubs["list_minor_versions"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/ListMinorVersions", + request_serializer=minor_version.ListMinorVersionsRequest.serialize, + response_deserializer=minor_version.ListMinorVersionsResponse.deserialize, + ) + return self._stubs["list_minor_versions"] + @property def list_db_system_shapes( self, @@ -823,6 +874,37 @@ def create_autonomous_database( ) return self._stubs["create_autonomous_database"] + @property + def update_autonomous_database( + self, + ) -> Callable[ + [oracledatabase.UpdateAutonomousDatabaseRequest], operations_pb2.Operation + ]: + r"""Return a callable for the update autonomous database method over gRPC. + + Updates the parameters of a single Autonomous + Database. + + Returns: + Callable[[~.UpdateAutonomousDatabaseRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_autonomous_database" not in self._stubs: + self._stubs[ + "update_autonomous_database" + ] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/UpdateAutonomousDatabase", + request_serializer=oracledatabase.UpdateAutonomousDatabaseRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_autonomous_database"] + @property def delete_autonomous_database( self, @@ -1099,6 +1181,898 @@ def restart_autonomous_database( ) return self._stubs["restart_autonomous_database"] + @property + def switchover_autonomous_database( + self, + ) -> Callable[ + [oracledatabase.SwitchoverAutonomousDatabaseRequest], operations_pb2.Operation + ]: + r"""Return a callable for the switchover autonomous database method over gRPC. + + Initiates a switchover of specified autonomous + database to the associated peer database. + + Returns: + Callable[[~.SwitchoverAutonomousDatabaseRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "switchover_autonomous_database" not in self._stubs: + self._stubs[ + "switchover_autonomous_database" + ] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/SwitchoverAutonomousDatabase", + request_serializer=oracledatabase.SwitchoverAutonomousDatabaseRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["switchover_autonomous_database"] + + @property + def failover_autonomous_database( + self, + ) -> Callable[ + [oracledatabase.FailoverAutonomousDatabaseRequest], operations_pb2.Operation + ]: + r"""Return a callable for the failover autonomous database method over gRPC. 
+ + Initiates a failover to target autonomous database + from the associated primary database. + + Returns: + Callable[[~.FailoverAutonomousDatabaseRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "failover_autonomous_database" not in self._stubs: + self._stubs[ + "failover_autonomous_database" + ] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/FailoverAutonomousDatabase", + request_serializer=oracledatabase.FailoverAutonomousDatabaseRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["failover_autonomous_database"] + + @property + def list_odb_networks( + self, + ) -> Callable[ + [odb_network.ListOdbNetworksRequest], odb_network.ListOdbNetworksResponse + ]: + r"""Return a callable for the list odb networks method over gRPC. + + Lists the ODB Networks in a given project and + location. + + Returns: + Callable[[~.ListOdbNetworksRequest], + ~.ListOdbNetworksResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_odb_networks" not in self._stubs: + self._stubs["list_odb_networks"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/ListOdbNetworks", + request_serializer=odb_network.ListOdbNetworksRequest.serialize, + response_deserializer=odb_network.ListOdbNetworksResponse.deserialize, + ) + return self._stubs["list_odb_networks"] + + @property + def get_odb_network( + self, + ) -> Callable[[odb_network.GetOdbNetworkRequest], odb_network.OdbNetwork]: + r"""Return a callable for the get odb network method over gRPC. + + Gets details of a single ODB Network. + + Returns: + Callable[[~.GetOdbNetworkRequest], + ~.OdbNetwork]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_odb_network" not in self._stubs: + self._stubs["get_odb_network"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/GetOdbNetwork", + request_serializer=odb_network.GetOdbNetworkRequest.serialize, + response_deserializer=odb_network.OdbNetwork.deserialize, + ) + return self._stubs["get_odb_network"] + + @property + def create_odb_network( + self, + ) -> Callable[[gco_odb_network.CreateOdbNetworkRequest], operations_pb2.Operation]: + r"""Return a callable for the create odb network method over gRPC. + + Creates a new ODB Network in a given project and + location. + + Returns: + Callable[[~.CreateOdbNetworkRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_odb_network" not in self._stubs: + self._stubs["create_odb_network"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/CreateOdbNetwork", + request_serializer=gco_odb_network.CreateOdbNetworkRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_odb_network"] + + @property + def delete_odb_network( + self, + ) -> Callable[[odb_network.DeleteOdbNetworkRequest], operations_pb2.Operation]: + r"""Return a callable for the delete odb network method over gRPC. + + Deletes a single ODB Network. + + Returns: + Callable[[~.DeleteOdbNetworkRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_odb_network" not in self._stubs: + self._stubs["delete_odb_network"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/DeleteOdbNetwork", + request_serializer=odb_network.DeleteOdbNetworkRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_odb_network"] + + @property + def list_odb_subnets( + self, + ) -> Callable[ + [odb_subnet.ListOdbSubnetsRequest], odb_subnet.ListOdbSubnetsResponse + ]: + r"""Return a callable for the list odb subnets method over gRPC. + + Lists all the ODB Subnets in a given ODB Network. + + Returns: + Callable[[~.ListOdbSubnetsRequest], + ~.ListOdbSubnetsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_odb_subnets" not in self._stubs: + self._stubs["list_odb_subnets"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/ListOdbSubnets", + request_serializer=odb_subnet.ListOdbSubnetsRequest.serialize, + response_deserializer=odb_subnet.ListOdbSubnetsResponse.deserialize, + ) + return self._stubs["list_odb_subnets"] + + @property + def get_odb_subnet( + self, + ) -> Callable[[odb_subnet.GetOdbSubnetRequest], odb_subnet.OdbSubnet]: + r"""Return a callable for the get odb subnet method over gRPC. + + Gets details of a single ODB Subnet. + + Returns: + Callable[[~.GetOdbSubnetRequest], + ~.OdbSubnet]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_odb_subnet" not in self._stubs: + self._stubs["get_odb_subnet"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/GetOdbSubnet", + request_serializer=odb_subnet.GetOdbSubnetRequest.serialize, + response_deserializer=odb_subnet.OdbSubnet.deserialize, + ) + return self._stubs["get_odb_subnet"] + + @property + def create_odb_subnet( + self, + ) -> Callable[[gco_odb_subnet.CreateOdbSubnetRequest], operations_pb2.Operation]: + r"""Return a callable for the create odb subnet method over gRPC. + + Creates a new ODB Subnet in a given ODB Network. + + Returns: + Callable[[~.CreateOdbSubnetRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_odb_subnet" not in self._stubs: + self._stubs["create_odb_subnet"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/CreateOdbSubnet", + request_serializer=gco_odb_subnet.CreateOdbSubnetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_odb_subnet"] + + @property + def delete_odb_subnet( + self, + ) -> Callable[[odb_subnet.DeleteOdbSubnetRequest], operations_pb2.Operation]: + r"""Return a callable for the delete odb subnet method over gRPC. + + Deletes a single ODB Subnet. + + Returns: + Callable[[~.DeleteOdbSubnetRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_odb_subnet" not in self._stubs: + self._stubs["delete_odb_subnet"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/DeleteOdbSubnet", + request_serializer=odb_subnet.DeleteOdbSubnetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_odb_subnet"] + + @property + def list_exadb_vm_clusters( + self, + ) -> Callable[ + [oracledatabase.ListExadbVmClustersRequest], + oracledatabase.ListExadbVmClustersResponse, + ]: + r"""Return a callable for the list exadb vm clusters method over gRPC. + + Lists all the Exadb (Exascale) VM Clusters for the + given project and location. + + Returns: + Callable[[~.ListExadbVmClustersRequest], + ~.ListExadbVmClustersResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_exadb_vm_clusters" not in self._stubs: + self._stubs["list_exadb_vm_clusters"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/ListExadbVmClusters", + request_serializer=oracledatabase.ListExadbVmClustersRequest.serialize, + response_deserializer=oracledatabase.ListExadbVmClustersResponse.deserialize, + ) + return self._stubs["list_exadb_vm_clusters"] + + @property + def get_exadb_vm_cluster( + self, + ) -> Callable[ + [oracledatabase.GetExadbVmClusterRequest], exadb_vm_cluster.ExadbVmCluster + ]: + r"""Return a callable for the get exadb vm cluster method over gRPC. + + Gets details of a single Exadb (Exascale) VM Cluster. + + Returns: + Callable[[~.GetExadbVmClusterRequest], + ~.ExadbVmCluster]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_exadb_vm_cluster" not in self._stubs: + self._stubs["get_exadb_vm_cluster"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/GetExadbVmCluster", + request_serializer=oracledatabase.GetExadbVmClusterRequest.serialize, + response_deserializer=exadb_vm_cluster.ExadbVmCluster.deserialize, + ) + return self._stubs["get_exadb_vm_cluster"] + + @property + def create_exadb_vm_cluster( + self, + ) -> Callable[ + [oracledatabase.CreateExadbVmClusterRequest], operations_pb2.Operation + ]: + r"""Return a callable for the create exadb vm cluster method over gRPC. + + Creates a new Exadb (Exascale) VM Cluster resource. + + Returns: + Callable[[~.CreateExadbVmClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_exadb_vm_cluster" not in self._stubs: + self._stubs["create_exadb_vm_cluster"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/CreateExadbVmCluster", + request_serializer=oracledatabase.CreateExadbVmClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_exadb_vm_cluster"] + + @property + def delete_exadb_vm_cluster( + self, + ) -> Callable[ + [oracledatabase.DeleteExadbVmClusterRequest], operations_pb2.Operation + ]: + r"""Return a callable for the delete exadb vm cluster method over gRPC. + + Deletes a single Exadb (Exascale) VM Cluster. + + Returns: + Callable[[~.DeleteExadbVmClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_exadb_vm_cluster" not in self._stubs: + self._stubs["delete_exadb_vm_cluster"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/DeleteExadbVmCluster", + request_serializer=oracledatabase.DeleteExadbVmClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_exadb_vm_cluster"] + + @property + def update_exadb_vm_cluster( + self, + ) -> Callable[ + [oracledatabase.UpdateExadbVmClusterRequest], operations_pb2.Operation + ]: + r"""Return a callable for the update exadb vm cluster method over gRPC. + + Updates a single Exadb (Exascale) VM Cluster. To add + virtual machines to existing exadb vm cluster, only pass + the node count. 
+ + Returns: + Callable[[~.UpdateExadbVmClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_exadb_vm_cluster" not in self._stubs: + self._stubs["update_exadb_vm_cluster"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/UpdateExadbVmCluster", + request_serializer=oracledatabase.UpdateExadbVmClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_exadb_vm_cluster"] + + @property + def remove_virtual_machine_exadb_vm_cluster( + self, + ) -> Callable[ + [oracledatabase.RemoveVirtualMachineExadbVmClusterRequest], + operations_pb2.Operation, + ]: + r"""Return a callable for the remove virtual machine exadb + vm cluster method over gRPC. + + Removes virtual machines from an existing exadb vm + cluster. + + Returns: + Callable[[~.RemoveVirtualMachineExadbVmClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "remove_virtual_machine_exadb_vm_cluster" not in self._stubs: + self._stubs[ + "remove_virtual_machine_exadb_vm_cluster" + ] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/RemoveVirtualMachineExadbVmCluster", + request_serializer=oracledatabase.RemoveVirtualMachineExadbVmClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["remove_virtual_machine_exadb_vm_cluster"] + + @property + def list_exascale_db_storage_vaults( + self, + ) -> Callable[ + [exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest], + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse, + ]: + r"""Return a callable for the list exascale db storage + vaults method over gRPC. + + Lists all the ExascaleDB Storage Vaults for the given + project and location. + + Returns: + Callable[[~.ListExascaleDbStorageVaultsRequest], + ~.ListExascaleDbStorageVaultsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_exascale_db_storage_vaults" not in self._stubs: + self._stubs[ + "list_exascale_db_storage_vaults" + ] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/ListExascaleDbStorageVaults", + request_serializer=exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest.serialize, + response_deserializer=exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse.deserialize, + ) + return self._stubs["list_exascale_db_storage_vaults"] + + @property + def get_exascale_db_storage_vault( + self, + ) -> Callable[ + [exascale_db_storage_vault.GetExascaleDbStorageVaultRequest], + exascale_db_storage_vault.ExascaleDbStorageVault, + ]: + r"""Return a callable for the get exascale db storage vault method over gRPC. 
+ + Gets details of a single ExascaleDB Storage Vault. + + Returns: + Callable[[~.GetExascaleDbStorageVaultRequest], + ~.ExascaleDbStorageVault]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_exascale_db_storage_vault" not in self._stubs: + self._stubs[ + "get_exascale_db_storage_vault" + ] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/GetExascaleDbStorageVault", + request_serializer=exascale_db_storage_vault.GetExascaleDbStorageVaultRequest.serialize, + response_deserializer=exascale_db_storage_vault.ExascaleDbStorageVault.deserialize, + ) + return self._stubs["get_exascale_db_storage_vault"] + + @property + def create_exascale_db_storage_vault( + self, + ) -> Callable[ + [gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest], + operations_pb2.Operation, + ]: + r"""Return a callable for the create exascale db storage + vault method over gRPC. + + Creates a new ExascaleDB Storage Vault resource. + + Returns: + Callable[[~.CreateExascaleDbStorageVaultRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_exascale_db_storage_vault" not in self._stubs: + self._stubs[ + "create_exascale_db_storage_vault" + ] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/CreateExascaleDbStorageVault", + request_serializer=gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_exascale_db_storage_vault"] + + @property + def delete_exascale_db_storage_vault( + self, + ) -> Callable[ + [exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest], + operations_pb2.Operation, + ]: + r"""Return a callable for the delete exascale db storage + vault method over gRPC. + + Deletes a single ExascaleDB Storage Vault. + + Returns: + Callable[[~.DeleteExascaleDbStorageVaultRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_exascale_db_storage_vault" not in self._stubs: + self._stubs[ + "delete_exascale_db_storage_vault" + ] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/DeleteExascaleDbStorageVault", + request_serializer=exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_exascale_db_storage_vault"] + + @property + def list_db_system_initial_storage_sizes( + self, + ) -> Callable[ + [db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest], + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse, + ]: + r"""Return a callable for the list db system initial storage + sizes method over gRPC. + + Lists all the DbSystemInitialStorageSizes for the + given project and location. 
+ + Returns: + Callable[[~.ListDbSystemInitialStorageSizesRequest], + ~.ListDbSystemInitialStorageSizesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_db_system_initial_storage_sizes" not in self._stubs: + self._stubs[ + "list_db_system_initial_storage_sizes" + ] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/ListDbSystemInitialStorageSizes", + request_serializer=db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest.serialize, + response_deserializer=db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse.deserialize, + ) + return self._stubs["list_db_system_initial_storage_sizes"] + + @property + def list_databases( + self, + ) -> Callable[[database.ListDatabasesRequest], database.ListDatabasesResponse]: + r"""Return a callable for the list databases method over gRPC. + + Lists all the Databases for the given project, + location and DbSystem. + + Returns: + Callable[[~.ListDatabasesRequest], + ~.ListDatabasesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_databases" not in self._stubs: + self._stubs["list_databases"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/ListDatabases", + request_serializer=database.ListDatabasesRequest.serialize, + response_deserializer=database.ListDatabasesResponse.deserialize, + ) + return self._stubs["list_databases"] + + @property + def get_database( + self, + ) -> Callable[[database.GetDatabaseRequest], database.Database]: + r"""Return a callable for the get database method over gRPC. + + Gets details of a single Database. + + Returns: + Callable[[~.GetDatabaseRequest], + ~.Database]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_database" not in self._stubs: + self._stubs["get_database"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/GetDatabase", + request_serializer=database.GetDatabaseRequest.serialize, + response_deserializer=database.Database.deserialize, + ) + return self._stubs["get_database"] + + @property + def list_pluggable_databases( + self, + ) -> Callable[ + [pluggable_database.ListPluggableDatabasesRequest], + pluggable_database.ListPluggableDatabasesResponse, + ]: + r"""Return a callable for the list pluggable databases method over gRPC. + + Lists all the PluggableDatabases for the given + project, location and Container Database. + + Returns: + Callable[[~.ListPluggableDatabasesRequest], + ~.ListPluggableDatabasesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_pluggable_databases" not in self._stubs: + self._stubs["list_pluggable_databases"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/ListPluggableDatabases", + request_serializer=pluggable_database.ListPluggableDatabasesRequest.serialize, + response_deserializer=pluggable_database.ListPluggableDatabasesResponse.deserialize, + ) + return self._stubs["list_pluggable_databases"] + + @property + def get_pluggable_database( + self, + ) -> Callable[ + [pluggable_database.GetPluggableDatabaseRequest], + pluggable_database.PluggableDatabase, + ]: + r"""Return a callable for the get pluggable database method over gRPC. + + Gets details of a single PluggableDatabase. + + Returns: + Callable[[~.GetPluggableDatabaseRequest], + ~.PluggableDatabase]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_pluggable_database" not in self._stubs: + self._stubs["get_pluggable_database"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/GetPluggableDatabase", + request_serializer=pluggable_database.GetPluggableDatabaseRequest.serialize, + response_deserializer=pluggable_database.PluggableDatabase.deserialize, + ) + return self._stubs["get_pluggable_database"] + + @property + def list_db_systems( + self, + ) -> Callable[[db_system.ListDbSystemsRequest], db_system.ListDbSystemsResponse]: + r"""Return a callable for the list db systems method over gRPC. + + Lists all the DbSystems for the given project and + location. + + Returns: + Callable[[~.ListDbSystemsRequest], + ~.ListDbSystemsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_db_systems" not in self._stubs: + self._stubs["list_db_systems"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/ListDbSystems", + request_serializer=db_system.ListDbSystemsRequest.serialize, + response_deserializer=db_system.ListDbSystemsResponse.deserialize, + ) + return self._stubs["list_db_systems"] + + @property + def get_db_system( + self, + ) -> Callable[[db_system.GetDbSystemRequest], db_system.DbSystem]: + r"""Return a callable for the get db system method over gRPC. + + Gets details of a single DbSystem. + + Returns: + Callable[[~.GetDbSystemRequest], + ~.DbSystem]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_db_system" not in self._stubs: + self._stubs["get_db_system"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/GetDbSystem", + request_serializer=db_system.GetDbSystemRequest.serialize, + response_deserializer=db_system.DbSystem.deserialize, + ) + return self._stubs["get_db_system"] + + @property + def create_db_system( + self, + ) -> Callable[[gco_db_system.CreateDbSystemRequest], operations_pb2.Operation]: + r"""Return a callable for the create db system method over gRPC. + + Creates a new DbSystem in a given project and + location. + + Returns: + Callable[[~.CreateDbSystemRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_db_system" not in self._stubs: + self._stubs["create_db_system"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/CreateDbSystem", + request_serializer=gco_db_system.CreateDbSystemRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_db_system"] + + @property + def delete_db_system( + self, + ) -> Callable[[db_system.DeleteDbSystemRequest], operations_pb2.Operation]: + r"""Return a callable for the delete db system method over gRPC. + + Deletes a single DbSystem. + + Returns: + Callable[[~.DeleteDbSystemRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_db_system" not in self._stubs: + self._stubs["delete_db_system"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/DeleteDbSystem", + request_serializer=db_system.DeleteDbSystemRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_db_system"] + + @property + def list_db_versions( + self, + ) -> Callable[ + [db_version.ListDbVersionsRequest], db_version.ListDbVersionsResponse + ]: + r"""Return a callable for the list db versions method over gRPC. + + List DbVersions for the given project and location. + + Returns: + Callable[[~.ListDbVersionsRequest], + ~.ListDbVersionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_db_versions" not in self._stubs: + self._stubs["list_db_versions"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/ListDbVersions", + request_serializer=db_version.ListDbVersionsRequest.serialize, + response_deserializer=db_version.ListDbVersionsResponse.deserialize, + ) + return self._stubs["list_db_versions"] + + @property + def list_database_character_sets( + self, + ) -> Callable[ + [database_character_set.ListDatabaseCharacterSetsRequest], + database_character_set.ListDatabaseCharacterSetsResponse, + ]: + r"""Return a callable for the list database character sets method over gRPC. + + List DatabaseCharacterSets for the given project and + location. + + Returns: + Callable[[~.ListDatabaseCharacterSetsRequest], + ~.ListDatabaseCharacterSetsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_database_character_sets" not in self._stubs: + self._stubs[ + "list_database_character_sets" + ] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/ListDatabaseCharacterSets", + request_serializer=database_character_set.ListDatabaseCharacterSetsRequest.serialize, + response_deserializer=database_character_set.ListDatabaseCharacterSetsResponse.deserialize, + ) + return self._stubs["list_database_character_sets"] + def close(self): self._logged_channel.close() diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/transports/grpc_asyncio.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/transports/grpc_asyncio.py index ac848823c962..00d9615caa28 100644 --- a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/transports/grpc_asyncio.py +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/transports/grpc_asyncio.py @@ -34,11 +34,32 @@ import proto # type: ignore from google.cloud.oracledatabase_v1.types import ( - autonomous_database, + db_system_initial_storage_size, + db_version, exadata_infra, + exadb_vm_cluster, +) +from google.cloud.oracledatabase_v1.types import ( + autonomous_database, + database, + database_character_set, +) +from google.cloud.oracledatabase_v1.types import ( oracledatabase, + pluggable_database, vm_cluster, ) +from google.cloud.oracledatabase_v1.types import ( + exascale_db_storage_vault as gco_exascale_db_storage_vault, +) +from google.cloud.oracledatabase_v1.types import db_system +from google.cloud.oracledatabase_v1.types import db_system as gco_db_system +from google.cloud.oracledatabase_v1.types import exascale_db_storage_vault +from google.cloud.oracledatabase_v1.types import minor_version +from google.cloud.oracledatabase_v1.types import odb_network +from google.cloud.oracledatabase_v1.types import 
odb_network as gco_odb_network +from google.cloud.oracledatabase_v1.types import odb_subnet +from google.cloud.oracledatabase_v1.types import odb_subnet as gco_odb_subnet from .base import DEFAULT_CLIENT_INFO, OracleDatabaseTransport from .grpc import OracleDatabaseGrpcTransport @@ -718,6 +739,36 @@ def list_gi_versions( ) return self._stubs["list_gi_versions"] + @property + def list_minor_versions( + self, + ) -> Callable[ + [minor_version.ListMinorVersionsRequest], + Awaitable[minor_version.ListMinorVersionsResponse], + ]: + r"""Return a callable for the list minor versions method over gRPC. + + Lists all the valid minor versions for the given + project, location, gi version and shape family. + + Returns: + Callable[[~.ListMinorVersionsRequest], + Awaitable[~.ListMinorVersionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_minor_versions" not in self._stubs: + self._stubs["list_minor_versions"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/ListMinorVersions", + request_serializer=minor_version.ListMinorVersionsRequest.serialize, + response_deserializer=minor_version.ListMinorVersionsResponse.deserialize, + ) + return self._stubs["list_minor_versions"] + @property def list_db_system_shapes( self, @@ -839,6 +890,38 @@ def create_autonomous_database( ) return self._stubs["create_autonomous_database"] + @property + def update_autonomous_database( + self, + ) -> Callable[ + [oracledatabase.UpdateAutonomousDatabaseRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the update autonomous database method over gRPC. + + Updates the parameters of a single Autonomous + Database. 
+ + Returns: + Callable[[~.UpdateAutonomousDatabaseRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_autonomous_database" not in self._stubs: + self._stubs[ + "update_autonomous_database" + ] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/UpdateAutonomousDatabase", + request_serializer=oracledatabase.UpdateAutonomousDatabaseRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_autonomous_database"] + @property def delete_autonomous_database( self, @@ -1120,246 +1203,1191 @@ def restart_autonomous_database( ) return self._stubs["restart_autonomous_database"] - def _prep_wrapped_messages(self, client_info): - """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" - self._wrapped_methods = { - self.list_cloud_exadata_infrastructures: self._wrap_method( - self.list_cloud_exadata_infrastructures, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=10.0, - multiplier=1.3, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.get_cloud_exadata_infrastructure: self._wrap_method( - self.get_cloud_exadata_infrastructure, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=10.0, - multiplier=1.3, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - 
client_info=client_info, - ), - self.create_cloud_exadata_infrastructure: self._wrap_method( - self.create_cloud_exadata_infrastructure, - default_timeout=None, - client_info=client_info, - ), - self.delete_cloud_exadata_infrastructure: self._wrap_method( - self.delete_cloud_exadata_infrastructure, - default_timeout=None, - client_info=client_info, - ), - self.list_cloud_vm_clusters: self._wrap_method( - self.list_cloud_vm_clusters, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=10.0, - multiplier=1.3, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.get_cloud_vm_cluster: self._wrap_method( - self.get_cloud_vm_cluster, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=10.0, - multiplier=1.3, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.create_cloud_vm_cluster: self._wrap_method( - self.create_cloud_vm_cluster, - default_timeout=None, - client_info=client_info, - ), - self.delete_cloud_vm_cluster: self._wrap_method( - self.delete_cloud_vm_cluster, - default_timeout=None, - client_info=client_info, - ), - self.list_entitlements: self._wrap_method( - self.list_entitlements, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=10.0, - multiplier=1.3, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.list_db_servers: self._wrap_method( - 
self.list_db_servers, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=10.0, - multiplier=1.3, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.list_db_nodes: self._wrap_method( - self.list_db_nodes, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=10.0, - multiplier=1.3, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.list_gi_versions: self._wrap_method( - self.list_gi_versions, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=10.0, - multiplier=1.3, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.list_db_system_shapes: self._wrap_method( - self.list_db_system_shapes, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=10.0, - multiplier=1.3, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.list_autonomous_databases: self._wrap_method( - self.list_autonomous_databases, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=10.0, - multiplier=1.3, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, - 
core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.get_autonomous_database: self._wrap_method( - self.get_autonomous_database, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=10.0, - multiplier=1.3, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.create_autonomous_database: self._wrap_method( - self.create_autonomous_database, - default_timeout=None, - client_info=client_info, - ), - self.delete_autonomous_database: self._wrap_method( - self.delete_autonomous_database, - default_timeout=None, - client_info=client_info, - ), - self.restore_autonomous_database: self._wrap_method( - self.restore_autonomous_database, - default_timeout=None, - client_info=client_info, - ), - self.generate_autonomous_database_wallet: self._wrap_method( - self.generate_autonomous_database_wallet, - default_timeout=None, - client_info=client_info, - ), - self.list_autonomous_db_versions: self._wrap_method( - self.list_autonomous_db_versions, - default_retry=retries.AsyncRetry( + @property + def switchover_autonomous_database( + self, + ) -> Callable[ + [oracledatabase.SwitchoverAutonomousDatabaseRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the switchover autonomous database method over gRPC. + + Initiates a switchover of specified autonomous + database to the associated peer database. + + Returns: + Callable[[~.SwitchoverAutonomousDatabaseRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "switchover_autonomous_database" not in self._stubs: + self._stubs[ + "switchover_autonomous_database" + ] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/SwitchoverAutonomousDatabase", + request_serializer=oracledatabase.SwitchoverAutonomousDatabaseRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["switchover_autonomous_database"] + + @property + def failover_autonomous_database( + self, + ) -> Callable[ + [oracledatabase.FailoverAutonomousDatabaseRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the failover autonomous database method over gRPC. + + Initiates a failover to target autonomous database + from the associated primary database. + + Returns: + Callable[[~.FailoverAutonomousDatabaseRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "failover_autonomous_database" not in self._stubs: + self._stubs[ + "failover_autonomous_database" + ] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/FailoverAutonomousDatabase", + request_serializer=oracledatabase.FailoverAutonomousDatabaseRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["failover_autonomous_database"] + + @property + def list_odb_networks( + self, + ) -> Callable[ + [odb_network.ListOdbNetworksRequest], + Awaitable[odb_network.ListOdbNetworksResponse], + ]: + r"""Return a callable for the list odb networks method over gRPC. + + Lists the ODB Networks in a given project and + location. 
+ + Returns: + Callable[[~.ListOdbNetworksRequest], + Awaitable[~.ListOdbNetworksResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_odb_networks" not in self._stubs: + self._stubs["list_odb_networks"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/ListOdbNetworks", + request_serializer=odb_network.ListOdbNetworksRequest.serialize, + response_deserializer=odb_network.ListOdbNetworksResponse.deserialize, + ) + return self._stubs["list_odb_networks"] + + @property + def get_odb_network( + self, + ) -> Callable[ + [odb_network.GetOdbNetworkRequest], Awaitable[odb_network.OdbNetwork] + ]: + r"""Return a callable for the get odb network method over gRPC. + + Gets details of a single ODB Network. + + Returns: + Callable[[~.GetOdbNetworkRequest], + Awaitable[~.OdbNetwork]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_odb_network" not in self._stubs: + self._stubs["get_odb_network"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/GetOdbNetwork", + request_serializer=odb_network.GetOdbNetworkRequest.serialize, + response_deserializer=odb_network.OdbNetwork.deserialize, + ) + return self._stubs["get_odb_network"] + + @property + def create_odb_network( + self, + ) -> Callable[ + [gco_odb_network.CreateOdbNetworkRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the create odb network method over gRPC. + + Creates a new ODB Network in a given project and + location. 
+ + Returns: + Callable[[~.CreateOdbNetworkRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_odb_network" not in self._stubs: + self._stubs["create_odb_network"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/CreateOdbNetwork", + request_serializer=gco_odb_network.CreateOdbNetworkRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_odb_network"] + + @property + def delete_odb_network( + self, + ) -> Callable[ + [odb_network.DeleteOdbNetworkRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the delete odb network method over gRPC. + + Deletes a single ODB Network. + + Returns: + Callable[[~.DeleteOdbNetworkRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_odb_network" not in self._stubs: + self._stubs["delete_odb_network"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/DeleteOdbNetwork", + request_serializer=odb_network.DeleteOdbNetworkRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_odb_network"] + + @property + def list_odb_subnets( + self, + ) -> Callable[ + [odb_subnet.ListOdbSubnetsRequest], Awaitable[odb_subnet.ListOdbSubnetsResponse] + ]: + r"""Return a callable for the list odb subnets method over gRPC. + + Lists all the ODB Subnets in a given ODB Network. 
+ + Returns: + Callable[[~.ListOdbSubnetsRequest], + Awaitable[~.ListOdbSubnetsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_odb_subnets" not in self._stubs: + self._stubs["list_odb_subnets"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/ListOdbSubnets", + request_serializer=odb_subnet.ListOdbSubnetsRequest.serialize, + response_deserializer=odb_subnet.ListOdbSubnetsResponse.deserialize, + ) + return self._stubs["list_odb_subnets"] + + @property + def get_odb_subnet( + self, + ) -> Callable[[odb_subnet.GetOdbSubnetRequest], Awaitable[odb_subnet.OdbSubnet]]: + r"""Return a callable for the get odb subnet method over gRPC. + + Gets details of a single ODB Subnet. + + Returns: + Callable[[~.GetOdbSubnetRequest], + Awaitable[~.OdbSubnet]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_odb_subnet" not in self._stubs: + self._stubs["get_odb_subnet"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/GetOdbSubnet", + request_serializer=odb_subnet.GetOdbSubnetRequest.serialize, + response_deserializer=odb_subnet.OdbSubnet.deserialize, + ) + return self._stubs["get_odb_subnet"] + + @property + def create_odb_subnet( + self, + ) -> Callable[ + [gco_odb_subnet.CreateOdbSubnetRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the create odb subnet method over gRPC. + + Creates a new ODB Subnet in a given ODB Network. 
+ + Returns: + Callable[[~.CreateOdbSubnetRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_odb_subnet" not in self._stubs: + self._stubs["create_odb_subnet"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/CreateOdbSubnet", + request_serializer=gco_odb_subnet.CreateOdbSubnetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_odb_subnet"] + + @property + def delete_odb_subnet( + self, + ) -> Callable[ + [odb_subnet.DeleteOdbSubnetRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the delete odb subnet method over gRPC. + + Deletes a single ODB Subnet. + + Returns: + Callable[[~.DeleteOdbSubnetRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_odb_subnet" not in self._stubs: + self._stubs["delete_odb_subnet"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/DeleteOdbSubnet", + request_serializer=odb_subnet.DeleteOdbSubnetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_odb_subnet"] + + @property + def list_exadb_vm_clusters( + self, + ) -> Callable[ + [oracledatabase.ListExadbVmClustersRequest], + Awaitable[oracledatabase.ListExadbVmClustersResponse], + ]: + r"""Return a callable for the list exadb vm clusters method over gRPC. 
+ + Lists all the Exadb (Exascale) VM Clusters for the + given project and location. + + Returns: + Callable[[~.ListExadbVmClustersRequest], + Awaitable[~.ListExadbVmClustersResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_exadb_vm_clusters" not in self._stubs: + self._stubs["list_exadb_vm_clusters"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/ListExadbVmClusters", + request_serializer=oracledatabase.ListExadbVmClustersRequest.serialize, + response_deserializer=oracledatabase.ListExadbVmClustersResponse.deserialize, + ) + return self._stubs["list_exadb_vm_clusters"] + + @property + def get_exadb_vm_cluster( + self, + ) -> Callable[ + [oracledatabase.GetExadbVmClusterRequest], + Awaitable[exadb_vm_cluster.ExadbVmCluster], + ]: + r"""Return a callable for the get exadb vm cluster method over gRPC. + + Gets details of a single Exadb (Exascale) VM Cluster. + + Returns: + Callable[[~.GetExadbVmClusterRequest], + Awaitable[~.ExadbVmCluster]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_exadb_vm_cluster" not in self._stubs: + self._stubs["get_exadb_vm_cluster"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/GetExadbVmCluster", + request_serializer=oracledatabase.GetExadbVmClusterRequest.serialize, + response_deserializer=exadb_vm_cluster.ExadbVmCluster.deserialize, + ) + return self._stubs["get_exadb_vm_cluster"] + + @property + def create_exadb_vm_cluster( + self, + ) -> Callable[ + [oracledatabase.CreateExadbVmClusterRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the create exadb vm cluster method over gRPC. + + Creates a new Exadb (Exascale) VM Cluster resource. + + Returns: + Callable[[~.CreateExadbVmClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_exadb_vm_cluster" not in self._stubs: + self._stubs["create_exadb_vm_cluster"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/CreateExadbVmCluster", + request_serializer=oracledatabase.CreateExadbVmClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_exadb_vm_cluster"] + + @property + def delete_exadb_vm_cluster( + self, + ) -> Callable[ + [oracledatabase.DeleteExadbVmClusterRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the delete exadb vm cluster method over gRPC. + + Deletes a single Exadb (Exascale) VM Cluster. + + Returns: + Callable[[~.DeleteExadbVmClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_exadb_vm_cluster" not in self._stubs: + self._stubs["delete_exadb_vm_cluster"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/DeleteExadbVmCluster", + request_serializer=oracledatabase.DeleteExadbVmClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_exadb_vm_cluster"] + + @property + def update_exadb_vm_cluster( + self, + ) -> Callable[ + [oracledatabase.UpdateExadbVmClusterRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the update exadb vm cluster method over gRPC. + + Updates a single Exadb (Exascale) VM Cluster. To add + virtual machines to existing exadb vm cluster, only pass + the node count. + + Returns: + Callable[[~.UpdateExadbVmClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_exadb_vm_cluster" not in self._stubs: + self._stubs["update_exadb_vm_cluster"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/UpdateExadbVmCluster", + request_serializer=oracledatabase.UpdateExadbVmClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_exadb_vm_cluster"] + + @property + def remove_virtual_machine_exadb_vm_cluster( + self, + ) -> Callable[ + [oracledatabase.RemoveVirtualMachineExadbVmClusterRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the remove virtual machine exadb + vm cluster method over gRPC. + + Removes virtual machines from an existing exadb vm + cluster. 
+ + Returns: + Callable[[~.RemoveVirtualMachineExadbVmClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "remove_virtual_machine_exadb_vm_cluster" not in self._stubs: + self._stubs[ + "remove_virtual_machine_exadb_vm_cluster" + ] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/RemoveVirtualMachineExadbVmCluster", + request_serializer=oracledatabase.RemoveVirtualMachineExadbVmClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["remove_virtual_machine_exadb_vm_cluster"] + + @property + def list_exascale_db_storage_vaults( + self, + ) -> Callable[ + [exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest], + Awaitable[exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse], + ]: + r"""Return a callable for the list exascale db storage + vaults method over gRPC. + + Lists all the ExascaleDB Storage Vaults for the given + project and location. + + Returns: + Callable[[~.ListExascaleDbStorageVaultsRequest], + Awaitable[~.ListExascaleDbStorageVaultsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_exascale_db_storage_vaults" not in self._stubs: + self._stubs[ + "list_exascale_db_storage_vaults" + ] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/ListExascaleDbStorageVaults", + request_serializer=exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest.serialize, + response_deserializer=exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse.deserialize, + ) + return self._stubs["list_exascale_db_storage_vaults"] + + @property + def get_exascale_db_storage_vault( + self, + ) -> Callable[ + [exascale_db_storage_vault.GetExascaleDbStorageVaultRequest], + Awaitable[exascale_db_storage_vault.ExascaleDbStorageVault], + ]: + r"""Return a callable for the get exascale db storage vault method over gRPC. + + Gets details of a single ExascaleDB Storage Vault. + + Returns: + Callable[[~.GetExascaleDbStorageVaultRequest], + Awaitable[~.ExascaleDbStorageVault]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_exascale_db_storage_vault" not in self._stubs: + self._stubs[ + "get_exascale_db_storage_vault" + ] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/GetExascaleDbStorageVault", + request_serializer=exascale_db_storage_vault.GetExascaleDbStorageVaultRequest.serialize, + response_deserializer=exascale_db_storage_vault.ExascaleDbStorageVault.deserialize, + ) + return self._stubs["get_exascale_db_storage_vault"] + + @property + def create_exascale_db_storage_vault( + self, + ) -> Callable[ + [gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the create exascale db storage + vault method over gRPC. + + Creates a new ExascaleDB Storage Vault resource. 
+ + Returns: + Callable[[~.CreateExascaleDbStorageVaultRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_exascale_db_storage_vault" not in self._stubs: + self._stubs[ + "create_exascale_db_storage_vault" + ] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/CreateExascaleDbStorageVault", + request_serializer=gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_exascale_db_storage_vault"] + + @property + def delete_exascale_db_storage_vault( + self, + ) -> Callable[ + [exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the delete exascale db storage + vault method over gRPC. + + Deletes a single ExascaleDB Storage Vault. + + Returns: + Callable[[~.DeleteExascaleDbStorageVaultRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_exascale_db_storage_vault" not in self._stubs: + self._stubs[ + "delete_exascale_db_storage_vault" + ] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/DeleteExascaleDbStorageVault", + request_serializer=exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_exascale_db_storage_vault"] + + @property + def list_db_system_initial_storage_sizes( + self, + ) -> Callable[ + [db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest], + Awaitable[ + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse + ], + ]: + r"""Return a callable for the list db system initial storage + sizes method over gRPC. + + Lists all the DbSystemInitialStorageSizes for the + given project and location. + + Returns: + Callable[[~.ListDbSystemInitialStorageSizesRequest], + Awaitable[~.ListDbSystemInitialStorageSizesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_db_system_initial_storage_sizes" not in self._stubs: + self._stubs[ + "list_db_system_initial_storage_sizes" + ] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/ListDbSystemInitialStorageSizes", + request_serializer=db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest.serialize, + response_deserializer=db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse.deserialize, + ) + return self._stubs["list_db_system_initial_storage_sizes"] + + @property + def list_databases( + self, + ) -> Callable[ + [database.ListDatabasesRequest], Awaitable[database.ListDatabasesResponse] + ]: + r"""Return a callable for the list databases method over gRPC. + + Lists all the Databases for the given project, + location and DbSystem. + + Returns: + Callable[[~.ListDatabasesRequest], + Awaitable[~.ListDatabasesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_databases" not in self._stubs: + self._stubs["list_databases"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/ListDatabases", + request_serializer=database.ListDatabasesRequest.serialize, + response_deserializer=database.ListDatabasesResponse.deserialize, + ) + return self._stubs["list_databases"] + + @property + def get_database( + self, + ) -> Callable[[database.GetDatabaseRequest], Awaitable[database.Database]]: + r"""Return a callable for the get database method over gRPC. + + Gets details of a single Database. + + Returns: + Callable[[~.GetDatabaseRequest], + Awaitable[~.Database]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_database" not in self._stubs: + self._stubs["get_database"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/GetDatabase", + request_serializer=database.GetDatabaseRequest.serialize, + response_deserializer=database.Database.deserialize, + ) + return self._stubs["get_database"] + + @property + def list_pluggable_databases( + self, + ) -> Callable[ + [pluggable_database.ListPluggableDatabasesRequest], + Awaitable[pluggable_database.ListPluggableDatabasesResponse], + ]: + r"""Return a callable for the list pluggable databases method over gRPC. + + Lists all the PluggableDatabases for the given + project, location and Container Database. + + Returns: + Callable[[~.ListPluggableDatabasesRequest], + Awaitable[~.ListPluggableDatabasesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_pluggable_databases" not in self._stubs: + self._stubs["list_pluggable_databases"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/ListPluggableDatabases", + request_serializer=pluggable_database.ListPluggableDatabasesRequest.serialize, + response_deserializer=pluggable_database.ListPluggableDatabasesResponse.deserialize, + ) + return self._stubs["list_pluggable_databases"] + + @property + def get_pluggable_database( + self, + ) -> Callable[ + [pluggable_database.GetPluggableDatabaseRequest], + Awaitable[pluggable_database.PluggableDatabase], + ]: + r"""Return a callable for the get pluggable database method over gRPC. + + Gets details of a single PluggableDatabase. 
+ + Returns: + Callable[[~.GetPluggableDatabaseRequest], + Awaitable[~.PluggableDatabase]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_pluggable_database" not in self._stubs: + self._stubs["get_pluggable_database"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/GetPluggableDatabase", + request_serializer=pluggable_database.GetPluggableDatabaseRequest.serialize, + response_deserializer=pluggable_database.PluggableDatabase.deserialize, + ) + return self._stubs["get_pluggable_database"] + + @property + def list_db_systems( + self, + ) -> Callable[ + [db_system.ListDbSystemsRequest], Awaitable[db_system.ListDbSystemsResponse] + ]: + r"""Return a callable for the list db systems method over gRPC. + + Lists all the DbSystems for the given project and + location. + + Returns: + Callable[[~.ListDbSystemsRequest], + Awaitable[~.ListDbSystemsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_db_systems" not in self._stubs: + self._stubs["list_db_systems"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/ListDbSystems", + request_serializer=db_system.ListDbSystemsRequest.serialize, + response_deserializer=db_system.ListDbSystemsResponse.deserialize, + ) + return self._stubs["list_db_systems"] + + @property + def get_db_system( + self, + ) -> Callable[[db_system.GetDbSystemRequest], Awaitable[db_system.DbSystem]]: + r"""Return a callable for the get db system method over gRPC. + + Gets details of a single DbSystem. 
+ + Returns: + Callable[[~.GetDbSystemRequest], + Awaitable[~.DbSystem]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_db_system" not in self._stubs: + self._stubs["get_db_system"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/GetDbSystem", + request_serializer=db_system.GetDbSystemRequest.serialize, + response_deserializer=db_system.DbSystem.deserialize, + ) + return self._stubs["get_db_system"] + + @property + def create_db_system( + self, + ) -> Callable[ + [gco_db_system.CreateDbSystemRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the create db system method over gRPC. + + Creates a new DbSystem in a given project and + location. + + Returns: + Callable[[~.CreateDbSystemRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_db_system" not in self._stubs: + self._stubs["create_db_system"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/CreateDbSystem", + request_serializer=gco_db_system.CreateDbSystemRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_db_system"] + + @property + def delete_db_system( + self, + ) -> Callable[ + [db_system.DeleteDbSystemRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the delete db system method over gRPC. + + Deletes a single DbSystem. 
+ + Returns: + Callable[[~.DeleteDbSystemRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_db_system" not in self._stubs: + self._stubs["delete_db_system"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/DeleteDbSystem", + request_serializer=db_system.DeleteDbSystemRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_db_system"] + + @property + def list_db_versions( + self, + ) -> Callable[ + [db_version.ListDbVersionsRequest], Awaitable[db_version.ListDbVersionsResponse] + ]: + r"""Return a callable for the list db versions method over gRPC. + + List DbVersions for the given project and location. + + Returns: + Callable[[~.ListDbVersionsRequest], + Awaitable[~.ListDbVersionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_db_versions" not in self._stubs: + self._stubs["list_db_versions"] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/ListDbVersions", + request_serializer=db_version.ListDbVersionsRequest.serialize, + response_deserializer=db_version.ListDbVersionsResponse.deserialize, + ) + return self._stubs["list_db_versions"] + + @property + def list_database_character_sets( + self, + ) -> Callable[ + [database_character_set.ListDatabaseCharacterSetsRequest], + Awaitable[database_character_set.ListDatabaseCharacterSetsResponse], + ]: + r"""Return a callable for the list database character sets method over gRPC. 
+ + List DatabaseCharacterSets for the given project and + location. + + Returns: + Callable[[~.ListDatabaseCharacterSetsRequest], + Awaitable[~.ListDatabaseCharacterSetsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_database_character_sets" not in self._stubs: + self._stubs[ + "list_database_character_sets" + ] = self._logged_channel.unary_unary( + "/google.cloud.oracledatabase.v1.OracleDatabase/ListDatabaseCharacterSets", + request_serializer=database_character_set.ListDatabaseCharacterSetsRequest.serialize, + response_deserializer=database_character_set.ListDatabaseCharacterSetsResponse.deserialize, + ) + return self._stubs["list_database_character_sets"] + + def _prep_wrapped_messages(self, client_info): + """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" + self._wrapped_methods = { + self.list_cloud_exadata_infrastructures: self._wrap_method( + self.list_cloud_exadata_infrastructures, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_cloud_exadata_infrastructure: self._wrap_method( + self.get_cloud_exadata_infrastructure, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.create_cloud_exadata_infrastructure: self._wrap_method( + self.create_cloud_exadata_infrastructure, + default_timeout=None, + 
client_info=client_info, + ), + self.delete_cloud_exadata_infrastructure: self._wrap_method( + self.delete_cloud_exadata_infrastructure, + default_timeout=None, + client_info=client_info, + ), + self.list_cloud_vm_clusters: self._wrap_method( + self.list_cloud_vm_clusters, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_cloud_vm_cluster: self._wrap_method( + self.get_cloud_vm_cluster, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.create_cloud_vm_cluster: self._wrap_method( + self.create_cloud_vm_cluster, + default_timeout=None, + client_info=client_info, + ), + self.delete_cloud_vm_cluster: self._wrap_method( + self.delete_cloud_vm_cluster, + default_timeout=None, + client_info=client_info, + ), + self.list_entitlements: self._wrap_method( + self.list_entitlements, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_db_servers: self._wrap_method( + self.list_db_servers, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_db_nodes: self._wrap_method( + self.list_db_nodes, + default_retry=retries.AsyncRetry( + 
initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_gi_versions: self._wrap_method( + self.list_gi_versions, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_minor_versions: self._wrap_method( + self.list_minor_versions, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_db_system_shapes: self._wrap_method( + self.list_db_system_shapes, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_autonomous_databases: self._wrap_method( + self.list_autonomous_databases, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_autonomous_database: self._wrap_method( + self.get_autonomous_database, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + 
default_timeout=60.0, + client_info=client_info, + ), + self.create_autonomous_database: self._wrap_method( + self.create_autonomous_database, + default_timeout=None, + client_info=client_info, + ), + self.update_autonomous_database: self._wrap_method( + self.update_autonomous_database, + default_timeout=None, + client_info=client_info, + ), + self.delete_autonomous_database: self._wrap_method( + self.delete_autonomous_database, + default_timeout=None, + client_info=client_info, + ), + self.restore_autonomous_database: self._wrap_method( + self.restore_autonomous_database, + default_timeout=None, + client_info=client_info, + ), + self.generate_autonomous_database_wallet: self._wrap_method( + self.generate_autonomous_database_wallet, + default_timeout=None, + client_info=client_info, + ), + self.list_autonomous_db_versions: self._wrap_method( + self.list_autonomous_db_versions, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_autonomous_database_character_sets: self._wrap_method( + self.list_autonomous_database_character_sets, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_autonomous_database_backups: self._wrap_method( + self.list_autonomous_database_backups, + default_retry=retries.AsyncRetry( initial=1.0, maximum=10.0, multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, core_exceptions.ServiceUnavailable, ), deadline=60.0, @@ -1367,16 +2395,39 @@ def 
_prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - self.list_autonomous_database_character_sets: self._wrap_method( - self.list_autonomous_database_character_sets, + self.stop_autonomous_database: self._wrap_method( + self.stop_autonomous_database, + default_timeout=None, + client_info=client_info, + ), + self.start_autonomous_database: self._wrap_method( + self.start_autonomous_database, + default_timeout=None, + client_info=client_info, + ), + self.restart_autonomous_database: self._wrap_method( + self.restart_autonomous_database, + default_timeout=None, + client_info=client_info, + ), + self.switchover_autonomous_database: self._wrap_method( + self.switchover_autonomous_database, + default_timeout=None, + client_info=client_info, + ), + self.failover_autonomous_database: self._wrap_method( + self.failover_autonomous_database, + default_timeout=None, + client_info=client_info, + ), + self.list_odb_networks: self._wrap_method( + self.list_odb_networks, default_retry=retries.AsyncRetry( initial=1.0, maximum=10.0, multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, core_exceptions.ServiceUnavailable, ), deadline=60.0, @@ -1384,16 +2435,14 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - self.list_autonomous_database_backups: self._wrap_method( - self.list_autonomous_database_backups, + self.get_odb_network: self._wrap_method( + self.get_odb_network, default_retry=retries.AsyncRetry( initial=1.0, maximum=10.0, multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ResourceExhausted, core_exceptions.ServiceUnavailable, ), deadline=60.0, @@ -1401,21 +2450,291 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - 
self.stop_autonomous_database: self._wrap_method( - self.stop_autonomous_database, + self.create_odb_network: self._wrap_method( + self.create_odb_network, default_timeout=None, client_info=client_info, ), - self.start_autonomous_database: self._wrap_method( - self.start_autonomous_database, + self.delete_odb_network: self._wrap_method( + self.delete_odb_network, default_timeout=None, client_info=client_info, ), - self.restart_autonomous_database: self._wrap_method( - self.restart_autonomous_database, + self.list_odb_subnets: self._wrap_method( + self.list_odb_subnets, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_odb_subnet: self._wrap_method( + self.get_odb_subnet, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.create_odb_subnet: self._wrap_method( + self.create_odb_subnet, + default_timeout=None, + client_info=client_info, + ), + self.delete_odb_subnet: self._wrap_method( + self.delete_odb_subnet, + default_timeout=None, + client_info=client_info, + ), + self.list_exadb_vm_clusters: self._wrap_method( + self.list_exadb_vm_clusters, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_exadb_vm_cluster: self._wrap_method( + self.get_exadb_vm_cluster, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + 
predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.create_exadb_vm_cluster: self._wrap_method( + self.create_exadb_vm_cluster, + default_timeout=None, + client_info=client_info, + ), + self.delete_exadb_vm_cluster: self._wrap_method( + self.delete_exadb_vm_cluster, + default_timeout=None, + client_info=client_info, + ), + self.update_exadb_vm_cluster: self._wrap_method( + self.update_exadb_vm_cluster, + default_timeout=None, + client_info=client_info, + ), + self.remove_virtual_machine_exadb_vm_cluster: self._wrap_method( + self.remove_virtual_machine_exadb_vm_cluster, + default_timeout=None, + client_info=client_info, + ), + self.list_exascale_db_storage_vaults: self._wrap_method( + self.list_exascale_db_storage_vaults, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_exascale_db_storage_vault: self._wrap_method( + self.get_exascale_db_storage_vault, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.create_exascale_db_storage_vault: self._wrap_method( + self.create_exascale_db_storage_vault, + default_timeout=None, + client_info=client_info, + ), + self.delete_exascale_db_storage_vault: self._wrap_method( + self.delete_exascale_db_storage_vault, + default_timeout=None, + client_info=client_info, + ), + self.list_db_system_initial_storage_sizes: self._wrap_method( + self.list_db_system_initial_storage_sizes, + default_retry=retries.AsyncRetry( + 
initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_databases: self._wrap_method( + self.list_databases, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_database: self._wrap_method( + self.get_database, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_pluggable_databases: self._wrap_method( + self.list_pluggable_databases, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_pluggable_database: self._wrap_method( + self.get_pluggable_database, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_db_systems: self._wrap_method( + self.list_db_systems, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + 
client_info=client_info, + ), + self.get_db_system: self._wrap_method( + self.get_db_system, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.create_db_system: self._wrap_method( + self.create_db_system, + default_timeout=None, + client_info=client_info, + ), + self.delete_db_system: self._wrap_method( + self.delete_db_system, default_timeout=None, client_info=client_info, ), + self.list_db_versions: self._wrap_method( + self.list_db_versions, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_database_character_sets: self._wrap_method( + self.list_database_character_sets, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), self.get_location: self._wrap_method( self.get_location, default_timeout=None, diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/transports/rest.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/transports/rest.py index afdffa0890b6..ae0770e72e95 100644 --- a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/transports/rest.py +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/transports/rest.py @@ -31,11 +31,32 @@ from requests import __version__ as 
requests_version from google.cloud.oracledatabase_v1.types import ( - autonomous_database, + db_system_initial_storage_size, + db_version, exadata_infra, + exadb_vm_cluster, +) +from google.cloud.oracledatabase_v1.types import ( + autonomous_database, + database, + database_character_set, +) +from google.cloud.oracledatabase_v1.types import ( oracledatabase, + pluggable_database, vm_cluster, ) +from google.cloud.oracledatabase_v1.types import ( + exascale_db_storage_vault as gco_exascale_db_storage_vault, +) +from google.cloud.oracledatabase_v1.types import db_system +from google.cloud.oracledatabase_v1.types import db_system as gco_db_system +from google.cloud.oracledatabase_v1.types import exascale_db_storage_vault +from google.cloud.oracledatabase_v1.types import minor_version +from google.cloud.oracledatabase_v1.types import odb_network +from google.cloud.oracledatabase_v1.types import odb_network as gco_odb_network +from google.cloud.oracledatabase_v1.types import odb_subnet +from google.cloud.oracledatabase_v1.types import odb_subnet as gco_odb_subnet from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO from .rest_base import _BaseOracleDatabaseRestTransport @@ -103,6 +124,46 @@ def post_create_cloud_vm_cluster(self, response): logging.log(f"Received response: {response}") return response + def pre_create_db_system(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_db_system(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_create_exadb_vm_cluster(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_exadb_vm_cluster(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_create_exascale_db_storage_vault(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def 
post_create_exascale_db_storage_vault(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_create_odb_network(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_odb_network(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_create_odb_subnet(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_odb_subnet(self, response): + logging.log(f"Received response: {response}") + return response + def pre_delete_autonomous_database(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -127,6 +188,54 @@ def post_delete_cloud_vm_cluster(self, response): logging.log(f"Received response: {response}") return response + def pre_delete_db_system(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_delete_db_system(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_exadb_vm_cluster(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_delete_exadb_vm_cluster(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_exascale_db_storage_vault(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_delete_exascale_db_storage_vault(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_odb_network(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_delete_odb_network(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_odb_subnet(self, request, metadata): + logging.log(f"Received request: {request}") + return 
request, metadata + + def post_delete_odb_subnet(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_failover_autonomous_database(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_failover_autonomous_database(self, response): + logging.log(f"Received response: {response}") + return response + def pre_generate_autonomous_database_wallet(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -159,6 +268,62 @@ def post_get_cloud_vm_cluster(self, response): logging.log(f"Received response: {response}") return response + def pre_get_database(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_database(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_db_system(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_db_system(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_exadb_vm_cluster(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_exadb_vm_cluster(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_exascale_db_storage_vault(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_exascale_db_storage_vault(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_odb_network(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_odb_network(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_odb_subnet(self, request, metadata): + logging.log(f"Received request: {request}") + return 
request, metadata + + def post_get_odb_subnet(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_pluggable_database(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_pluggable_database(self, response): + logging.log(f"Received response: {response}") + return response + def pre_list_autonomous_database_backups(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -207,6 +372,22 @@ def post_list_cloud_vm_clusters(self, response): logging.log(f"Received response: {response}") return response + def pre_list_database_character_sets(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_database_character_sets(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_databases(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_databases(self, response): + logging.log(f"Received response: {response}") + return response + def pre_list_db_nodes(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -223,6 +404,22 @@ def post_list_db_servers(self, response): logging.log(f"Received response: {response}") return response + def pre_list_db_system_initial_storage_sizes(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_db_system_initial_storage_sizes(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_db_systems(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_db_systems(self, response): + logging.log(f"Received response: {response}") + return response + def pre_list_db_system_shapes(self, request, metadata): logging.log(f"Received 
request: {request}") return request, metadata @@ -231,6 +428,14 @@ def post_list_db_system_shapes(self, response): logging.log(f"Received response: {response}") return response + def pre_list_db_versions(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_db_versions(self, response): + logging.log(f"Received response: {response}") + return response + def pre_list_entitlements(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -239,6 +444,22 @@ def post_list_entitlements(self, response): logging.log(f"Received response: {response}") return response + def pre_list_exadb_vm_clusters(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_exadb_vm_clusters(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_exascale_db_storage_vaults(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_exascale_db_storage_vaults(self, response): + logging.log(f"Received response: {response}") + return response + def pre_list_gi_versions(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -247,6 +468,46 @@ def post_list_gi_versions(self, response): logging.log(f"Received response: {response}") return response + def pre_list_minor_versions(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_minor_versions(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_odb_networks(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_odb_networks(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_odb_subnets(self, request, metadata): + 
logging.log(f"Received request: {request}") + return request, metadata + + def post_list_odb_subnets(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_pluggable_databases(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_pluggable_databases(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_remove_virtual_machine_exadb_vm_cluster(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_remove_virtual_machine_exadb_vm_cluster(self, response): + logging.log(f"Received response: {response}") + return response + def pre_restart_autonomous_database(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -279,6 +540,30 @@ def post_stop_autonomous_database(self, response): logging.log(f"Received response: {response}") return response + def pre_switchover_autonomous_database(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_switchover_autonomous_database(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_autonomous_database(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_autonomous_database(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_exadb_vm_cluster(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_exadb_vm_cluster(self, response): + logging.log(f"Received response: {response}") + return response + transport = OracleDatabaseRestTransport(interceptor=MyCustomOracleDatabaseInterceptor()) client = OracleDatabaseClient(transport=transport) @@ -432,1410 +717,8073 @@ def post_create_cloud_vm_cluster_with_metadata( """ 
return response, metadata - def pre_delete_autonomous_database( + def pre_create_db_system( self, - request: oracledatabase.DeleteAutonomousDatabaseRequest, + request: gco_db_system.CreateDbSystemRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - oracledatabase.DeleteAutonomousDatabaseRequest, - Sequence[Tuple[str, Union[str, bytes]]], + gco_db_system.CreateDbSystemRequest, Sequence[Tuple[str, Union[str, bytes]]] ]: - """Pre-rpc interceptor for delete_autonomous_database + """Pre-rpc interceptor for create_db_system Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_delete_autonomous_database( + def post_create_db_system( self, response: operations_pb2.Operation ) -> operations_pb2.Operation: - """Post-rpc interceptor for delete_autonomous_database + """Post-rpc interceptor for create_db_system - DEPRECATED. Please use the `post_delete_autonomous_database_with_metadata` + DEPRECATED. Please use the `post_create_db_system_with_metadata` interceptor instead. Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. This `post_delete_autonomous_database` interceptor runs - before the `post_delete_autonomous_database_with_metadata` interceptor. + it is returned to user code. This `post_create_db_system` interceptor runs + before the `post_create_db_system_with_metadata` interceptor. 
""" return response - def post_delete_autonomous_database_with_metadata( + def post_create_db_system_with_metadata( self, response: operations_pb2.Operation, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for delete_autonomous_database + """Post-rpc interceptor for create_db_system Override in a subclass to read or manipulate the response or metadata after it is returned by the OracleDatabase server but before it is returned to user code. - We recommend only using this `post_delete_autonomous_database_with_metadata` - interceptor in new development instead of the `post_delete_autonomous_database` interceptor. - When both interceptors are used, this `post_delete_autonomous_database_with_metadata` interceptor runs after the - `post_delete_autonomous_database` interceptor. The (possibly modified) response returned by - `post_delete_autonomous_database` will be passed to - `post_delete_autonomous_database_with_metadata`. + We recommend only using this `post_create_db_system_with_metadata` + interceptor in new development instead of the `post_create_db_system` interceptor. + When both interceptors are used, this `post_create_db_system_with_metadata` interceptor runs after the + `post_create_db_system` interceptor. The (possibly modified) response returned by + `post_create_db_system` will be passed to + `post_create_db_system_with_metadata`. 
""" return response, metadata - def pre_delete_cloud_exadata_infrastructure( + def pre_create_exadb_vm_cluster( self, - request: oracledatabase.DeleteCloudExadataInfrastructureRequest, + request: oracledatabase.CreateExadbVmClusterRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - oracledatabase.DeleteCloudExadataInfrastructureRequest, + oracledatabase.CreateExadbVmClusterRequest, Sequence[Tuple[str, Union[str, bytes]]], ]: - """Pre-rpc interceptor for delete_cloud_exadata_infrastructure + """Pre-rpc interceptor for create_exadb_vm_cluster Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_delete_cloud_exadata_infrastructure( + def post_create_exadb_vm_cluster( self, response: operations_pb2.Operation ) -> operations_pb2.Operation: - """Post-rpc interceptor for delete_cloud_exadata_infrastructure + """Post-rpc interceptor for create_exadb_vm_cluster - DEPRECATED. Please use the `post_delete_cloud_exadata_infrastructure_with_metadata` + DEPRECATED. Please use the `post_create_exadb_vm_cluster_with_metadata` interceptor instead. Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. This `post_delete_cloud_exadata_infrastructure` interceptor runs - before the `post_delete_cloud_exadata_infrastructure_with_metadata` interceptor. + it is returned to user code. This `post_create_exadb_vm_cluster` interceptor runs + before the `post_create_exadb_vm_cluster_with_metadata` interceptor. 
""" return response - def post_delete_cloud_exadata_infrastructure_with_metadata( + def post_create_exadb_vm_cluster_with_metadata( self, response: operations_pb2.Operation, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for delete_cloud_exadata_infrastructure + """Post-rpc interceptor for create_exadb_vm_cluster Override in a subclass to read or manipulate the response or metadata after it is returned by the OracleDatabase server but before it is returned to user code. - We recommend only using this `post_delete_cloud_exadata_infrastructure_with_metadata` - interceptor in new development instead of the `post_delete_cloud_exadata_infrastructure` interceptor. - When both interceptors are used, this `post_delete_cloud_exadata_infrastructure_with_metadata` interceptor runs after the - `post_delete_cloud_exadata_infrastructure` interceptor. The (possibly modified) response returned by - `post_delete_cloud_exadata_infrastructure` will be passed to - `post_delete_cloud_exadata_infrastructure_with_metadata`. + We recommend only using this `post_create_exadb_vm_cluster_with_metadata` + interceptor in new development instead of the `post_create_exadb_vm_cluster` interceptor. + When both interceptors are used, this `post_create_exadb_vm_cluster_with_metadata` interceptor runs after the + `post_create_exadb_vm_cluster` interceptor. The (possibly modified) response returned by + `post_create_exadb_vm_cluster` will be passed to + `post_create_exadb_vm_cluster_with_metadata`. 
""" return response, metadata - def pre_delete_cloud_vm_cluster( + def pre_create_exascale_db_storage_vault( self, - request: oracledatabase.DeleteCloudVmClusterRequest, + request: gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - oracledatabase.DeleteCloudVmClusterRequest, + gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest, Sequence[Tuple[str, Union[str, bytes]]], ]: - """Pre-rpc interceptor for delete_cloud_vm_cluster + """Pre-rpc interceptor for create_exascale_db_storage_vault Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_delete_cloud_vm_cluster( + def post_create_exascale_db_storage_vault( self, response: operations_pb2.Operation ) -> operations_pb2.Operation: - """Post-rpc interceptor for delete_cloud_vm_cluster + """Post-rpc interceptor for create_exascale_db_storage_vault - DEPRECATED. Please use the `post_delete_cloud_vm_cluster_with_metadata` + DEPRECATED. Please use the `post_create_exascale_db_storage_vault_with_metadata` interceptor instead. Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. This `post_delete_cloud_vm_cluster` interceptor runs - before the `post_delete_cloud_vm_cluster_with_metadata` interceptor. + it is returned to user code. This `post_create_exascale_db_storage_vault` interceptor runs + before the `post_create_exascale_db_storage_vault_with_metadata` interceptor. 
""" return response - def post_delete_cloud_vm_cluster_with_metadata( + def post_create_exascale_db_storage_vault_with_metadata( self, response: operations_pb2.Operation, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for delete_cloud_vm_cluster + """Post-rpc interceptor for create_exascale_db_storage_vault Override in a subclass to read or manipulate the response or metadata after it is returned by the OracleDatabase server but before it is returned to user code. - We recommend only using this `post_delete_cloud_vm_cluster_with_metadata` - interceptor in new development instead of the `post_delete_cloud_vm_cluster` interceptor. - When both interceptors are used, this `post_delete_cloud_vm_cluster_with_metadata` interceptor runs after the - `post_delete_cloud_vm_cluster` interceptor. The (possibly modified) response returned by - `post_delete_cloud_vm_cluster` will be passed to - `post_delete_cloud_vm_cluster_with_metadata`. + We recommend only using this `post_create_exascale_db_storage_vault_with_metadata` + interceptor in new development instead of the `post_create_exascale_db_storage_vault` interceptor. + When both interceptors are used, this `post_create_exascale_db_storage_vault_with_metadata` interceptor runs after the + `post_create_exascale_db_storage_vault` interceptor. The (possibly modified) response returned by + `post_create_exascale_db_storage_vault` will be passed to + `post_create_exascale_db_storage_vault_with_metadata`. 
""" return response, metadata - def pre_generate_autonomous_database_wallet( + def pre_create_odb_network( self, - request: oracledatabase.GenerateAutonomousDatabaseWalletRequest, + request: gco_odb_network.CreateOdbNetworkRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - oracledatabase.GenerateAutonomousDatabaseWalletRequest, - Sequence[Tuple[str, Union[str, bytes]]], + gco_odb_network.CreateOdbNetworkRequest, Sequence[Tuple[str, Union[str, bytes]]] ]: - """Pre-rpc interceptor for generate_autonomous_database_wallet + """Pre-rpc interceptor for create_odb_network Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_generate_autonomous_database_wallet( - self, response: oracledatabase.GenerateAutonomousDatabaseWalletResponse - ) -> oracledatabase.GenerateAutonomousDatabaseWalletResponse: - """Post-rpc interceptor for generate_autonomous_database_wallet + def post_create_odb_network( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_odb_network - DEPRECATED. Please use the `post_generate_autonomous_database_wallet_with_metadata` + DEPRECATED. Please use the `post_create_odb_network_with_metadata` interceptor instead. Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. This `post_generate_autonomous_database_wallet` interceptor runs - before the `post_generate_autonomous_database_wallet_with_metadata` interceptor. + it is returned to user code. This `post_create_odb_network` interceptor runs + before the `post_create_odb_network_with_metadata` interceptor. 
""" return response - def post_generate_autonomous_database_wallet_with_metadata( + def post_create_odb_network_with_metadata( self, - response: oracledatabase.GenerateAutonomousDatabaseWalletResponse, + response: operations_pb2.Operation, metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - oracledatabase.GenerateAutonomousDatabaseWalletResponse, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Post-rpc interceptor for generate_autonomous_database_wallet + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_odb_network Override in a subclass to read or manipulate the response or metadata after it is returned by the OracleDatabase server but before it is returned to user code. - We recommend only using this `post_generate_autonomous_database_wallet_with_metadata` - interceptor in new development instead of the `post_generate_autonomous_database_wallet` interceptor. - When both interceptors are used, this `post_generate_autonomous_database_wallet_with_metadata` interceptor runs after the - `post_generate_autonomous_database_wallet` interceptor. The (possibly modified) response returned by - `post_generate_autonomous_database_wallet` will be passed to - `post_generate_autonomous_database_wallet_with_metadata`. + We recommend only using this `post_create_odb_network_with_metadata` + interceptor in new development instead of the `post_create_odb_network` interceptor. + When both interceptors are used, this `post_create_odb_network_with_metadata` interceptor runs after the + `post_create_odb_network` interceptor. The (possibly modified) response returned by + `post_create_odb_network` will be passed to + `post_create_odb_network_with_metadata`. 
""" return response, metadata - def pre_get_autonomous_database( + def pre_create_odb_subnet( self, - request: oracledatabase.GetAutonomousDatabaseRequest, + request: gco_odb_subnet.CreateOdbSubnetRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - oracledatabase.GetAutonomousDatabaseRequest, - Sequence[Tuple[str, Union[str, bytes]]], + gco_odb_subnet.CreateOdbSubnetRequest, Sequence[Tuple[str, Union[str, bytes]]] ]: - """Pre-rpc interceptor for get_autonomous_database + """Pre-rpc interceptor for create_odb_subnet Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_get_autonomous_database( - self, response: autonomous_database.AutonomousDatabase - ) -> autonomous_database.AutonomousDatabase: - """Post-rpc interceptor for get_autonomous_database + def post_create_odb_subnet( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_odb_subnet - DEPRECATED. Please use the `post_get_autonomous_database_with_metadata` + DEPRECATED. Please use the `post_create_odb_subnet_with_metadata` interceptor instead. Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. This `post_get_autonomous_database` interceptor runs - before the `post_get_autonomous_database_with_metadata` interceptor. + it is returned to user code. This `post_create_odb_subnet` interceptor runs + before the `post_create_odb_subnet_with_metadata` interceptor. 
""" return response - def post_get_autonomous_database_with_metadata( + def post_create_odb_subnet_with_metadata( self, - response: autonomous_database.AutonomousDatabase, + response: operations_pb2.Operation, metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - autonomous_database.AutonomousDatabase, Sequence[Tuple[str, Union[str, bytes]]] - ]: - """Post-rpc interceptor for get_autonomous_database + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_odb_subnet Override in a subclass to read or manipulate the response or metadata after it is returned by the OracleDatabase server but before it is returned to user code. - We recommend only using this `post_get_autonomous_database_with_metadata` - interceptor in new development instead of the `post_get_autonomous_database` interceptor. - When both interceptors are used, this `post_get_autonomous_database_with_metadata` interceptor runs after the - `post_get_autonomous_database` interceptor. The (possibly modified) response returned by - `post_get_autonomous_database` will be passed to - `post_get_autonomous_database_with_metadata`. + We recommend only using this `post_create_odb_subnet_with_metadata` + interceptor in new development instead of the `post_create_odb_subnet` interceptor. + When both interceptors are used, this `post_create_odb_subnet_with_metadata` interceptor runs after the + `post_create_odb_subnet` interceptor. The (possibly modified) response returned by + `post_create_odb_subnet` will be passed to + `post_create_odb_subnet_with_metadata`. 
""" return response, metadata - def pre_get_cloud_exadata_infrastructure( + def pre_delete_autonomous_database( self, - request: oracledatabase.GetCloudExadataInfrastructureRequest, + request: oracledatabase.DeleteAutonomousDatabaseRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - oracledatabase.GetCloudExadataInfrastructureRequest, + oracledatabase.DeleteAutonomousDatabaseRequest, Sequence[Tuple[str, Union[str, bytes]]], ]: - """Pre-rpc interceptor for get_cloud_exadata_infrastructure + """Pre-rpc interceptor for delete_autonomous_database Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_get_cloud_exadata_infrastructure( - self, response: exadata_infra.CloudExadataInfrastructure - ) -> exadata_infra.CloudExadataInfrastructure: - """Post-rpc interceptor for get_cloud_exadata_infrastructure + def post_delete_autonomous_database( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_autonomous_database - DEPRECATED. Please use the `post_get_cloud_exadata_infrastructure_with_metadata` + DEPRECATED. Please use the `post_delete_autonomous_database_with_metadata` interceptor instead. Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. This `post_get_cloud_exadata_infrastructure` interceptor runs - before the `post_get_cloud_exadata_infrastructure_with_metadata` interceptor. + it is returned to user code. This `post_delete_autonomous_database` interceptor runs + before the `post_delete_autonomous_database_with_metadata` interceptor. 
""" return response - def post_get_cloud_exadata_infrastructure_with_metadata( + def post_delete_autonomous_database_with_metadata( self, - response: exadata_infra.CloudExadataInfrastructure, + response: operations_pb2.Operation, metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - exadata_infra.CloudExadataInfrastructure, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Post-rpc interceptor for get_cloud_exadata_infrastructure + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_autonomous_database Override in a subclass to read or manipulate the response or metadata after it is returned by the OracleDatabase server but before it is returned to user code. - We recommend only using this `post_get_cloud_exadata_infrastructure_with_metadata` - interceptor in new development instead of the `post_get_cloud_exadata_infrastructure` interceptor. - When both interceptors are used, this `post_get_cloud_exadata_infrastructure_with_metadata` interceptor runs after the - `post_get_cloud_exadata_infrastructure` interceptor. The (possibly modified) response returned by - `post_get_cloud_exadata_infrastructure` will be passed to - `post_get_cloud_exadata_infrastructure_with_metadata`. + We recommend only using this `post_delete_autonomous_database_with_metadata` + interceptor in new development instead of the `post_delete_autonomous_database` interceptor. + When both interceptors are used, this `post_delete_autonomous_database_with_metadata` interceptor runs after the + `post_delete_autonomous_database` interceptor. The (possibly modified) response returned by + `post_delete_autonomous_database` will be passed to + `post_delete_autonomous_database_with_metadata`. 
""" return response, metadata - def pre_get_cloud_vm_cluster( + def pre_delete_cloud_exadata_infrastructure( self, - request: oracledatabase.GetCloudVmClusterRequest, + request: oracledatabase.DeleteCloudExadataInfrastructureRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - oracledatabase.GetCloudVmClusterRequest, Sequence[Tuple[str, Union[str, bytes]]] + oracledatabase.DeleteCloudExadataInfrastructureRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: - """Pre-rpc interceptor for get_cloud_vm_cluster + """Pre-rpc interceptor for delete_cloud_exadata_infrastructure Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_get_cloud_vm_cluster( - self, response: vm_cluster.CloudVmCluster - ) -> vm_cluster.CloudVmCluster: - """Post-rpc interceptor for get_cloud_vm_cluster + def post_delete_cloud_exadata_infrastructure( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_cloud_exadata_infrastructure - DEPRECATED. Please use the `post_get_cloud_vm_cluster_with_metadata` + DEPRECATED. Please use the `post_delete_cloud_exadata_infrastructure_with_metadata` interceptor instead. Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. This `post_get_cloud_vm_cluster` interceptor runs - before the `post_get_cloud_vm_cluster_with_metadata` interceptor. + it is returned to user code. This `post_delete_cloud_exadata_infrastructure` interceptor runs + before the `post_delete_cloud_exadata_infrastructure_with_metadata` interceptor. 
""" return response - def post_get_cloud_vm_cluster_with_metadata( + def post_delete_cloud_exadata_infrastructure_with_metadata( self, - response: vm_cluster.CloudVmCluster, + response: operations_pb2.Operation, metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[vm_cluster.CloudVmCluster, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for get_cloud_vm_cluster + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_cloud_exadata_infrastructure Override in a subclass to read or manipulate the response or metadata after it is returned by the OracleDatabase server but before it is returned to user code. - We recommend only using this `post_get_cloud_vm_cluster_with_metadata` - interceptor in new development instead of the `post_get_cloud_vm_cluster` interceptor. - When both interceptors are used, this `post_get_cloud_vm_cluster_with_metadata` interceptor runs after the - `post_get_cloud_vm_cluster` interceptor. The (possibly modified) response returned by - `post_get_cloud_vm_cluster` will be passed to - `post_get_cloud_vm_cluster_with_metadata`. + We recommend only using this `post_delete_cloud_exadata_infrastructure_with_metadata` + interceptor in new development instead of the `post_delete_cloud_exadata_infrastructure` interceptor. + When both interceptors are used, this `post_delete_cloud_exadata_infrastructure_with_metadata` interceptor runs after the + `post_delete_cloud_exadata_infrastructure` interceptor. The (possibly modified) response returned by + `post_delete_cloud_exadata_infrastructure` will be passed to + `post_delete_cloud_exadata_infrastructure_with_metadata`. 
""" return response, metadata - def pre_list_autonomous_database_backups( + def pre_delete_cloud_vm_cluster( self, - request: oracledatabase.ListAutonomousDatabaseBackupsRequest, + request: oracledatabase.DeleteCloudVmClusterRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - oracledatabase.ListAutonomousDatabaseBackupsRequest, + oracledatabase.DeleteCloudVmClusterRequest, Sequence[Tuple[str, Union[str, bytes]]], ]: - """Pre-rpc interceptor for list_autonomous_database_backups + """Pre-rpc interceptor for delete_cloud_vm_cluster Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_list_autonomous_database_backups( - self, response: oracledatabase.ListAutonomousDatabaseBackupsResponse - ) -> oracledatabase.ListAutonomousDatabaseBackupsResponse: - """Post-rpc interceptor for list_autonomous_database_backups + def post_delete_cloud_vm_cluster( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_cloud_vm_cluster - DEPRECATED. Please use the `post_list_autonomous_database_backups_with_metadata` + DEPRECATED. Please use the `post_delete_cloud_vm_cluster_with_metadata` interceptor instead. Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. This `post_list_autonomous_database_backups` interceptor runs - before the `post_list_autonomous_database_backups_with_metadata` interceptor. + it is returned to user code. This `post_delete_cloud_vm_cluster` interceptor runs + before the `post_delete_cloud_vm_cluster_with_metadata` interceptor. 
""" return response - def post_list_autonomous_database_backups_with_metadata( + def post_delete_cloud_vm_cluster_with_metadata( self, - response: oracledatabase.ListAutonomousDatabaseBackupsResponse, + response: operations_pb2.Operation, metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - oracledatabase.ListAutonomousDatabaseBackupsResponse, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Post-rpc interceptor for list_autonomous_database_backups + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_cloud_vm_cluster Override in a subclass to read or manipulate the response or metadata after it is returned by the OracleDatabase server but before it is returned to user code. - We recommend only using this `post_list_autonomous_database_backups_with_metadata` - interceptor in new development instead of the `post_list_autonomous_database_backups` interceptor. - When both interceptors are used, this `post_list_autonomous_database_backups_with_metadata` interceptor runs after the - `post_list_autonomous_database_backups` interceptor. The (possibly modified) response returned by - `post_list_autonomous_database_backups` will be passed to - `post_list_autonomous_database_backups_with_metadata`. + We recommend only using this `post_delete_cloud_vm_cluster_with_metadata` + interceptor in new development instead of the `post_delete_cloud_vm_cluster` interceptor. + When both interceptors are used, this `post_delete_cloud_vm_cluster_with_metadata` interceptor runs after the + `post_delete_cloud_vm_cluster` interceptor. The (possibly modified) response returned by + `post_delete_cloud_vm_cluster` will be passed to + `post_delete_cloud_vm_cluster_with_metadata`. 
""" return response, metadata - def pre_list_autonomous_database_character_sets( + def pre_delete_db_system( self, - request: oracledatabase.ListAutonomousDatabaseCharacterSetsRequest, + request: db_system.DeleteDbSystemRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - oracledatabase.ListAutonomousDatabaseCharacterSetsRequest, - Sequence[Tuple[str, Union[str, bytes]]], + db_system.DeleteDbSystemRequest, Sequence[Tuple[str, Union[str, bytes]]] ]: - """Pre-rpc interceptor for list_autonomous_database_character_sets + """Pre-rpc interceptor for delete_db_system Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_list_autonomous_database_character_sets( - self, response: oracledatabase.ListAutonomousDatabaseCharacterSetsResponse - ) -> oracledatabase.ListAutonomousDatabaseCharacterSetsResponse: - """Post-rpc interceptor for list_autonomous_database_character_sets + def post_delete_db_system( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_db_system - DEPRECATED. Please use the `post_list_autonomous_database_character_sets_with_metadata` + DEPRECATED. Please use the `post_delete_db_system_with_metadata` interceptor instead. Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. This `post_list_autonomous_database_character_sets` interceptor runs - before the `post_list_autonomous_database_character_sets_with_metadata` interceptor. + it is returned to user code. This `post_delete_db_system` interceptor runs + before the `post_delete_db_system_with_metadata` interceptor. 
""" return response - def post_list_autonomous_database_character_sets_with_metadata( + def post_delete_db_system_with_metadata( self, - response: oracledatabase.ListAutonomousDatabaseCharacterSetsResponse, + response: operations_pb2.Operation, metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - oracledatabase.ListAutonomousDatabaseCharacterSetsResponse, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Post-rpc interceptor for list_autonomous_database_character_sets + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_db_system Override in a subclass to read or manipulate the response or metadata after it is returned by the OracleDatabase server but before it is returned to user code. - We recommend only using this `post_list_autonomous_database_character_sets_with_metadata` - interceptor in new development instead of the `post_list_autonomous_database_character_sets` interceptor. - When both interceptors are used, this `post_list_autonomous_database_character_sets_with_metadata` interceptor runs after the - `post_list_autonomous_database_character_sets` interceptor. The (possibly modified) response returned by - `post_list_autonomous_database_character_sets` will be passed to - `post_list_autonomous_database_character_sets_with_metadata`. + We recommend only using this `post_delete_db_system_with_metadata` + interceptor in new development instead of the `post_delete_db_system` interceptor. + When both interceptors are used, this `post_delete_db_system_with_metadata` interceptor runs after the + `post_delete_db_system` interceptor. The (possibly modified) response returned by + `post_delete_db_system` will be passed to + `post_delete_db_system_with_metadata`. 
""" return response, metadata - def pre_list_autonomous_databases( + def pre_delete_exadb_vm_cluster( self, - request: oracledatabase.ListAutonomousDatabasesRequest, + request: oracledatabase.DeleteExadbVmClusterRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - oracledatabase.ListAutonomousDatabasesRequest, + oracledatabase.DeleteExadbVmClusterRequest, Sequence[Tuple[str, Union[str, bytes]]], ]: - """Pre-rpc interceptor for list_autonomous_databases + """Pre-rpc interceptor for delete_exadb_vm_cluster Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_list_autonomous_databases( - self, response: oracledatabase.ListAutonomousDatabasesResponse - ) -> oracledatabase.ListAutonomousDatabasesResponse: - """Post-rpc interceptor for list_autonomous_databases + def post_delete_exadb_vm_cluster( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_exadb_vm_cluster - DEPRECATED. Please use the `post_list_autonomous_databases_with_metadata` + DEPRECATED. Please use the `post_delete_exadb_vm_cluster_with_metadata` interceptor instead. Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. This `post_list_autonomous_databases` interceptor runs - before the `post_list_autonomous_databases_with_metadata` interceptor. + it is returned to user code. This `post_delete_exadb_vm_cluster` interceptor runs + before the `post_delete_exadb_vm_cluster_with_metadata` interceptor. 
""" return response - def post_list_autonomous_databases_with_metadata( + def post_delete_exadb_vm_cluster_with_metadata( self, - response: oracledatabase.ListAutonomousDatabasesResponse, + response: operations_pb2.Operation, metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - oracledatabase.ListAutonomousDatabasesResponse, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Post-rpc interceptor for list_autonomous_databases + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_exadb_vm_cluster Override in a subclass to read or manipulate the response or metadata after it is returned by the OracleDatabase server but before it is returned to user code. - We recommend only using this `post_list_autonomous_databases_with_metadata` - interceptor in new development instead of the `post_list_autonomous_databases` interceptor. - When both interceptors are used, this `post_list_autonomous_databases_with_metadata` interceptor runs after the - `post_list_autonomous_databases` interceptor. The (possibly modified) response returned by - `post_list_autonomous_databases` will be passed to - `post_list_autonomous_databases_with_metadata`. + We recommend only using this `post_delete_exadb_vm_cluster_with_metadata` + interceptor in new development instead of the `post_delete_exadb_vm_cluster` interceptor. + When both interceptors are used, this `post_delete_exadb_vm_cluster_with_metadata` interceptor runs after the + `post_delete_exadb_vm_cluster` interceptor. The (possibly modified) response returned by + `post_delete_exadb_vm_cluster` will be passed to + `post_delete_exadb_vm_cluster_with_metadata`. 
""" return response, metadata - def pre_list_autonomous_db_versions( + def pre_delete_exascale_db_storage_vault( self, - request: oracledatabase.ListAutonomousDbVersionsRequest, + request: exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - oracledatabase.ListAutonomousDbVersionsRequest, + exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest, Sequence[Tuple[str, Union[str, bytes]]], ]: - """Pre-rpc interceptor for list_autonomous_db_versions + """Pre-rpc interceptor for delete_exascale_db_storage_vault Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_list_autonomous_db_versions( - self, response: oracledatabase.ListAutonomousDbVersionsResponse - ) -> oracledatabase.ListAutonomousDbVersionsResponse: - """Post-rpc interceptor for list_autonomous_db_versions + def post_delete_exascale_db_storage_vault( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_exascale_db_storage_vault - DEPRECATED. Please use the `post_list_autonomous_db_versions_with_metadata` + DEPRECATED. Please use the `post_delete_exascale_db_storage_vault_with_metadata` interceptor instead. Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. This `post_list_autonomous_db_versions` interceptor runs - before the `post_list_autonomous_db_versions_with_metadata` interceptor. + it is returned to user code. This `post_delete_exascale_db_storage_vault` interceptor runs + before the `post_delete_exascale_db_storage_vault_with_metadata` interceptor. 
""" return response - def post_list_autonomous_db_versions_with_metadata( + def post_delete_exascale_db_storage_vault_with_metadata( self, - response: oracledatabase.ListAutonomousDbVersionsResponse, + response: operations_pb2.Operation, metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - oracledatabase.ListAutonomousDbVersionsResponse, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Post-rpc interceptor for list_autonomous_db_versions + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_exascale_db_storage_vault Override in a subclass to read or manipulate the response or metadata after it is returned by the OracleDatabase server but before it is returned to user code. - We recommend only using this `post_list_autonomous_db_versions_with_metadata` - interceptor in new development instead of the `post_list_autonomous_db_versions` interceptor. - When both interceptors are used, this `post_list_autonomous_db_versions_with_metadata` interceptor runs after the - `post_list_autonomous_db_versions` interceptor. The (possibly modified) response returned by - `post_list_autonomous_db_versions` will be passed to - `post_list_autonomous_db_versions_with_metadata`. + We recommend only using this `post_delete_exascale_db_storage_vault_with_metadata` + interceptor in new development instead of the `post_delete_exascale_db_storage_vault` interceptor. + When both interceptors are used, this `post_delete_exascale_db_storage_vault_with_metadata` interceptor runs after the + `post_delete_exascale_db_storage_vault` interceptor. The (possibly modified) response returned by + `post_delete_exascale_db_storage_vault` will be passed to + `post_delete_exascale_db_storage_vault_with_metadata`. 
""" return response, metadata - def pre_list_cloud_exadata_infrastructures( + def pre_delete_odb_network( self, - request: oracledatabase.ListCloudExadataInfrastructuresRequest, + request: odb_network.DeleteOdbNetworkRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - oracledatabase.ListCloudExadataInfrastructuresRequest, - Sequence[Tuple[str, Union[str, bytes]]], + odb_network.DeleteOdbNetworkRequest, Sequence[Tuple[str, Union[str, bytes]]] ]: - """Pre-rpc interceptor for list_cloud_exadata_infrastructures + """Pre-rpc interceptor for delete_odb_network Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_list_cloud_exadata_infrastructures( - self, response: oracledatabase.ListCloudExadataInfrastructuresResponse - ) -> oracledatabase.ListCloudExadataInfrastructuresResponse: - """Post-rpc interceptor for list_cloud_exadata_infrastructures + def post_delete_odb_network( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_odb_network - DEPRECATED. Please use the `post_list_cloud_exadata_infrastructures_with_metadata` + DEPRECATED. Please use the `post_delete_odb_network_with_metadata` interceptor instead. Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. This `post_list_cloud_exadata_infrastructures` interceptor runs - before the `post_list_cloud_exadata_infrastructures_with_metadata` interceptor. + it is returned to user code. This `post_delete_odb_network` interceptor runs + before the `post_delete_odb_network_with_metadata` interceptor. 
""" return response - def post_list_cloud_exadata_infrastructures_with_metadata( + def post_delete_odb_network_with_metadata( self, - response: oracledatabase.ListCloudExadataInfrastructuresResponse, + response: operations_pb2.Operation, metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - oracledatabase.ListCloudExadataInfrastructuresResponse, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Post-rpc interceptor for list_cloud_exadata_infrastructures + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_odb_network Override in a subclass to read or manipulate the response or metadata after it is returned by the OracleDatabase server but before it is returned to user code. - We recommend only using this `post_list_cloud_exadata_infrastructures_with_metadata` - interceptor in new development instead of the `post_list_cloud_exadata_infrastructures` interceptor. - When both interceptors are used, this `post_list_cloud_exadata_infrastructures_with_metadata` interceptor runs after the - `post_list_cloud_exadata_infrastructures` interceptor. The (possibly modified) response returned by - `post_list_cloud_exadata_infrastructures` will be passed to - `post_list_cloud_exadata_infrastructures_with_metadata`. + We recommend only using this `post_delete_odb_network_with_metadata` + interceptor in new development instead of the `post_delete_odb_network` interceptor. + When both interceptors are used, this `post_delete_odb_network_with_metadata` interceptor runs after the + `post_delete_odb_network` interceptor. The (possibly modified) response returned by + `post_delete_odb_network` will be passed to + `post_delete_odb_network_with_metadata`. 
""" return response, metadata - def pre_list_cloud_vm_clusters( + def pre_delete_odb_subnet( self, - request: oracledatabase.ListCloudVmClustersRequest, + request: odb_subnet.DeleteOdbSubnetRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - oracledatabase.ListCloudVmClustersRequest, - Sequence[Tuple[str, Union[str, bytes]]], + odb_subnet.DeleteOdbSubnetRequest, Sequence[Tuple[str, Union[str, bytes]]] ]: - """Pre-rpc interceptor for list_cloud_vm_clusters + """Pre-rpc interceptor for delete_odb_subnet Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_list_cloud_vm_clusters( - self, response: oracledatabase.ListCloudVmClustersResponse - ) -> oracledatabase.ListCloudVmClustersResponse: - """Post-rpc interceptor for list_cloud_vm_clusters + def post_delete_odb_subnet( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_odb_subnet - DEPRECATED. Please use the `post_list_cloud_vm_clusters_with_metadata` + DEPRECATED. Please use the `post_delete_odb_subnet_with_metadata` interceptor instead. Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. This `post_list_cloud_vm_clusters` interceptor runs - before the `post_list_cloud_vm_clusters_with_metadata` interceptor. + it is returned to user code. This `post_delete_odb_subnet` interceptor runs + before the `post_delete_odb_subnet_with_metadata` interceptor. 
""" return response - def post_list_cloud_vm_clusters_with_metadata( + def post_delete_odb_subnet_with_metadata( self, - response: oracledatabase.ListCloudVmClustersResponse, + response: operations_pb2.Operation, metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - oracledatabase.ListCloudVmClustersResponse, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Post-rpc interceptor for list_cloud_vm_clusters + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_odb_subnet Override in a subclass to read or manipulate the response or metadata after it is returned by the OracleDatabase server but before it is returned to user code. - We recommend only using this `post_list_cloud_vm_clusters_with_metadata` - interceptor in new development instead of the `post_list_cloud_vm_clusters` interceptor. - When both interceptors are used, this `post_list_cloud_vm_clusters_with_metadata` interceptor runs after the - `post_list_cloud_vm_clusters` interceptor. The (possibly modified) response returned by - `post_list_cloud_vm_clusters` will be passed to - `post_list_cloud_vm_clusters_with_metadata`. + We recommend only using this `post_delete_odb_subnet_with_metadata` + interceptor in new development instead of the `post_delete_odb_subnet` interceptor. + When both interceptors are used, this `post_delete_odb_subnet_with_metadata` interceptor runs after the + `post_delete_odb_subnet` interceptor. The (possibly modified) response returned by + `post_delete_odb_subnet` will be passed to + `post_delete_odb_subnet_with_metadata`. 
""" return response, metadata - def pre_list_db_nodes( + def pre_failover_autonomous_database( self, - request: oracledatabase.ListDbNodesRequest, + request: oracledatabase.FailoverAutonomousDatabaseRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - oracledatabase.ListDbNodesRequest, Sequence[Tuple[str, Union[str, bytes]]] + oracledatabase.FailoverAutonomousDatabaseRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: - """Pre-rpc interceptor for list_db_nodes + """Pre-rpc interceptor for failover_autonomous_database Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_list_db_nodes( - self, response: oracledatabase.ListDbNodesResponse - ) -> oracledatabase.ListDbNodesResponse: - """Post-rpc interceptor for list_db_nodes + def post_failover_autonomous_database( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for failover_autonomous_database - DEPRECATED. Please use the `post_list_db_nodes_with_metadata` + DEPRECATED. Please use the `post_failover_autonomous_database_with_metadata` interceptor instead. Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. This `post_list_db_nodes` interceptor runs - before the `post_list_db_nodes_with_metadata` interceptor. + it is returned to user code. This `post_failover_autonomous_database` interceptor runs + before the `post_failover_autonomous_database_with_metadata` interceptor. 
""" return response - def post_list_db_nodes_with_metadata( + def post_failover_autonomous_database_with_metadata( self, - response: oracledatabase.ListDbNodesResponse, + response: operations_pb2.Operation, metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - oracledatabase.ListDbNodesResponse, Sequence[Tuple[str, Union[str, bytes]]] - ]: - """Post-rpc interceptor for list_db_nodes + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for failover_autonomous_database Override in a subclass to read or manipulate the response or metadata after it is returned by the OracleDatabase server but before it is returned to user code. - We recommend only using this `post_list_db_nodes_with_metadata` - interceptor in new development instead of the `post_list_db_nodes` interceptor. - When both interceptors are used, this `post_list_db_nodes_with_metadata` interceptor runs after the - `post_list_db_nodes` interceptor. The (possibly modified) response returned by - `post_list_db_nodes` will be passed to - `post_list_db_nodes_with_metadata`. + We recommend only using this `post_failover_autonomous_database_with_metadata` + interceptor in new development instead of the `post_failover_autonomous_database` interceptor. + When both interceptors are used, this `post_failover_autonomous_database_with_metadata` interceptor runs after the + `post_failover_autonomous_database` interceptor. The (possibly modified) response returned by + `post_failover_autonomous_database` will be passed to + `post_failover_autonomous_database_with_metadata`. 
""" return response, metadata - def pre_list_db_servers( + def pre_generate_autonomous_database_wallet( self, - request: oracledatabase.ListDbServersRequest, + request: oracledatabase.GenerateAutonomousDatabaseWalletRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - oracledatabase.ListDbServersRequest, Sequence[Tuple[str, Union[str, bytes]]] + oracledatabase.GenerateAutonomousDatabaseWalletRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: - """Pre-rpc interceptor for list_db_servers + """Pre-rpc interceptor for generate_autonomous_database_wallet Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_list_db_servers( - self, response: oracledatabase.ListDbServersResponse - ) -> oracledatabase.ListDbServersResponse: - """Post-rpc interceptor for list_db_servers + def post_generate_autonomous_database_wallet( + self, response: oracledatabase.GenerateAutonomousDatabaseWalletResponse + ) -> oracledatabase.GenerateAutonomousDatabaseWalletResponse: + """Post-rpc interceptor for generate_autonomous_database_wallet - DEPRECATED. Please use the `post_list_db_servers_with_metadata` + DEPRECATED. Please use the `post_generate_autonomous_database_wallet_with_metadata` interceptor instead. Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. This `post_list_db_servers` interceptor runs - before the `post_list_db_servers_with_metadata` interceptor. + it is returned to user code. This `post_generate_autonomous_database_wallet` interceptor runs + before the `post_generate_autonomous_database_wallet_with_metadata` interceptor. 
""" return response - def post_list_db_servers_with_metadata( + def post_generate_autonomous_database_wallet_with_metadata( self, - response: oracledatabase.ListDbServersResponse, + response: oracledatabase.GenerateAutonomousDatabaseWalletResponse, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - oracledatabase.ListDbServersResponse, Sequence[Tuple[str, Union[str, bytes]]] + oracledatabase.GenerateAutonomousDatabaseWalletResponse, + Sequence[Tuple[str, Union[str, bytes]]], ]: - """Post-rpc interceptor for list_db_servers + """Post-rpc interceptor for generate_autonomous_database_wallet Override in a subclass to read or manipulate the response or metadata after it is returned by the OracleDatabase server but before it is returned to user code. - We recommend only using this `post_list_db_servers_with_metadata` - interceptor in new development instead of the `post_list_db_servers` interceptor. - When both interceptors are used, this `post_list_db_servers_with_metadata` interceptor runs after the - `post_list_db_servers` interceptor. The (possibly modified) response returned by - `post_list_db_servers` will be passed to - `post_list_db_servers_with_metadata`. + We recommend only using this `post_generate_autonomous_database_wallet_with_metadata` + interceptor in new development instead of the `post_generate_autonomous_database_wallet` interceptor. + When both interceptors are used, this `post_generate_autonomous_database_wallet_with_metadata` interceptor runs after the + `post_generate_autonomous_database_wallet` interceptor. The (possibly modified) response returned by + `post_generate_autonomous_database_wallet` will be passed to + `post_generate_autonomous_database_wallet_with_metadata`. 
""" return response, metadata - def pre_list_db_system_shapes( + def pre_get_autonomous_database( self, - request: oracledatabase.ListDbSystemShapesRequest, + request: oracledatabase.GetAutonomousDatabaseRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - oracledatabase.ListDbSystemShapesRequest, + oracledatabase.GetAutonomousDatabaseRequest, Sequence[Tuple[str, Union[str, bytes]]], ]: - """Pre-rpc interceptor for list_db_system_shapes + """Pre-rpc interceptor for get_autonomous_database Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_list_db_system_shapes( - self, response: oracledatabase.ListDbSystemShapesResponse - ) -> oracledatabase.ListDbSystemShapesResponse: - """Post-rpc interceptor for list_db_system_shapes + def post_get_autonomous_database( + self, response: autonomous_database.AutonomousDatabase + ) -> autonomous_database.AutonomousDatabase: + """Post-rpc interceptor for get_autonomous_database - DEPRECATED. Please use the `post_list_db_system_shapes_with_metadata` + DEPRECATED. Please use the `post_get_autonomous_database_with_metadata` interceptor instead. Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. This `post_list_db_system_shapes` interceptor runs - before the `post_list_db_system_shapes_with_metadata` interceptor. + it is returned to user code. This `post_get_autonomous_database` interceptor runs + before the `post_get_autonomous_database_with_metadata` interceptor. 
""" return response - def post_list_db_system_shapes_with_metadata( + def post_get_autonomous_database_with_metadata( self, - response: oracledatabase.ListDbSystemShapesResponse, + response: autonomous_database.AutonomousDatabase, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - oracledatabase.ListDbSystemShapesResponse, - Sequence[Tuple[str, Union[str, bytes]]], + autonomous_database.AutonomousDatabase, Sequence[Tuple[str, Union[str, bytes]]] ]: - """Post-rpc interceptor for list_db_system_shapes + """Post-rpc interceptor for get_autonomous_database Override in a subclass to read or manipulate the response or metadata after it is returned by the OracleDatabase server but before it is returned to user code. - We recommend only using this `post_list_db_system_shapes_with_metadata` - interceptor in new development instead of the `post_list_db_system_shapes` interceptor. - When both interceptors are used, this `post_list_db_system_shapes_with_metadata` interceptor runs after the - `post_list_db_system_shapes` interceptor. The (possibly modified) response returned by - `post_list_db_system_shapes` will be passed to - `post_list_db_system_shapes_with_metadata`. + We recommend only using this `post_get_autonomous_database_with_metadata` + interceptor in new development instead of the `post_get_autonomous_database` interceptor. + When both interceptors are used, this `post_get_autonomous_database_with_metadata` interceptor runs after the + `post_get_autonomous_database` interceptor. The (possibly modified) response returned by + `post_get_autonomous_database` will be passed to + `post_get_autonomous_database_with_metadata`. 
""" return response, metadata - def pre_list_entitlements( + def pre_get_cloud_exadata_infrastructure( self, - request: oracledatabase.ListEntitlementsRequest, + request: oracledatabase.GetCloudExadataInfrastructureRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - oracledatabase.ListEntitlementsRequest, Sequence[Tuple[str, Union[str, bytes]]] + oracledatabase.GetCloudExadataInfrastructureRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: - """Pre-rpc interceptor for list_entitlements + """Pre-rpc interceptor for get_cloud_exadata_infrastructure Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_list_entitlements( - self, response: oracledatabase.ListEntitlementsResponse - ) -> oracledatabase.ListEntitlementsResponse: - """Post-rpc interceptor for list_entitlements + def post_get_cloud_exadata_infrastructure( + self, response: exadata_infra.CloudExadataInfrastructure + ) -> exadata_infra.CloudExadataInfrastructure: + """Post-rpc interceptor for get_cloud_exadata_infrastructure - DEPRECATED. Please use the `post_list_entitlements_with_metadata` + DEPRECATED. Please use the `post_get_cloud_exadata_infrastructure_with_metadata` interceptor instead. Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. This `post_list_entitlements` interceptor runs - before the `post_list_entitlements_with_metadata` interceptor. + it is returned to user code. This `post_get_cloud_exadata_infrastructure` interceptor runs + before the `post_get_cloud_exadata_infrastructure_with_metadata` interceptor. 
""" return response - def post_list_entitlements_with_metadata( + def post_get_cloud_exadata_infrastructure_with_metadata( self, - response: oracledatabase.ListEntitlementsResponse, + response: exadata_infra.CloudExadataInfrastructure, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - oracledatabase.ListEntitlementsResponse, Sequence[Tuple[str, Union[str, bytes]]] + exadata_infra.CloudExadataInfrastructure, + Sequence[Tuple[str, Union[str, bytes]]], ]: - """Post-rpc interceptor for list_entitlements + """Post-rpc interceptor for get_cloud_exadata_infrastructure Override in a subclass to read or manipulate the response or metadata after it is returned by the OracleDatabase server but before it is returned to user code. - We recommend only using this `post_list_entitlements_with_metadata` - interceptor in new development instead of the `post_list_entitlements` interceptor. - When both interceptors are used, this `post_list_entitlements_with_metadata` interceptor runs after the - `post_list_entitlements` interceptor. The (possibly modified) response returned by - `post_list_entitlements` will be passed to - `post_list_entitlements_with_metadata`. + We recommend only using this `post_get_cloud_exadata_infrastructure_with_metadata` + interceptor in new development instead of the `post_get_cloud_exadata_infrastructure` interceptor. + When both interceptors are used, this `post_get_cloud_exadata_infrastructure_with_metadata` interceptor runs after the + `post_get_cloud_exadata_infrastructure` interceptor. The (possibly modified) response returned by + `post_get_cloud_exadata_infrastructure` will be passed to + `post_get_cloud_exadata_infrastructure_with_metadata`. 
""" return response, metadata - def pre_list_gi_versions( + def pre_get_cloud_vm_cluster( self, - request: oracledatabase.ListGiVersionsRequest, + request: oracledatabase.GetCloudVmClusterRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - oracledatabase.ListGiVersionsRequest, Sequence[Tuple[str, Union[str, bytes]]] + oracledatabase.GetCloudVmClusterRequest, Sequence[Tuple[str, Union[str, bytes]]] ]: - """Pre-rpc interceptor for list_gi_versions + """Pre-rpc interceptor for get_cloud_vm_cluster Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_list_gi_versions( - self, response: oracledatabase.ListGiVersionsResponse - ) -> oracledatabase.ListGiVersionsResponse: - """Post-rpc interceptor for list_gi_versions + def post_get_cloud_vm_cluster( + self, response: vm_cluster.CloudVmCluster + ) -> vm_cluster.CloudVmCluster: + """Post-rpc interceptor for get_cloud_vm_cluster - DEPRECATED. Please use the `post_list_gi_versions_with_metadata` + DEPRECATED. Please use the `post_get_cloud_vm_cluster_with_metadata` interceptor instead. Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. This `post_list_gi_versions` interceptor runs - before the `post_list_gi_versions_with_metadata` interceptor. + it is returned to user code. This `post_get_cloud_vm_cluster` interceptor runs + before the `post_get_cloud_vm_cluster_with_metadata` interceptor. 
""" return response - def post_list_gi_versions_with_metadata( + def post_get_cloud_vm_cluster_with_metadata( self, - response: oracledatabase.ListGiVersionsResponse, + response: vm_cluster.CloudVmCluster, metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - oracledatabase.ListGiVersionsResponse, Sequence[Tuple[str, Union[str, bytes]]] - ]: - """Post-rpc interceptor for list_gi_versions + ) -> Tuple[vm_cluster.CloudVmCluster, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_cloud_vm_cluster Override in a subclass to read or manipulate the response or metadata after it is returned by the OracleDatabase server but before it is returned to user code. - We recommend only using this `post_list_gi_versions_with_metadata` - interceptor in new development instead of the `post_list_gi_versions` interceptor. - When both interceptors are used, this `post_list_gi_versions_with_metadata` interceptor runs after the - `post_list_gi_versions` interceptor. The (possibly modified) response returned by - `post_list_gi_versions` will be passed to - `post_list_gi_versions_with_metadata`. + We recommend only using this `post_get_cloud_vm_cluster_with_metadata` + interceptor in new development instead of the `post_get_cloud_vm_cluster` interceptor. + When both interceptors are used, this `post_get_cloud_vm_cluster_with_metadata` interceptor runs after the + `post_get_cloud_vm_cluster` interceptor. The (possibly modified) response returned by + `post_get_cloud_vm_cluster` will be passed to + `post_get_cloud_vm_cluster_with_metadata`. 
""" return response, metadata - def pre_restart_autonomous_database( + def pre_get_database( self, - request: oracledatabase.RestartAutonomousDatabaseRequest, + request: database.GetDatabaseRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - oracledatabase.RestartAutonomousDatabaseRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for restart_autonomous_database + ) -> Tuple[database.GetDatabaseRequest, Sequence[Tuple[str, Union[str, bytes]]]]: + """Pre-rpc interceptor for get_database Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_restart_autonomous_database( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for restart_autonomous_database + def post_get_database(self, response: database.Database) -> database.Database: + """Post-rpc interceptor for get_database - DEPRECATED. Please use the `post_restart_autonomous_database_with_metadata` + DEPRECATED. Please use the `post_get_database_with_metadata` interceptor instead. Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. This `post_restart_autonomous_database` interceptor runs - before the `post_restart_autonomous_database_with_metadata` interceptor. + it is returned to user code. This `post_get_database` interceptor runs + before the `post_get_database_with_metadata` interceptor. 
""" return response - def post_restart_autonomous_database_with_metadata( + def post_get_database_with_metadata( self, - response: operations_pb2.Operation, + response: database.Database, metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for restart_autonomous_database + ) -> Tuple[database.Database, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_database Override in a subclass to read or manipulate the response or metadata after it is returned by the OracleDatabase server but before it is returned to user code. - We recommend only using this `post_restart_autonomous_database_with_metadata` - interceptor in new development instead of the `post_restart_autonomous_database` interceptor. - When both interceptors are used, this `post_restart_autonomous_database_with_metadata` interceptor runs after the - `post_restart_autonomous_database` interceptor. The (possibly modified) response returned by - `post_restart_autonomous_database` will be passed to - `post_restart_autonomous_database_with_metadata`. + We recommend only using this `post_get_database_with_metadata` + interceptor in new development instead of the `post_get_database` interceptor. + When both interceptors are used, this `post_get_database_with_metadata` interceptor runs after the + `post_get_database` interceptor. The (possibly modified) response returned by + `post_get_database` will be passed to + `post_get_database_with_metadata`. 
""" return response, metadata - def pre_restore_autonomous_database( + def pre_get_db_system( self, - request: oracledatabase.RestoreAutonomousDatabaseRequest, + request: db_system.GetDbSystemRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - oracledatabase.RestoreAutonomousDatabaseRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for restore_autonomous_database + ) -> Tuple[db_system.GetDbSystemRequest, Sequence[Tuple[str, Union[str, bytes]]]]: + """Pre-rpc interceptor for get_db_system Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_restore_autonomous_database( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for restore_autonomous_database + def post_get_db_system(self, response: db_system.DbSystem) -> db_system.DbSystem: + """Post-rpc interceptor for get_db_system - DEPRECATED. Please use the `post_restore_autonomous_database_with_metadata` + DEPRECATED. Please use the `post_get_db_system_with_metadata` interceptor instead. Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. This `post_restore_autonomous_database` interceptor runs - before the `post_restore_autonomous_database_with_metadata` interceptor. + it is returned to user code. This `post_get_db_system` interceptor runs + before the `post_get_db_system_with_metadata` interceptor. 
""" return response - def post_restore_autonomous_database_with_metadata( + def post_get_db_system_with_metadata( self, - response: operations_pb2.Operation, + response: db_system.DbSystem, metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for restore_autonomous_database + ) -> Tuple[db_system.DbSystem, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_db_system Override in a subclass to read or manipulate the response or metadata after it is returned by the OracleDatabase server but before it is returned to user code. - We recommend only using this `post_restore_autonomous_database_with_metadata` - interceptor in new development instead of the `post_restore_autonomous_database` interceptor. - When both interceptors are used, this `post_restore_autonomous_database_with_metadata` interceptor runs after the - `post_restore_autonomous_database` interceptor. The (possibly modified) response returned by - `post_restore_autonomous_database` will be passed to - `post_restore_autonomous_database_with_metadata`. + We recommend only using this `post_get_db_system_with_metadata` + interceptor in new development instead of the `post_get_db_system` interceptor. + When both interceptors are used, this `post_get_db_system_with_metadata` interceptor runs after the + `post_get_db_system` interceptor. The (possibly modified) response returned by + `post_get_db_system` will be passed to + `post_get_db_system_with_metadata`. 
""" return response, metadata - def pre_start_autonomous_database( + def pre_get_exadb_vm_cluster( self, - request: oracledatabase.StartAutonomousDatabaseRequest, + request: oracledatabase.GetExadbVmClusterRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - oracledatabase.StartAutonomousDatabaseRequest, - Sequence[Tuple[str, Union[str, bytes]]], + oracledatabase.GetExadbVmClusterRequest, Sequence[Tuple[str, Union[str, bytes]]] ]: - """Pre-rpc interceptor for start_autonomous_database + """Pre-rpc interceptor for get_exadb_vm_cluster Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_start_autonomous_database( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for start_autonomous_database + def post_get_exadb_vm_cluster( + self, response: exadb_vm_cluster.ExadbVmCluster + ) -> exadb_vm_cluster.ExadbVmCluster: + """Post-rpc interceptor for get_exadb_vm_cluster - DEPRECATED. Please use the `post_start_autonomous_database_with_metadata` + DEPRECATED. Please use the `post_get_exadb_vm_cluster_with_metadata` interceptor instead. Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. This `post_start_autonomous_database` interceptor runs - before the `post_start_autonomous_database_with_metadata` interceptor. + it is returned to user code. This `post_get_exadb_vm_cluster` interceptor runs + before the `post_get_exadb_vm_cluster_with_metadata` interceptor. 
""" return response - def post_start_autonomous_database_with_metadata( + def post_get_exadb_vm_cluster_with_metadata( self, - response: operations_pb2.Operation, + response: exadb_vm_cluster.ExadbVmCluster, metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for start_autonomous_database + ) -> Tuple[ + exadb_vm_cluster.ExadbVmCluster, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for get_exadb_vm_cluster Override in a subclass to read or manipulate the response or metadata after it is returned by the OracleDatabase server but before it is returned to user code. - We recommend only using this `post_start_autonomous_database_with_metadata` - interceptor in new development instead of the `post_start_autonomous_database` interceptor. - When both interceptors are used, this `post_start_autonomous_database_with_metadata` interceptor runs after the - `post_start_autonomous_database` interceptor. The (possibly modified) response returned by - `post_start_autonomous_database` will be passed to - `post_start_autonomous_database_with_metadata`. + We recommend only using this `post_get_exadb_vm_cluster_with_metadata` + interceptor in new development instead of the `post_get_exadb_vm_cluster` interceptor. + When both interceptors are used, this `post_get_exadb_vm_cluster_with_metadata` interceptor runs after the + `post_get_exadb_vm_cluster` interceptor. The (possibly modified) response returned by + `post_get_exadb_vm_cluster` will be passed to + `post_get_exadb_vm_cluster_with_metadata`. 
""" return response, metadata - def pre_stop_autonomous_database( + def pre_get_exascale_db_storage_vault( self, - request: oracledatabase.StopAutonomousDatabaseRequest, + request: exascale_db_storage_vault.GetExascaleDbStorageVaultRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - oracledatabase.StopAutonomousDatabaseRequest, + exascale_db_storage_vault.GetExascaleDbStorageVaultRequest, Sequence[Tuple[str, Union[str, bytes]]], ]: - """Pre-rpc interceptor for stop_autonomous_database + """Pre-rpc interceptor for get_exascale_db_storage_vault Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_stop_autonomous_database( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for stop_autonomous_database + def post_get_exascale_db_storage_vault( + self, response: exascale_db_storage_vault.ExascaleDbStorageVault + ) -> exascale_db_storage_vault.ExascaleDbStorageVault: + """Post-rpc interceptor for get_exascale_db_storage_vault - DEPRECATED. Please use the `post_stop_autonomous_database_with_metadata` + DEPRECATED. Please use the `post_get_exascale_db_storage_vault_with_metadata` interceptor instead. Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. This `post_stop_autonomous_database` interceptor runs - before the `post_stop_autonomous_database_with_metadata` interceptor. + it is returned to user code. This `post_get_exascale_db_storage_vault` interceptor runs + before the `post_get_exascale_db_storage_vault_with_metadata` interceptor. 
""" return response - def post_stop_autonomous_database_with_metadata( + def post_get_exascale_db_storage_vault_with_metadata( self, - response: operations_pb2.Operation, + response: exascale_db_storage_vault.ExascaleDbStorageVault, metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for stop_autonomous_database + ) -> Tuple[ + exascale_db_storage_vault.ExascaleDbStorageVault, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for get_exascale_db_storage_vault Override in a subclass to read or manipulate the response or metadata after it is returned by the OracleDatabase server but before it is returned to user code. - We recommend only using this `post_stop_autonomous_database_with_metadata` - interceptor in new development instead of the `post_stop_autonomous_database` interceptor. - When both interceptors are used, this `post_stop_autonomous_database_with_metadata` interceptor runs after the - `post_stop_autonomous_database` interceptor. The (possibly modified) response returned by - `post_stop_autonomous_database` will be passed to - `post_stop_autonomous_database_with_metadata`. + We recommend only using this `post_get_exascale_db_storage_vault_with_metadata` + interceptor in new development instead of the `post_get_exascale_db_storage_vault` interceptor. + When both interceptors are used, this `post_get_exascale_db_storage_vault_with_metadata` interceptor runs after the + `post_get_exascale_db_storage_vault` interceptor. The (possibly modified) response returned by + `post_get_exascale_db_storage_vault` will be passed to + `post_get_exascale_db_storage_vault_with_metadata`. 
""" return response, metadata - def pre_get_location( + def pre_get_odb_network( self, - request: locations_pb2.GetLocationRequest, + request: odb_network.GetOdbNetworkRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - locations_pb2.GetLocationRequest, Sequence[Tuple[str, Union[str, bytes]]] + odb_network.GetOdbNetworkRequest, Sequence[Tuple[str, Union[str, bytes]]] ]: - """Pre-rpc interceptor for get_location + """Pre-rpc interceptor for get_odb_network Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_get_location( - self, response: locations_pb2.Location - ) -> locations_pb2.Location: - """Post-rpc interceptor for get_location + def post_get_odb_network( + self, response: odb_network.OdbNetwork + ) -> odb_network.OdbNetwork: + """Post-rpc interceptor for get_odb_network - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_odb_network_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. + it is returned to user code. This `post_get_odb_network` interceptor runs + before the `post_get_odb_network_with_metadata` interceptor. 
""" return response - def pre_list_locations( + def post_get_odb_network_with_metadata( self, - request: locations_pb2.ListLocationsRequest, + response: odb_network.OdbNetwork, metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - locations_pb2.ListLocationsRequest, Sequence[Tuple[str, Union[str, bytes]]] - ]: - """Pre-rpc interceptor for list_locations + ) -> Tuple[odb_network.OdbNetwork, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_odb_network + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_get_odb_network_with_metadata` + interceptor in new development instead of the `post_get_odb_network` interceptor. + When both interceptors are used, this `post_get_odb_network_with_metadata` interceptor runs after the + `post_get_odb_network` interceptor. The (possibly modified) response returned by + `post_get_odb_network` will be passed to + `post_get_odb_network_with_metadata`. + """ + return response, metadata + + def pre_get_odb_subnet( + self, + request: odb_subnet.GetOdbSubnetRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[odb_subnet.GetOdbSubnetRequest, Sequence[Tuple[str, Union[str, bytes]]]]: + """Pre-rpc interceptor for get_odb_subnet Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_list_locations( - self, response: locations_pb2.ListLocationsResponse - ) -> locations_pb2.ListLocationsResponse: - """Post-rpc interceptor for list_locations + def post_get_odb_subnet( + self, response: odb_subnet.OdbSubnet + ) -> odb_subnet.OdbSubnet: + """Post-rpc interceptor for get_odb_subnet - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_odb_subnet_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. + it is returned to user code. This `post_get_odb_subnet` interceptor runs + before the `post_get_odb_subnet_with_metadata` interceptor. """ return response - def pre_cancel_operation( + def post_get_odb_subnet_with_metadata( self, - request: operations_pb2.CancelOperationRequest, + response: odb_subnet.OdbSubnet, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[odb_subnet.OdbSubnet, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_odb_subnet + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_get_odb_subnet_with_metadata` + interceptor in new development instead of the `post_get_odb_subnet` interceptor. + When both interceptors are used, this `post_get_odb_subnet_with_metadata` interceptor runs after the + `post_get_odb_subnet` interceptor. The (possibly modified) response returned by + `post_get_odb_subnet` will be passed to + `post_get_odb_subnet_with_metadata`. + """ + return response, metadata + + def pre_get_pluggable_database( + self, + request: pluggable_database.GetPluggableDatabaseRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - operations_pb2.CancelOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + pluggable_database.GetPluggableDatabaseRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: - """Pre-rpc interceptor for cancel_operation + """Pre-rpc interceptor for get_pluggable_database Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. 
""" return request, metadata - def post_cancel_operation(self, response: None) -> None: - """Post-rpc interceptor for cancel_operation + def post_get_pluggable_database( + self, response: pluggable_database.PluggableDatabase + ) -> pluggable_database.PluggableDatabase: + """Post-rpc interceptor for get_pluggable_database - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_pluggable_database_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. + it is returned to user code. This `post_get_pluggable_database` interceptor runs + before the `post_get_pluggable_database_with_metadata` interceptor. """ return response - def pre_delete_operation( + def post_get_pluggable_database_with_metadata( self, - request: operations_pb2.DeleteOperationRequest, + response: pluggable_database.PluggableDatabase, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + pluggable_database.PluggableDatabase, Sequence[Tuple[str, Union[str, bytes]]] ]: - """Pre-rpc interceptor for delete_operation + """Post-rpc interceptor for get_pluggable_database - Override in a subclass to manipulate the request or metadata - before they are sent to the OracleDatabase server. - """ - return request, metadata - - def post_delete_operation(self, response: None) -> None: - """Post-rpc interceptor for delete_operation + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. - Override in a subclass to manipulate the response - after it is returned by the OracleDatabase server but before - it is returned to user code. 
+ We recommend only using this `post_get_pluggable_database_with_metadata` + interceptor in new development instead of the `post_get_pluggable_database` interceptor. + When both interceptors are used, this `post_get_pluggable_database_with_metadata` interceptor runs after the + `post_get_pluggable_database` interceptor. The (possibly modified) response returned by + `post_get_pluggable_database` will be passed to + `post_get_pluggable_database_with_metadata`. """ - return response + return response, metadata - def pre_get_operation( + def pre_list_autonomous_database_backups( self, - request: operations_pb2.GetOperationRequest, + request: oracledatabase.ListAutonomousDatabaseBackupsRequest, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - operations_pb2.GetOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + oracledatabase.ListAutonomousDatabaseBackupsRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: - """Pre-rpc interceptor for get_operation + """Pre-rpc interceptor for list_autonomous_database_backups Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. """ return request, metadata - def post_get_operation( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for get_operation + def post_list_autonomous_database_backups( + self, response: oracledatabase.ListAutonomousDatabaseBackupsResponse + ) -> oracledatabase.ListAutonomousDatabaseBackupsResponse: + """Post-rpc interceptor for list_autonomous_database_backups - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_autonomous_database_backups_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. + it is returned to user code. 
This `post_list_autonomous_database_backups` interceptor runs + before the `post_list_autonomous_database_backups_with_metadata` interceptor. """ return response - def pre_list_operations( + def post_list_autonomous_database_backups_with_metadata( self, - request: operations_pb2.ListOperationsRequest, + response: oracledatabase.ListAutonomousDatabaseBackupsResponse, metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - operations_pb2.ListOperationsRequest, Sequence[Tuple[str, Union[str, bytes]]] + oracledatabase.ListAutonomousDatabaseBackupsResponse, + Sequence[Tuple[str, Union[str, bytes]]], ]: - """Pre-rpc interceptor for list_operations + """Post-rpc interceptor for list_autonomous_database_backups + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_list_autonomous_database_backups_with_metadata` + interceptor in new development instead of the `post_list_autonomous_database_backups` interceptor. + When both interceptors are used, this `post_list_autonomous_database_backups_with_metadata` interceptor runs after the + `post_list_autonomous_database_backups` interceptor. The (possibly modified) response returned by + `post_list_autonomous_database_backups` will be passed to + `post_list_autonomous_database_backups_with_metadata`. + """ + return response, metadata + + def pre_list_autonomous_database_character_sets( + self, + request: oracledatabase.ListAutonomousDatabaseCharacterSetsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.ListAutonomousDatabaseCharacterSetsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for list_autonomous_database_character_sets Override in a subclass to manipulate the request or metadata before they are sent to the OracleDatabase server. 
""" return request, metadata - def post_list_operations( - self, response: operations_pb2.ListOperationsResponse - ) -> operations_pb2.ListOperationsResponse: - """Post-rpc interceptor for list_operations + def post_list_autonomous_database_character_sets( + self, response: oracledatabase.ListAutonomousDatabaseCharacterSetsResponse + ) -> oracledatabase.ListAutonomousDatabaseCharacterSetsResponse: + """Post-rpc interceptor for list_autonomous_database_character_sets - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_autonomous_database_character_sets_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the OracleDatabase server but before - it is returned to user code. + it is returned to user code. This `post_list_autonomous_database_character_sets` interceptor runs + before the `post_list_autonomous_database_character_sets_with_metadata` interceptor. """ return response + def post_list_autonomous_database_character_sets_with_metadata( + self, + response: oracledatabase.ListAutonomousDatabaseCharacterSetsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.ListAutonomousDatabaseCharacterSetsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_autonomous_database_character_sets -@dataclasses.dataclass -class OracleDatabaseRestStub: - _session: AuthorizedSession - _host: str - _interceptor: OracleDatabaseRestInterceptor + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + We recommend only using this `post_list_autonomous_database_character_sets_with_metadata` + interceptor in new development instead of the `post_list_autonomous_database_character_sets` interceptor. 
+ When both interceptors are used, this `post_list_autonomous_database_character_sets_with_metadata` interceptor runs after the + `post_list_autonomous_database_character_sets` interceptor. The (possibly modified) response returned by + `post_list_autonomous_database_character_sets` will be passed to + `post_list_autonomous_database_character_sets_with_metadata`. + """ + return response, metadata -class OracleDatabaseRestTransport(_BaseOracleDatabaseRestTransport): - """REST backend synchronous transport for OracleDatabase. + def pre_list_autonomous_databases( + self, + request: oracledatabase.ListAutonomousDatabasesRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.ListAutonomousDatabasesRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for list_autonomous_databases - Service describing handlers for resources + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. + def post_list_autonomous_databases( + self, response: oracledatabase.ListAutonomousDatabasesResponse + ) -> oracledatabase.ListAutonomousDatabasesResponse: + """Post-rpc interceptor for list_autonomous_databases - It sends JSON representations of protocol buffers over HTTP/1.1 - """ + DEPRECATED. Please use the `post_list_autonomous_databases_with_metadata` + interceptor instead. - def __init__( + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_list_autonomous_databases` interceptor runs + before the `post_list_autonomous_databases_with_metadata` interceptor. 
+ """ + return response + + def post_list_autonomous_databases_with_metadata( self, - *, - host: str = "oracledatabase.googleapis.com", - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - url_scheme: str = "https", - interceptor: Optional[OracleDatabaseRestInterceptor] = None, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. + response: oracledatabase.ListAutonomousDatabasesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.ListAutonomousDatabasesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_autonomous_databases - Args: - host (Optional[str]): - The hostname to connect to (default: 'oracledatabase.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. - credentials_file (Optional[str]): Deprecated. A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. This argument will be - removed in the next major version of this library. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. 
- client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + We recommend only using this `post_list_autonomous_databases_with_metadata` + interceptor in new development instead of the `post_list_autonomous_databases` interceptor. + When both interceptors are used, this `post_list_autonomous_databases_with_metadata` interceptor runs after the + `post_list_autonomous_databases` interceptor. The (possibly modified) response returned by + `post_list_autonomous_databases` will be passed to + `post_list_autonomous_databases_with_metadata`. """ - # Run the base constructor - # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
- # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the - # credentials object - super().__init__( - host=host, - credentials=credentials, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - url_scheme=url_scheme, - api_audience=api_audience, - ) - self._session = AuthorizedSession( - self._credentials, default_host=self.DEFAULT_HOST - ) - self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None - if client_cert_source_for_mtls: - self._session.configure_mtls_channel(client_cert_source_for_mtls) - self._interceptor = interceptor or OracleDatabaseRestInterceptor() - self._prep_wrapped_messages(client_info) + return response, metadata - @property - def operations_client(self) -> operations_v1.AbstractOperationsClient: - """Create the client designed to process long-running operations. + def pre_list_autonomous_db_versions( + self, + request: oracledatabase.ListAutonomousDbVersionsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.ListAutonomousDbVersionsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for list_autonomous_db_versions - This property caches on the instance; repeated calls return the same - client. + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. """ - # Only create a new client if we do not already have one. 
- if self._operations_client is None: - http_options: Dict[str, List[Dict[str, str]]] = { - "google.longrunning.Operations.CancelOperation": [ - { - "method": "post", - "uri": "/v1/{name=projects/*/locations/*/operations/*}:cancel", - "body": "*", - }, - ], - "google.longrunning.Operations.DeleteOperation": [ - { - "method": "delete", - "uri": "/v1/{name=projects/*/locations/*/operations/*}", - }, - ], - "google.longrunning.Operations.GetOperation": [ - { - "method": "get", - "uri": "/v1/{name=projects/*/locations/*/operations/*}", - }, - ], - "google.longrunning.Operations.ListOperations": [ - { - "method": "get", - "uri": "/v1/{name=projects/*/locations/*}/operations", + return request, metadata + + def post_list_autonomous_db_versions( + self, response: oracledatabase.ListAutonomousDbVersionsResponse + ) -> oracledatabase.ListAutonomousDbVersionsResponse: + """Post-rpc interceptor for list_autonomous_db_versions + + DEPRECATED. Please use the `post_list_autonomous_db_versions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_list_autonomous_db_versions` interceptor runs + before the `post_list_autonomous_db_versions_with_metadata` interceptor. + """ + return response + + def post_list_autonomous_db_versions_with_metadata( + self, + response: oracledatabase.ListAutonomousDbVersionsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.ListAutonomousDbVersionsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_autonomous_db_versions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. 
+ + We recommend only using this `post_list_autonomous_db_versions_with_metadata` + interceptor in new development instead of the `post_list_autonomous_db_versions` interceptor. + When both interceptors are used, this `post_list_autonomous_db_versions_with_metadata` interceptor runs after the + `post_list_autonomous_db_versions` interceptor. The (possibly modified) response returned by + `post_list_autonomous_db_versions` will be passed to + `post_list_autonomous_db_versions_with_metadata`. + """ + return response, metadata + + def pre_list_cloud_exadata_infrastructures( + self, + request: oracledatabase.ListCloudExadataInfrastructuresRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.ListCloudExadataInfrastructuresRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for list_cloud_exadata_infrastructures + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata + + def post_list_cloud_exadata_infrastructures( + self, response: oracledatabase.ListCloudExadataInfrastructuresResponse + ) -> oracledatabase.ListCloudExadataInfrastructuresResponse: + """Post-rpc interceptor for list_cloud_exadata_infrastructures + + DEPRECATED. Please use the `post_list_cloud_exadata_infrastructures_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_list_cloud_exadata_infrastructures` interceptor runs + before the `post_list_cloud_exadata_infrastructures_with_metadata` interceptor. 
+ """ + return response + + def post_list_cloud_exadata_infrastructures_with_metadata( + self, + response: oracledatabase.ListCloudExadataInfrastructuresResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.ListCloudExadataInfrastructuresResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_cloud_exadata_infrastructures + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_list_cloud_exadata_infrastructures_with_metadata` + interceptor in new development instead of the `post_list_cloud_exadata_infrastructures` interceptor. + When both interceptors are used, this `post_list_cloud_exadata_infrastructures_with_metadata` interceptor runs after the + `post_list_cloud_exadata_infrastructures` interceptor. The (possibly modified) response returned by + `post_list_cloud_exadata_infrastructures` will be passed to + `post_list_cloud_exadata_infrastructures_with_metadata`. + """ + return response, metadata + + def pre_list_cloud_vm_clusters( + self, + request: oracledatabase.ListCloudVmClustersRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.ListCloudVmClustersRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for list_cloud_vm_clusters + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata + + def post_list_cloud_vm_clusters( + self, response: oracledatabase.ListCloudVmClustersResponse + ) -> oracledatabase.ListCloudVmClustersResponse: + """Post-rpc interceptor for list_cloud_vm_clusters + + DEPRECATED. Please use the `post_list_cloud_vm_clusters_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_list_cloud_vm_clusters` interceptor runs + before the `post_list_cloud_vm_clusters_with_metadata` interceptor. + """ + return response + + def post_list_cloud_vm_clusters_with_metadata( + self, + response: oracledatabase.ListCloudVmClustersResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.ListCloudVmClustersResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_cloud_vm_clusters + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_list_cloud_vm_clusters_with_metadata` + interceptor in new development instead of the `post_list_cloud_vm_clusters` interceptor. + When both interceptors are used, this `post_list_cloud_vm_clusters_with_metadata` interceptor runs after the + `post_list_cloud_vm_clusters` interceptor. The (possibly modified) response returned by + `post_list_cloud_vm_clusters` will be passed to + `post_list_cloud_vm_clusters_with_metadata`. + """ + return response, metadata + + def pre_list_database_character_sets( + self, + request: database_character_set.ListDatabaseCharacterSetsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + database_character_set.ListDatabaseCharacterSetsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for list_database_character_sets + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. 
+ """ + return request, metadata + + def post_list_database_character_sets( + self, response: database_character_set.ListDatabaseCharacterSetsResponse + ) -> database_character_set.ListDatabaseCharacterSetsResponse: + """Post-rpc interceptor for list_database_character_sets + + DEPRECATED. Please use the `post_list_database_character_sets_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_list_database_character_sets` interceptor runs + before the `post_list_database_character_sets_with_metadata` interceptor. + """ + return response + + def post_list_database_character_sets_with_metadata( + self, + response: database_character_set.ListDatabaseCharacterSetsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + database_character_set.ListDatabaseCharacterSetsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_database_character_sets + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_list_database_character_sets_with_metadata` + interceptor in new development instead of the `post_list_database_character_sets` interceptor. + When both interceptors are used, this `post_list_database_character_sets_with_metadata` interceptor runs after the + `post_list_database_character_sets` interceptor. The (possibly modified) response returned by + `post_list_database_character_sets` will be passed to + `post_list_database_character_sets_with_metadata`. 
+ """ + return response, metadata + + def pre_list_databases( + self, + request: database.ListDatabasesRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[database.ListDatabasesRequest, Sequence[Tuple[str, Union[str, bytes]]]]: + """Pre-rpc interceptor for list_databases + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata + + def post_list_databases( + self, response: database.ListDatabasesResponse + ) -> database.ListDatabasesResponse: + """Post-rpc interceptor for list_databases + + DEPRECATED. Please use the `post_list_databases_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_list_databases` interceptor runs + before the `post_list_databases_with_metadata` interceptor. + """ + return response + + def post_list_databases_with_metadata( + self, + response: database.ListDatabasesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[database.ListDatabasesResponse, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for list_databases + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_list_databases_with_metadata` + interceptor in new development instead of the `post_list_databases` interceptor. + When both interceptors are used, this `post_list_databases_with_metadata` interceptor runs after the + `post_list_databases` interceptor. The (possibly modified) response returned by + `post_list_databases` will be passed to + `post_list_databases_with_metadata`. 
+ """ + return response, metadata + + def pre_list_db_nodes( + self, + request: oracledatabase.ListDbNodesRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.ListDbNodesRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_db_nodes + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata + + def post_list_db_nodes( + self, response: oracledatabase.ListDbNodesResponse + ) -> oracledatabase.ListDbNodesResponse: + """Post-rpc interceptor for list_db_nodes + + DEPRECATED. Please use the `post_list_db_nodes_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_list_db_nodes` interceptor runs + before the `post_list_db_nodes_with_metadata` interceptor. + """ + return response + + def post_list_db_nodes_with_metadata( + self, + response: oracledatabase.ListDbNodesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.ListDbNodesResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_db_nodes + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_list_db_nodes_with_metadata` + interceptor in new development instead of the `post_list_db_nodes` interceptor. + When both interceptors are used, this `post_list_db_nodes_with_metadata` interceptor runs after the + `post_list_db_nodes` interceptor. The (possibly modified) response returned by + `post_list_db_nodes` will be passed to + `post_list_db_nodes_with_metadata`. 
+ """ + return response, metadata + + def pre_list_db_servers( + self, + request: oracledatabase.ListDbServersRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.ListDbServersRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_db_servers + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata + + def post_list_db_servers( + self, response: oracledatabase.ListDbServersResponse + ) -> oracledatabase.ListDbServersResponse: + """Post-rpc interceptor for list_db_servers + + DEPRECATED. Please use the `post_list_db_servers_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_list_db_servers` interceptor runs + before the `post_list_db_servers_with_metadata` interceptor. + """ + return response + + def post_list_db_servers_with_metadata( + self, + response: oracledatabase.ListDbServersResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.ListDbServersResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_db_servers + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_list_db_servers_with_metadata` + interceptor in new development instead of the `post_list_db_servers` interceptor. + When both interceptors are used, this `post_list_db_servers_with_metadata` interceptor runs after the + `post_list_db_servers` interceptor. The (possibly modified) response returned by + `post_list_db_servers` will be passed to + `post_list_db_servers_with_metadata`. 
+ """ + return response, metadata + + def pre_list_db_system_initial_storage_sizes( + self, + request: db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for list_db_system_initial_storage_sizes + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata + + def post_list_db_system_initial_storage_sizes( + self, + response: db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse, + ) -> db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse: + """Post-rpc interceptor for list_db_system_initial_storage_sizes + + DEPRECATED. Please use the `post_list_db_system_initial_storage_sizes_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_list_db_system_initial_storage_sizes` interceptor runs + before the `post_list_db_system_initial_storage_sizes_with_metadata` interceptor. + """ + return response + + def post_list_db_system_initial_storage_sizes_with_metadata( + self, + response: db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_db_system_initial_storage_sizes + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. 
+ + We recommend only using this `post_list_db_system_initial_storage_sizes_with_metadata` + interceptor in new development instead of the `post_list_db_system_initial_storage_sizes` interceptor. + When both interceptors are used, this `post_list_db_system_initial_storage_sizes_with_metadata` interceptor runs after the + `post_list_db_system_initial_storage_sizes` interceptor. The (possibly modified) response returned by + `post_list_db_system_initial_storage_sizes` will be passed to + `post_list_db_system_initial_storage_sizes_with_metadata`. + """ + return response, metadata + + def pre_list_db_systems( + self, + request: db_system.ListDbSystemsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[db_system.ListDbSystemsRequest, Sequence[Tuple[str, Union[str, bytes]]]]: + """Pre-rpc interceptor for list_db_systems + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata + + def post_list_db_systems( + self, response: db_system.ListDbSystemsResponse + ) -> db_system.ListDbSystemsResponse: + """Post-rpc interceptor for list_db_systems + + DEPRECATED. Please use the `post_list_db_systems_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_list_db_systems` interceptor runs + before the `post_list_db_systems_with_metadata` interceptor. 
+ """ + return response + + def post_list_db_systems_with_metadata( + self, + response: db_system.ListDbSystemsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + db_system.ListDbSystemsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_db_systems + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_list_db_systems_with_metadata` + interceptor in new development instead of the `post_list_db_systems` interceptor. + When both interceptors are used, this `post_list_db_systems_with_metadata` interceptor runs after the + `post_list_db_systems` interceptor. The (possibly modified) response returned by + `post_list_db_systems` will be passed to + `post_list_db_systems_with_metadata`. + """ + return response, metadata + + def pre_list_db_system_shapes( + self, + request: oracledatabase.ListDbSystemShapesRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.ListDbSystemShapesRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for list_db_system_shapes + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata + + def post_list_db_system_shapes( + self, response: oracledatabase.ListDbSystemShapesResponse + ) -> oracledatabase.ListDbSystemShapesResponse: + """Post-rpc interceptor for list_db_system_shapes + + DEPRECATED. Please use the `post_list_db_system_shapes_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_list_db_system_shapes` interceptor runs + before the `post_list_db_system_shapes_with_metadata` interceptor. 
+ """ + return response + + def post_list_db_system_shapes_with_metadata( + self, + response: oracledatabase.ListDbSystemShapesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.ListDbSystemShapesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_db_system_shapes + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_list_db_system_shapes_with_metadata` + interceptor in new development instead of the `post_list_db_system_shapes` interceptor. + When both interceptors are used, this `post_list_db_system_shapes_with_metadata` interceptor runs after the + `post_list_db_system_shapes` interceptor. The (possibly modified) response returned by + `post_list_db_system_shapes` will be passed to + `post_list_db_system_shapes_with_metadata`. + """ + return response, metadata + + def pre_list_db_versions( + self, + request: db_version.ListDbVersionsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + db_version.ListDbVersionsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_db_versions + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata + + def post_list_db_versions( + self, response: db_version.ListDbVersionsResponse + ) -> db_version.ListDbVersionsResponse: + """Post-rpc interceptor for list_db_versions + + DEPRECATED. Please use the `post_list_db_versions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_list_db_versions` interceptor runs + before the `post_list_db_versions_with_metadata` interceptor. 
+ """ + return response + + def post_list_db_versions_with_metadata( + self, + response: db_version.ListDbVersionsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + db_version.ListDbVersionsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_db_versions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_list_db_versions_with_metadata` + interceptor in new development instead of the `post_list_db_versions` interceptor. + When both interceptors are used, this `post_list_db_versions_with_metadata` interceptor runs after the + `post_list_db_versions` interceptor. The (possibly modified) response returned by + `post_list_db_versions` will be passed to + `post_list_db_versions_with_metadata`. + """ + return response, metadata + + def pre_list_entitlements( + self, + request: oracledatabase.ListEntitlementsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.ListEntitlementsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_entitlements + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata + + def post_list_entitlements( + self, response: oracledatabase.ListEntitlementsResponse + ) -> oracledatabase.ListEntitlementsResponse: + """Post-rpc interceptor for list_entitlements + + DEPRECATED. Please use the `post_list_entitlements_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_list_entitlements` interceptor runs + before the `post_list_entitlements_with_metadata` interceptor. 
+ """ + return response + + def post_list_entitlements_with_metadata( + self, + response: oracledatabase.ListEntitlementsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.ListEntitlementsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_entitlements + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_list_entitlements_with_metadata` + interceptor in new development instead of the `post_list_entitlements` interceptor. + When both interceptors are used, this `post_list_entitlements_with_metadata` interceptor runs after the + `post_list_entitlements` interceptor. The (possibly modified) response returned by + `post_list_entitlements` will be passed to + `post_list_entitlements_with_metadata`. + """ + return response, metadata + + def pre_list_exadb_vm_clusters( + self, + request: oracledatabase.ListExadbVmClustersRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.ListExadbVmClustersRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for list_exadb_vm_clusters + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata + + def post_list_exadb_vm_clusters( + self, response: oracledatabase.ListExadbVmClustersResponse + ) -> oracledatabase.ListExadbVmClustersResponse: + """Post-rpc interceptor for list_exadb_vm_clusters + + DEPRECATED. Please use the `post_list_exadb_vm_clusters_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. 
This `post_list_exadb_vm_clusters` interceptor runs + before the `post_list_exadb_vm_clusters_with_metadata` interceptor. + """ + return response + + def post_list_exadb_vm_clusters_with_metadata( + self, + response: oracledatabase.ListExadbVmClustersResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.ListExadbVmClustersResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_exadb_vm_clusters + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_list_exadb_vm_clusters_with_metadata` + interceptor in new development instead of the `post_list_exadb_vm_clusters` interceptor. + When both interceptors are used, this `post_list_exadb_vm_clusters_with_metadata` interceptor runs after the + `post_list_exadb_vm_clusters` interceptor. The (possibly modified) response returned by + `post_list_exadb_vm_clusters` will be passed to + `post_list_exadb_vm_clusters_with_metadata`. + """ + return response, metadata + + def pre_list_exascale_db_storage_vaults( + self, + request: exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for list_exascale_db_storage_vaults + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata + + def post_list_exascale_db_storage_vaults( + self, response: exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse + ) -> exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse: + """Post-rpc interceptor for list_exascale_db_storage_vaults + + DEPRECATED. 
Please use the `post_list_exascale_db_storage_vaults_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_list_exascale_db_storage_vaults` interceptor runs + before the `post_list_exascale_db_storage_vaults_with_metadata` interceptor. + """ + return response + + def post_list_exascale_db_storage_vaults_with_metadata( + self, + response: exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_exascale_db_storage_vaults + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_list_exascale_db_storage_vaults_with_metadata` + interceptor in new development instead of the `post_list_exascale_db_storage_vaults` interceptor. + When both interceptors are used, this `post_list_exascale_db_storage_vaults_with_metadata` interceptor runs after the + `post_list_exascale_db_storage_vaults` interceptor. The (possibly modified) response returned by + `post_list_exascale_db_storage_vaults` will be passed to + `post_list_exascale_db_storage_vaults_with_metadata`. + """ + return response, metadata + + def pre_list_gi_versions( + self, + request: oracledatabase.ListGiVersionsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.ListGiVersionsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_gi_versions + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. 
+ """ + return request, metadata + + def post_list_gi_versions( + self, response: oracledatabase.ListGiVersionsResponse + ) -> oracledatabase.ListGiVersionsResponse: + """Post-rpc interceptor for list_gi_versions + + DEPRECATED. Please use the `post_list_gi_versions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_list_gi_versions` interceptor runs + before the `post_list_gi_versions_with_metadata` interceptor. + """ + return response + + def post_list_gi_versions_with_metadata( + self, + response: oracledatabase.ListGiVersionsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.ListGiVersionsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_gi_versions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_list_gi_versions_with_metadata` + interceptor in new development instead of the `post_list_gi_versions` interceptor. + When both interceptors are used, this `post_list_gi_versions_with_metadata` interceptor runs after the + `post_list_gi_versions` interceptor. The (possibly modified) response returned by + `post_list_gi_versions` will be passed to + `post_list_gi_versions_with_metadata`. + """ + return response, metadata + + def pre_list_minor_versions( + self, + request: minor_version.ListMinorVersionsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + minor_version.ListMinorVersionsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_minor_versions + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. 
+ """ + return request, metadata + + def post_list_minor_versions( + self, response: minor_version.ListMinorVersionsResponse + ) -> minor_version.ListMinorVersionsResponse: + """Post-rpc interceptor for list_minor_versions + + DEPRECATED. Please use the `post_list_minor_versions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_list_minor_versions` interceptor runs + before the `post_list_minor_versions_with_metadata` interceptor. + """ + return response + + def post_list_minor_versions_with_metadata( + self, + response: minor_version.ListMinorVersionsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + minor_version.ListMinorVersionsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_minor_versions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_list_minor_versions_with_metadata` + interceptor in new development instead of the `post_list_minor_versions` interceptor. + When both interceptors are used, this `post_list_minor_versions_with_metadata` interceptor runs after the + `post_list_minor_versions` interceptor. The (possibly modified) response returned by + `post_list_minor_versions` will be passed to + `post_list_minor_versions_with_metadata`. + """ + return response, metadata + + def pre_list_odb_networks( + self, + request: odb_network.ListOdbNetworksRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + odb_network.ListOdbNetworksRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_odb_networks + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. 
+ """ + return request, metadata + + def post_list_odb_networks( + self, response: odb_network.ListOdbNetworksResponse + ) -> odb_network.ListOdbNetworksResponse: + """Post-rpc interceptor for list_odb_networks + + DEPRECATED. Please use the `post_list_odb_networks_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_list_odb_networks` interceptor runs + before the `post_list_odb_networks_with_metadata` interceptor. + """ + return response + + def post_list_odb_networks_with_metadata( + self, + response: odb_network.ListOdbNetworksResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + odb_network.ListOdbNetworksResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_odb_networks + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_list_odb_networks_with_metadata` + interceptor in new development instead of the `post_list_odb_networks` interceptor. + When both interceptors are used, this `post_list_odb_networks_with_metadata` interceptor runs after the + `post_list_odb_networks` interceptor. The (possibly modified) response returned by + `post_list_odb_networks` will be passed to + `post_list_odb_networks_with_metadata`. + """ + return response, metadata + + def pre_list_odb_subnets( + self, + request: odb_subnet.ListOdbSubnetsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + odb_subnet.ListOdbSubnetsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_odb_subnets + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. 
+ """ + return request, metadata + + def post_list_odb_subnets( + self, response: odb_subnet.ListOdbSubnetsResponse + ) -> odb_subnet.ListOdbSubnetsResponse: + """Post-rpc interceptor for list_odb_subnets + + DEPRECATED. Please use the `post_list_odb_subnets_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_list_odb_subnets` interceptor runs + before the `post_list_odb_subnets_with_metadata` interceptor. + """ + return response + + def post_list_odb_subnets_with_metadata( + self, + response: odb_subnet.ListOdbSubnetsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + odb_subnet.ListOdbSubnetsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_odb_subnets + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_list_odb_subnets_with_metadata` + interceptor in new development instead of the `post_list_odb_subnets` interceptor. + When both interceptors are used, this `post_list_odb_subnets_with_metadata` interceptor runs after the + `post_list_odb_subnets` interceptor. The (possibly modified) response returned by + `post_list_odb_subnets` will be passed to + `post_list_odb_subnets_with_metadata`. + """ + return response, metadata + + def pre_list_pluggable_databases( + self, + request: pluggable_database.ListPluggableDatabasesRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + pluggable_database.ListPluggableDatabasesRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for list_pluggable_databases + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. 
+ """ + return request, metadata + + def post_list_pluggable_databases( + self, response: pluggable_database.ListPluggableDatabasesResponse + ) -> pluggable_database.ListPluggableDatabasesResponse: + """Post-rpc interceptor for list_pluggable_databases + + DEPRECATED. Please use the `post_list_pluggable_databases_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_list_pluggable_databases` interceptor runs + before the `post_list_pluggable_databases_with_metadata` interceptor. + """ + return response + + def post_list_pluggable_databases_with_metadata( + self, + response: pluggable_database.ListPluggableDatabasesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + pluggable_database.ListPluggableDatabasesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_pluggable_databases + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_list_pluggable_databases_with_metadata` + interceptor in new development instead of the `post_list_pluggable_databases` interceptor. + When both interceptors are used, this `post_list_pluggable_databases_with_metadata` interceptor runs after the + `post_list_pluggable_databases` interceptor. The (possibly modified) response returned by + `post_list_pluggable_databases` will be passed to + `post_list_pluggable_databases_with_metadata`. 
+ """ + return response, metadata + + def pre_remove_virtual_machine_exadb_vm_cluster( + self, + request: oracledatabase.RemoveVirtualMachineExadbVmClusterRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.RemoveVirtualMachineExadbVmClusterRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for remove_virtual_machine_exadb_vm_cluster + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata + + def post_remove_virtual_machine_exadb_vm_cluster( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for remove_virtual_machine_exadb_vm_cluster + + DEPRECATED. Please use the `post_remove_virtual_machine_exadb_vm_cluster_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_remove_virtual_machine_exadb_vm_cluster` interceptor runs + before the `post_remove_virtual_machine_exadb_vm_cluster_with_metadata` interceptor. + """ + return response + + def post_remove_virtual_machine_exadb_vm_cluster_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for remove_virtual_machine_exadb_vm_cluster + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_remove_virtual_machine_exadb_vm_cluster_with_metadata` + interceptor in new development instead of the `post_remove_virtual_machine_exadb_vm_cluster` interceptor. 
+ When both interceptors are used, this `post_remove_virtual_machine_exadb_vm_cluster_with_metadata` interceptor runs after the + `post_remove_virtual_machine_exadb_vm_cluster` interceptor. The (possibly modified) response returned by + `post_remove_virtual_machine_exadb_vm_cluster` will be passed to + `post_remove_virtual_machine_exadb_vm_cluster_with_metadata`. + """ + return response, metadata + + def pre_restart_autonomous_database( + self, + request: oracledatabase.RestartAutonomousDatabaseRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.RestartAutonomousDatabaseRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for restart_autonomous_database + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata + + def post_restart_autonomous_database( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for restart_autonomous_database + + DEPRECATED. Please use the `post_restart_autonomous_database_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_restart_autonomous_database` interceptor runs + before the `post_restart_autonomous_database_with_metadata` interceptor. + """ + return response + + def post_restart_autonomous_database_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for restart_autonomous_database + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. 
+ + We recommend only using this `post_restart_autonomous_database_with_metadata` + interceptor in new development instead of the `post_restart_autonomous_database` interceptor. + When both interceptors are used, this `post_restart_autonomous_database_with_metadata` interceptor runs after the + `post_restart_autonomous_database` interceptor. The (possibly modified) response returned by + `post_restart_autonomous_database` will be passed to + `post_restart_autonomous_database_with_metadata`. + """ + return response, metadata + + def pre_restore_autonomous_database( + self, + request: oracledatabase.RestoreAutonomousDatabaseRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.RestoreAutonomousDatabaseRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for restore_autonomous_database + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata + + def post_restore_autonomous_database( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for restore_autonomous_database + + DEPRECATED. Please use the `post_restore_autonomous_database_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_restore_autonomous_database` interceptor runs + before the `post_restore_autonomous_database_with_metadata` interceptor. 
+ """ + return response + + def post_restore_autonomous_database_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for restore_autonomous_database + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_restore_autonomous_database_with_metadata` + interceptor in new development instead of the `post_restore_autonomous_database` interceptor. + When both interceptors are used, this `post_restore_autonomous_database_with_metadata` interceptor runs after the + `post_restore_autonomous_database` interceptor. The (possibly modified) response returned by + `post_restore_autonomous_database` will be passed to + `post_restore_autonomous_database_with_metadata`. + """ + return response, metadata + + def pre_start_autonomous_database( + self, + request: oracledatabase.StartAutonomousDatabaseRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.StartAutonomousDatabaseRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for start_autonomous_database + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata + + def post_start_autonomous_database( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for start_autonomous_database + + DEPRECATED. Please use the `post_start_autonomous_database_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. 
This `post_start_autonomous_database` interceptor runs + before the `post_start_autonomous_database_with_metadata` interceptor. + """ + return response + + def post_start_autonomous_database_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for start_autonomous_database + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_start_autonomous_database_with_metadata` + interceptor in new development instead of the `post_start_autonomous_database` interceptor. + When both interceptors are used, this `post_start_autonomous_database_with_metadata` interceptor runs after the + `post_start_autonomous_database` interceptor. The (possibly modified) response returned by + `post_start_autonomous_database` will be passed to + `post_start_autonomous_database_with_metadata`. + """ + return response, metadata + + def pre_stop_autonomous_database( + self, + request: oracledatabase.StopAutonomousDatabaseRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.StopAutonomousDatabaseRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for stop_autonomous_database + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata + + def post_stop_autonomous_database( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for stop_autonomous_database + + DEPRECATED. Please use the `post_stop_autonomous_database_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_stop_autonomous_database` interceptor runs + before the `post_stop_autonomous_database_with_metadata` interceptor. + """ + return response + + def post_stop_autonomous_database_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for stop_autonomous_database + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_stop_autonomous_database_with_metadata` + interceptor in new development instead of the `post_stop_autonomous_database` interceptor. + When both interceptors are used, this `post_stop_autonomous_database_with_metadata` interceptor runs after the + `post_stop_autonomous_database` interceptor. The (possibly modified) response returned by + `post_stop_autonomous_database` will be passed to + `post_stop_autonomous_database_with_metadata`. + """ + return response, metadata + + def pre_switchover_autonomous_database( + self, + request: oracledatabase.SwitchoverAutonomousDatabaseRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.SwitchoverAutonomousDatabaseRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for switchover_autonomous_database + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata + + def post_switchover_autonomous_database( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for switchover_autonomous_database + + DEPRECATED. 
Please use the `post_switchover_autonomous_database_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_switchover_autonomous_database` interceptor runs + before the `post_switchover_autonomous_database_with_metadata` interceptor. + """ + return response + + def post_switchover_autonomous_database_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for switchover_autonomous_database + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_switchover_autonomous_database_with_metadata` + interceptor in new development instead of the `post_switchover_autonomous_database` interceptor. + When both interceptors are used, this `post_switchover_autonomous_database_with_metadata` interceptor runs after the + `post_switchover_autonomous_database` interceptor. The (possibly modified) response returned by + `post_switchover_autonomous_database` will be passed to + `post_switchover_autonomous_database_with_metadata`. + """ + return response, metadata + + def pre_update_autonomous_database( + self, + request: oracledatabase.UpdateAutonomousDatabaseRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.UpdateAutonomousDatabaseRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for update_autonomous_database + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. 
+ """ + return request, metadata + + def post_update_autonomous_database( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for update_autonomous_database + + DEPRECATED. Please use the `post_update_autonomous_database_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_update_autonomous_database` interceptor runs + before the `post_update_autonomous_database_with_metadata` interceptor. + """ + return response + + def post_update_autonomous_database_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_autonomous_database + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_update_autonomous_database_with_metadata` + interceptor in new development instead of the `post_update_autonomous_database` interceptor. + When both interceptors are used, this `post_update_autonomous_database_with_metadata` interceptor runs after the + `post_update_autonomous_database` interceptor. The (possibly modified) response returned by + `post_update_autonomous_database` will be passed to + `post_update_autonomous_database_with_metadata`. 
+ """ + return response, metadata + + def pre_update_exadb_vm_cluster( + self, + request: oracledatabase.UpdateExadbVmClusterRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + oracledatabase.UpdateExadbVmClusterRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for update_exadb_vm_cluster + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata + + def post_update_exadb_vm_cluster( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for update_exadb_vm_cluster + + DEPRECATED. Please use the `post_update_exadb_vm_cluster_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. This `post_update_exadb_vm_cluster` interceptor runs + before the `post_update_exadb_vm_cluster_with_metadata` interceptor. + """ + return response + + def post_update_exadb_vm_cluster_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_exadb_vm_cluster + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the OracleDatabase server but before it is returned to user code. + + We recommend only using this `post_update_exadb_vm_cluster_with_metadata` + interceptor in new development instead of the `post_update_exadb_vm_cluster` interceptor. + When both interceptors are used, this `post_update_exadb_vm_cluster_with_metadata` interceptor runs after the + `post_update_exadb_vm_cluster` interceptor. The (possibly modified) response returned by + `post_update_exadb_vm_cluster` will be passed to + `post_update_exadb_vm_cluster_with_metadata`. 
+ """ + return response, metadata + + def pre_get_location( + self, + request: locations_pb2.GetLocationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + locations_pb2.GetLocationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_location + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata + + def post_get_location( + self, response: locations_pb2.Location + ) -> locations_pb2.Location: + """Post-rpc interceptor for get_location + + Override in a subclass to manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. + """ + return response + + def pre_list_locations( + self, + request: locations_pb2.ListLocationsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + locations_pb2.ListLocationsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_locations + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata + + def post_list_locations( + self, response: locations_pb2.ListLocationsResponse + ) -> locations_pb2.ListLocationsResponse: + """Post-rpc interceptor for list_locations + + Override in a subclass to manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. + """ + return response + + def pre_cancel_operation( + self, + request: operations_pb2.CancelOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.CancelOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. 
+ """ + return request, metadata + + def post_cancel_operation(self, response: None) -> None: + """Post-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. + """ + return response + + def pre_delete_operation( + self, + request: operations_pb2.DeleteOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for delete_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata + + def post_delete_operation(self, response: None) -> None: + """Post-rpc interceptor for delete_operation + + Override in a subclass to manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. + """ + return response + + def pre_get_operation( + self, + request: operations_pb2.GetOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.GetOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata + + def post_get_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for get_operation + + Override in a subclass to manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. 
+ """ + return response + + def pre_list_operations( + self, + request: operations_pb2.ListOperationsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.ListOperationsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_operations + + Override in a subclass to manipulate the request or metadata + before they are sent to the OracleDatabase server. + """ + return request, metadata + + def post_list_operations( + self, response: operations_pb2.ListOperationsResponse + ) -> operations_pb2.ListOperationsResponse: + """Post-rpc interceptor for list_operations + + Override in a subclass to manipulate the response + after it is returned by the OracleDatabase server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class OracleDatabaseRestStub: + _session: AuthorizedSession + _host: str + _interceptor: OracleDatabaseRestInterceptor + + +class OracleDatabaseRestTransport(_BaseOracleDatabaseRestTransport): + """REST backend synchronous transport for OracleDatabase. + + Service describing handlers for resources + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + + def __init__( + self, + *, + host: str = "oracledatabase.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[OracleDatabaseRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'oracledatabase.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. 
+ Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + url_scheme=url_scheme, + api_audience=api_audience, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST + ) + self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._interceptor = interceptor or OracleDatabaseRestInterceptor() + self._prep_wrapped_messages(client_info) + + @property + def operations_client(self) -> operations_v1.AbstractOperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Only create a new client if we do not already have one. 
+ if self._operations_client is None: + http_options: Dict[str, List[Dict[str, str]]] = { + "google.longrunning.Operations.CancelOperation": [ + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/operations/*}:cancel", + "body": "*", + }, + ], + "google.longrunning.Operations.DeleteOperation": [ + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + ], + "google.longrunning.Operations.GetOperation": [ + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + ], + "google.longrunning.Operations.ListOperations": [ + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*}/operations", + }, + ], + } + + rest_transport = operations_v1.OperationsRestTransport( + host=self._host, + # use the credentials which are saved + credentials=self._credentials, + scopes=self._scopes, + http_options=http_options, + path_prefix="v1", + ) + + self._operations_client = operations_v1.AbstractOperationsClient( + transport=rest_transport + ) + + # Return the client from cache. 
+ return self._operations_client + + class _CreateAutonomousDatabase( + _BaseOracleDatabaseRestTransport._BaseCreateAutonomousDatabase, + OracleDatabaseRestStub, + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.CreateAutonomousDatabase") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: oracledatabase.CreateAutonomousDatabaseRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the create autonomous + database method over HTTP. + + Args: + request (~.oracledatabase.CreateAutonomousDatabaseRequest): + The request object. The request for ``AutonomousDatabase.Create``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseCreateAutonomousDatabase._get_http_options() + ) + + request, metadata = self._interceptor.pre_create_autonomous_database( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseCreateAutonomousDatabase._get_transcoded_request( + http_options, request + ) + + body = _BaseOracleDatabaseRestTransport._BaseCreateAutonomousDatabase._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseCreateAutonomousDatabase._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.CreateAutonomousDatabase", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "CreateAutonomousDatabase", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + OracleDatabaseRestTransport._CreateAutonomousDatabase._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_create_autonomous_database(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_autonomous_database_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.create_autonomous_database", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "CreateAutonomousDatabase", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _CreateCloudExadataInfrastructure( + _BaseOracleDatabaseRestTransport._BaseCreateCloudExadataInfrastructure, + OracleDatabaseRestStub, + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.CreateCloudExadataInfrastructure") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: oracledatabase.CreateCloudExadataInfrastructureRequest, + *, + retry: 
OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the create cloud exadata + infrastructure method over HTTP. + + Args: + request (~.oracledatabase.CreateCloudExadataInfrastructureRequest): + The request object. The request for ``CloudExadataInfrastructure.Create``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseCreateCloudExadataInfrastructure._get_http_options() + ) + + ( + request, + metadata, + ) = self._interceptor.pre_create_cloud_exadata_infrastructure( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseCreateCloudExadataInfrastructure._get_transcoded_request( + http_options, request + ) + + body = _BaseOracleDatabaseRestTransport._BaseCreateCloudExadataInfrastructure._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseCreateCloudExadataInfrastructure._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + 
"payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.CreateCloudExadataInfrastructure", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "CreateCloudExadataInfrastructure", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = OracleDatabaseRestTransport._CreateCloudExadataInfrastructure._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_create_cloud_exadata_infrastructure(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + ( + resp, + _, + ) = self._interceptor.post_create_cloud_exadata_infrastructure_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.create_cloud_exadata_infrastructure", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "CreateCloudExadataInfrastructure", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _CreateCloudVmCluster( + 
_BaseOracleDatabaseRestTransport._BaseCreateCloudVmCluster, + OracleDatabaseRestStub, + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.CreateCloudVmCluster") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: oracledatabase.CreateCloudVmClusterRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the create cloud vm cluster method over HTTP. + + Args: + request (~.oracledatabase.CreateCloudVmClusterRequest): + The request object. The request for ``CloudVmCluster.Create``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseCreateCloudVmCluster._get_http_options() + ) + + request, metadata = self._interceptor.pre_create_cloud_vm_cluster( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseCreateCloudVmCluster._get_transcoded_request( + http_options, request + ) + + body = _BaseOracleDatabaseRestTransport._BaseCreateCloudVmCluster._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseCreateCloudVmCluster._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.CreateCloudVmCluster", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "CreateCloudVmCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = OracleDatabaseRestTransport._CreateCloudVmCluster._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_create_cloud_vm_cluster(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_cloud_vm_cluster_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.create_cloud_vm_cluster", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "CreateCloudVmCluster", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _CreateDbSystem( + _BaseOracleDatabaseRestTransport._BaseCreateDbSystem, OracleDatabaseRestStub + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.CreateDbSystem") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: gco_db_system.CreateDbSystemRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: 
Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the create db system method over HTTP. + + Args: + request (~.gco_db_system.CreateDbSystemRequest): + The request object. The request for ``DbSystem.Create``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseCreateDbSystem._get_http_options() + ) + + request, metadata = self._interceptor.pre_create_db_system( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseCreateDbSystem._get_transcoded_request( + http_options, request + ) + + body = _BaseOracleDatabaseRestTransport._BaseCreateDbSystem._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseCreateDbSystem._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.CreateDbSystem", + extra={ + 
"serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "CreateDbSystem", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = OracleDatabaseRestTransport._CreateDbSystem._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_create_db_system(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_db_system_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.create_db_system", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "CreateDbSystem", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _CreateExadbVmCluster( + _BaseOracleDatabaseRestTransport._BaseCreateExadbVmCluster, + OracleDatabaseRestStub, + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.CreateExadbVmCluster") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = 
dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: oracledatabase.CreateExadbVmClusterRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the create exadb vm cluster method over HTTP. + + Args: + request (~.oracledatabase.CreateExadbVmClusterRequest): + The request object. The request for ``ExadbVmCluster.Create``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseCreateExadbVmCluster._get_http_options() + ) + + request, metadata = self._interceptor.pre_create_exadb_vm_cluster( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseCreateExadbVmCluster._get_transcoded_request( + http_options, request + ) + + body = _BaseOracleDatabaseRestTransport._BaseCreateExadbVmCluster._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseCreateExadbVmCluster._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.CreateExadbVmCluster", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "CreateExadbVmCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = OracleDatabaseRestTransport._CreateExadbVmCluster._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_create_exadb_vm_cluster(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_exadb_vm_cluster_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.create_exadb_vm_cluster", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "CreateExadbVmCluster", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _CreateExascaleDbStorageVault( + _BaseOracleDatabaseRestTransport._BaseCreateExascaleDbStorageVault, + OracleDatabaseRestStub, + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.CreateExascaleDbStorageVault") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest, + *, + retry: OptionalRetry = 
gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the create exascale db + storage vault method over HTTP. + + Args: + request (~.gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest): + The request object. The request for ``ExascaleDbStorageVault.Create``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseCreateExascaleDbStorageVault._get_http_options() + ) + + request, metadata = self._interceptor.pre_create_exascale_db_storage_vault( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseCreateExascaleDbStorageVault._get_transcoded_request( + http_options, request + ) + + body = _BaseOracleDatabaseRestTransport._BaseCreateExascaleDbStorageVault._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseCreateExascaleDbStorageVault._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + 
"requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.CreateExascaleDbStorageVault", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "CreateExascaleDbStorageVault", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + OracleDatabaseRestTransport._CreateExascaleDbStorageVault._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_create_exascale_db_storage_vault(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + ( + resp, + _, + ) = self._interceptor.post_create_exascale_db_storage_vault_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.create_exascale_db_storage_vault", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "CreateExascaleDbStorageVault", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _CreateOdbNetwork( + _BaseOracleDatabaseRestTransport._BaseCreateOdbNetwork, 
OracleDatabaseRestStub + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.CreateOdbNetwork") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: gco_odb_network.CreateOdbNetworkRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the create odb network method over HTTP. + + Args: + request (~.gco_odb_network.CreateOdbNetworkRequest): + The request object. The request for ``OdbNetwork.Create``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseCreateOdbNetwork._get_http_options() + ) + + request, metadata = self._interceptor.pre_create_odb_network( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseCreateOdbNetwork._get_transcoded_request( + http_options, request + ) + + body = _BaseOracleDatabaseRestTransport._BaseCreateOdbNetwork._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseCreateOdbNetwork._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.CreateOdbNetwork", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "CreateOdbNetwork", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = OracleDatabaseRestTransport._CreateOdbNetwork._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_create_odb_network(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_odb_network_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.create_odb_network", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "CreateOdbNetwork", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _CreateOdbSubnet( + _BaseOracleDatabaseRestTransport._BaseCreateOdbSubnet, OracleDatabaseRestStub + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.CreateOdbSubnet") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: gco_odb_subnet.CreateOdbSubnetRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, 
Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the create odb subnet method over HTTP. + + Args: + request (~.gco_odb_subnet.CreateOdbSubnetRequest): + The request object. The request for ``OdbSubnet.Create``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseCreateOdbSubnet._get_http_options() + ) + + request, metadata = self._interceptor.pre_create_odb_subnet( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseCreateOdbSubnet._get_transcoded_request( + http_options, request + ) + + body = _BaseOracleDatabaseRestTransport._BaseCreateOdbSubnet._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseCreateOdbSubnet._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.CreateOdbSubnet", + extra={ + "serviceName": 
"google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "CreateOdbSubnet", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = OracleDatabaseRestTransport._CreateOdbSubnet._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_create_odb_subnet(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_odb_subnet_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.create_odb_subnet", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "CreateOdbSubnet", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _DeleteAutonomousDatabase( + _BaseOracleDatabaseRestTransport._BaseDeleteAutonomousDatabase, + OracleDatabaseRestStub, + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.DeleteAutonomousDatabase") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = 
dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: oracledatabase.DeleteAutonomousDatabaseRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the delete autonomous + database method over HTTP. + + Args: + request (~.oracledatabase.DeleteAutonomousDatabaseRequest): + The request object. The request for ``AutonomousDatabase.Delete``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseDeleteAutonomousDatabase._get_http_options() + ) + + request, metadata = self._interceptor.pre_delete_autonomous_database( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseDeleteAutonomousDatabase._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseDeleteAutonomousDatabase._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.DeleteAutonomousDatabase", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "DeleteAutonomousDatabase", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + OracleDatabaseRestTransport._DeleteAutonomousDatabase._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_delete_autonomous_database(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_autonomous_database_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.delete_autonomous_database", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "DeleteAutonomousDatabase", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _DeleteCloudExadataInfrastructure( + _BaseOracleDatabaseRestTransport._BaseDeleteCloudExadataInfrastructure, + OracleDatabaseRestStub, + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.DeleteCloudExadataInfrastructure") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: oracledatabase.DeleteCloudExadataInfrastructureRequest, + *, + retry: OptionalRetry = 
gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the delete cloud exadata + infrastructure method over HTTP. + + Args: + request (~.oracledatabase.DeleteCloudExadataInfrastructureRequest): + The request object. The request for ``CloudExadataInfrastructure.Delete``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseDeleteCloudExadataInfrastructure._get_http_options() + ) + + ( + request, + metadata, + ) = self._interceptor.pre_delete_cloud_exadata_infrastructure( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseDeleteCloudExadataInfrastructure._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseDeleteCloudExadataInfrastructure._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending 
request for google.cloud.oracledatabase_v1.OracleDatabaseClient.DeleteCloudExadataInfrastructure", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "DeleteCloudExadataInfrastructure", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = OracleDatabaseRestTransport._DeleteCloudExadataInfrastructure._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_delete_cloud_exadata_infrastructure(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + ( + resp, + _, + ) = self._interceptor.post_delete_cloud_exadata_infrastructure_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.delete_cloud_exadata_infrastructure", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "DeleteCloudExadataInfrastructure", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _DeleteCloudVmCluster( + _BaseOracleDatabaseRestTransport._BaseDeleteCloudVmCluster, + OracleDatabaseRestStub, + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.DeleteCloudVmCluster") 
+ + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: oracledatabase.DeleteCloudVmClusterRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the delete cloud vm cluster method over HTTP. + + Args: + request (~.oracledatabase.DeleteCloudVmClusterRequest): + The request object. The request for ``CloudVmCluster.Delete``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseDeleteCloudVmCluster._get_http_options() + ) + + request, metadata = self._interceptor.pre_delete_cloud_vm_cluster( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseDeleteCloudVmCluster._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseDeleteCloudVmCluster._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.DeleteCloudVmCluster", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "DeleteCloudVmCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = OracleDatabaseRestTransport._DeleteCloudVmCluster._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_delete_cloud_vm_cluster(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_cloud_vm_cluster_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.delete_cloud_vm_cluster", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "DeleteCloudVmCluster", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _DeleteDbSystem( + _BaseOracleDatabaseRestTransport._BaseDeleteDbSystem, OracleDatabaseRestStub + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.DeleteDbSystem") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: db_system.DeleteDbSystemRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, 
Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the delete db system method over HTTP. + + Args: + request (~.db_system.DeleteDbSystemRequest): + The request object. The request for ``DbSystem.Delete``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseDeleteDbSystem._get_http_options() + ) + + request, metadata = self._interceptor.pre_delete_db_system( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseDeleteDbSystem._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseDeleteDbSystem._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.DeleteDbSystem", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "DeleteDbSystem", + "httpRequest": http_request, + "metadata": 
http_request["headers"], + }, + ) + + # Send the request + response = OracleDatabaseRestTransport._DeleteDbSystem._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_delete_db_system(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_db_system_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.delete_db_system", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "DeleteDbSystem", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _DeleteExadbVmCluster( + _BaseOracleDatabaseRestTransport._BaseDeleteExadbVmCluster, + OracleDatabaseRestStub, + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.DeleteExadbVmCluster") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + 
timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: oracledatabase.DeleteExadbVmClusterRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the delete exadb vm cluster method over HTTP. + + Args: + request (~.oracledatabase.DeleteExadbVmClusterRequest): + The request object. The request for ``ExadbVmCluster.Delete``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseDeleteExadbVmCluster._get_http_options() + ) + + request, metadata = self._interceptor.pre_delete_exadb_vm_cluster( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseDeleteExadbVmCluster._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseDeleteExadbVmCluster._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.DeleteExadbVmCluster", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "DeleteExadbVmCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = OracleDatabaseRestTransport._DeleteExadbVmCluster._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_delete_exadb_vm_cluster(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_exadb_vm_cluster_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.delete_exadb_vm_cluster", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "DeleteExadbVmCluster", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _DeleteExascaleDbStorageVault( + _BaseOracleDatabaseRestTransport._BaseDeleteExascaleDbStorageVault, + OracleDatabaseRestStub, + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.DeleteExascaleDbStorageVault") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest, + *, + retry: OptionalRetry = 
gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the delete exascale db + storage vault method over HTTP. + + Args: + request (~.exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest): + The request object. The request message for + ``ExascaleDbStorageVault.Delete``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseDeleteExascaleDbStorageVault._get_http_options() + ) + + request, metadata = self._interceptor.pre_delete_exascale_db_storage_vault( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseDeleteExascaleDbStorageVault._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseDeleteExascaleDbStorageVault._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for 
google.cloud.oracledatabase_v1.OracleDatabaseClient.DeleteExascaleDbStorageVault", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "DeleteExascaleDbStorageVault", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + OracleDatabaseRestTransport._DeleteExascaleDbStorageVault._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_delete_exascale_db_storage_vault(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + ( + resp, + _, + ) = self._interceptor.post_delete_exascale_db_storage_vault_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.delete_exascale_db_storage_vault", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "DeleteExascaleDbStorageVault", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _DeleteOdbNetwork( + _BaseOracleDatabaseRestTransport._BaseDeleteOdbNetwork, OracleDatabaseRestStub + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.DeleteOdbNetwork") + + @staticmethod + def _get_response( + 
host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: odb_network.DeleteOdbNetworkRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the delete odb network method over HTTP. + + Args: + request (~.odb_network.DeleteOdbNetworkRequest): + The request object. The request for ``OdbNetwork.Delete``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseDeleteOdbNetwork._get_http_options() + ) + + request, metadata = self._interceptor.pre_delete_odb_network( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseDeleteOdbNetwork._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseDeleteOdbNetwork._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.DeleteOdbNetwork", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "DeleteOdbNetwork", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = OracleDatabaseRestTransport._DeleteOdbNetwork._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_delete_odb_network(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_odb_network_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.delete_odb_network", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "DeleteOdbNetwork", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _DeleteOdbSubnet( + _BaseOracleDatabaseRestTransport._BaseDeleteOdbSubnet, OracleDatabaseRestStub + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.DeleteOdbSubnet") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: odb_subnet.DeleteOdbSubnetRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] 
= (), + ) -> operations_pb2.Operation: + r"""Call the delete odb subnet method over HTTP. + + Args: + request (~.odb_subnet.DeleteOdbSubnetRequest): + The request object. The request for ``OdbSubnet.Delete``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseDeleteOdbSubnet._get_http_options() + ) + + request, metadata = self._interceptor.pre_delete_odb_subnet( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseDeleteOdbSubnet._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseDeleteOdbSubnet._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.DeleteOdbSubnet", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "DeleteOdbSubnet", + "httpRequest": http_request, + "metadata": 
http_request["headers"], + }, + ) + + # Send the request + response = OracleDatabaseRestTransport._DeleteOdbSubnet._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_delete_odb_subnet(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_odb_subnet_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.delete_odb_subnet", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "DeleteOdbSubnet", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _FailoverAutonomousDatabase( + _BaseOracleDatabaseRestTransport._BaseFailoverAutonomousDatabase, + OracleDatabaseRestStub, + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.FailoverAutonomousDatabase") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + 
"{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: oracledatabase.FailoverAutonomousDatabaseRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the failover autonomous + database method over HTTP. + + Args: + request (~.oracledatabase.FailoverAutonomousDatabaseRequest): + The request object. The request for + ``OracleDatabase.FailoverAutonomousDatabase``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseFailoverAutonomousDatabase._get_http_options() + ) + + request, metadata = self._interceptor.pre_failover_autonomous_database( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseFailoverAutonomousDatabase._get_transcoded_request( + http_options, request + ) + + body = _BaseOracleDatabaseRestTransport._BaseFailoverAutonomousDatabase._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseFailoverAutonomousDatabase._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.FailoverAutonomousDatabase", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "FailoverAutonomousDatabase", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + OracleDatabaseRestTransport._FailoverAutonomousDatabase._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_failover_autonomous_database(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_failover_autonomous_database_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.failover_autonomous_database", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "FailoverAutonomousDatabase", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _GenerateAutonomousDatabaseWallet( + _BaseOracleDatabaseRestTransport._BaseGenerateAutonomousDatabaseWallet, + OracleDatabaseRestStub, + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.GenerateAutonomousDatabaseWallet") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: oracledatabase.GenerateAutonomousDatabaseWalletRequest, + *, + retry: 
OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> oracledatabase.GenerateAutonomousDatabaseWalletResponse: + r"""Call the generate autonomous + database wallet method over HTTP. + + Args: + request (~.oracledatabase.GenerateAutonomousDatabaseWalletRequest): + The request object. The request for ``AutonomousDatabase.GenerateWallet``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.oracledatabase.GenerateAutonomousDatabaseWalletResponse: + The response for ``AutonomousDatabase.GenerateWallet``. + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseGenerateAutonomousDatabaseWallet._get_http_options() + ) + + ( + request, + metadata, + ) = self._interceptor.pre_generate_autonomous_database_wallet( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseGenerateAutonomousDatabaseWallet._get_transcoded_request( + http_options, request + ) + + body = _BaseOracleDatabaseRestTransport._BaseGenerateAutonomousDatabaseWallet._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseGenerateAutonomousDatabaseWallet._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + 
http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.GenerateAutonomousDatabaseWallet", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "GenerateAutonomousDatabaseWallet", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = OracleDatabaseRestTransport._GenerateAutonomousDatabaseWallet._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = oracledatabase.GenerateAutonomousDatabaseWalletResponse() + pb_resp = oracledatabase.GenerateAutonomousDatabaseWalletResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_generate_autonomous_database_wallet(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + ( + resp, + _, + ) = self._interceptor.post_generate_autonomous_database_wallet_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + oracledatabase.GenerateAutonomousDatabaseWalletResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.generate_autonomous_database_wallet", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": 
"GenerateAutonomousDatabaseWallet", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _GetAutonomousDatabase( + _BaseOracleDatabaseRestTransport._BaseGetAutonomousDatabase, + OracleDatabaseRestStub, + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.GetAutonomousDatabase") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: oracledatabase.GetAutonomousDatabaseRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> autonomous_database.AutonomousDatabase: + r"""Call the get autonomous database method over HTTP. + + Args: + request (~.oracledatabase.GetAutonomousDatabaseRequest): + The request object. The request for ``AutonomousDatabase.Get``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.autonomous_database.AutonomousDatabase: + Details of the Autonomous Database + resource. 
+ https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/AutonomousDatabase/ + + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseGetAutonomousDatabase._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_autonomous_database( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseGetAutonomousDatabase._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseGetAutonomousDatabase._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.GetAutonomousDatabase", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "GetAutonomousDatabase", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = OracleDatabaseRestTransport._GetAutonomousDatabase._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = autonomous_database.AutonomousDatabase() + pb_resp = autonomous_database.AutonomousDatabase.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_get_autonomous_database(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_autonomous_database_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = autonomous_database.AutonomousDatabase.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.get_autonomous_database", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "GetAutonomousDatabase", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _GetCloudExadataInfrastructure( + _BaseOracleDatabaseRestTransport._BaseGetCloudExadataInfrastructure, + OracleDatabaseRestStub, + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.GetCloudExadataInfrastructure") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: 
oracledatabase.GetCloudExadataInfrastructureRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> exadata_infra.CloudExadataInfrastructure: + r"""Call the get cloud exadata + infrastructure method over HTTP. + + Args: + request (~.oracledatabase.GetCloudExadataInfrastructureRequest): + The request object. The request for ``CloudExadataInfrastructure.Get``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.exadata_infra.CloudExadataInfrastructure: + Represents CloudExadataInfrastructure + resource. 
+ https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/CloudExadataInfrastructure/ + + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseGetCloudExadataInfrastructure._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_cloud_exadata_infrastructure( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseGetCloudExadataInfrastructure._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseGetCloudExadataInfrastructure._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.GetCloudExadataInfrastructure", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "GetCloudExadataInfrastructure", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = OracleDatabaseRestTransport._GetCloudExadataInfrastructure._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = exadata_infra.CloudExadataInfrastructure() + pb_resp = exadata_infra.CloudExadataInfrastructure.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_get_cloud_exadata_infrastructure(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + ( + resp, + _, + ) = self._interceptor.post_get_cloud_exadata_infrastructure_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = exadata_infra.CloudExadataInfrastructure.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.get_cloud_exadata_infrastructure", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "GetCloudExadataInfrastructure", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _GetCloudVmCluster( + _BaseOracleDatabaseRestTransport._BaseGetCloudVmCluster, OracleDatabaseRestStub + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.GetCloudVmCluster") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, 
+ request: oracledatabase.GetCloudVmClusterRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> vm_cluster.CloudVmCluster: + r"""Call the get cloud vm cluster method over HTTP. + + Args: + request (~.oracledatabase.GetCloudVmClusterRequest): + The request object. The request for ``CloudVmCluster.Get``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.vm_cluster.CloudVmCluster: + Details of the Cloud VM Cluster + resource. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/CloudVmCluster/ + + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseGetCloudVmCluster._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_cloud_vm_cluster( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseGetCloudVmCluster._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseGetCloudVmCluster._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for 
google.cloud.oracledatabase_v1.OracleDatabaseClient.GetCloudVmCluster", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "GetCloudVmCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = OracleDatabaseRestTransport._GetCloudVmCluster._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = vm_cluster.CloudVmCluster() + pb_resp = vm_cluster.CloudVmCluster.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_get_cloud_vm_cluster(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_cloud_vm_cluster_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = vm_cluster.CloudVmCluster.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.get_cloud_vm_cluster", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "GetCloudVmCluster", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _GetDatabase( + _BaseOracleDatabaseRestTransport._BaseGetDatabase, OracleDatabaseRestStub + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.GetDatabase") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + 
timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: database.GetDatabaseRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> database.Database: + r"""Call the get database method over HTTP. + + Args: + request (~.database.GetDatabaseRequest): + The request object. The request for ``Database.Get``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.database.Database: + Details of the Database resource. 
+ https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/Database/ + + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseGetDatabase._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_database(request, metadata) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseGetDatabase._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseGetDatabase._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.GetDatabase", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "GetDatabase", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = OracleDatabaseRestTransport._GetDatabase._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = database.Database() + pb_resp = database.Database.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_get_database(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_database_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = database.Database.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.get_database", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "GetDatabase", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _GetDbSystem( + _BaseOracleDatabaseRestTransport._BaseGetDbSystem, OracleDatabaseRestStub + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.GetDbSystem") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: db_system.GetDbSystemRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] 
= (), + ) -> db_system.DbSystem: + r"""Call the get db system method over HTTP. + + Args: + request (~.db_system.GetDbSystemRequest): + The request object. The request for ``DbSystem.Get``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.db_system.DbSystem: + Details of the DbSystem (BaseDB) + resource. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/DbSystem/ + + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseGetDbSystem._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_db_system(request, metadata) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseGetDbSystem._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseGetDbSystem._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.GetDbSystem", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "GetDbSystem", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the 
request + response = OracleDatabaseRestTransport._GetDbSystem._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = db_system.DbSystem() + pb_resp = db_system.DbSystem.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_get_db_system(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_db_system_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = db_system.DbSystem.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.get_db_system", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "GetDbSystem", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _GetExadbVmCluster( + _BaseOracleDatabaseRestTransport._BaseGetExadbVmCluster, OracleDatabaseRestStub + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.GetExadbVmCluster") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, 
+ params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: oracledatabase.GetExadbVmClusterRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> exadb_vm_cluster.ExadbVmCluster: + r"""Call the get exadb vm cluster method over HTTP. + + Args: + request (~.oracledatabase.GetExadbVmClusterRequest): + The request object. The request for ``ExadbVmCluster.Get``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.exadb_vm_cluster.ExadbVmCluster: + ExadbVmCluster represents a cluster + of VMs that are used to run Exadata + workloads. 
+ https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/ExadbVmCluster/ + + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseGetExadbVmCluster._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_exadb_vm_cluster( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseGetExadbVmCluster._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseGetExadbVmCluster._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.GetExadbVmCluster", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "GetExadbVmCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = OracleDatabaseRestTransport._GetExadbVmCluster._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = exadb_vm_cluster.ExadbVmCluster() + pb_resp = exadb_vm_cluster.ExadbVmCluster.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_get_exadb_vm_cluster(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_exadb_vm_cluster_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = exadb_vm_cluster.ExadbVmCluster.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.get_exadb_vm_cluster", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "GetExadbVmCluster", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _GetExascaleDbStorageVault( + _BaseOracleDatabaseRestTransport._BaseGetExascaleDbStorageVault, + OracleDatabaseRestStub, + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.GetExascaleDbStorageVault") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: 
exascale_db_storage_vault.GetExascaleDbStorageVaultRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> exascale_db_storage_vault.ExascaleDbStorageVault: + r"""Call the get exascale db storage + vault method over HTTP. + + Args: + request (~.exascale_db_storage_vault.GetExascaleDbStorageVaultRequest): + The request object. The request for ``ExascaleDbStorageVault.Get``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.exascale_db_storage_vault.ExascaleDbStorageVault: + ExascaleDbStorageVault represents a + storage vault exadb vm cluster resource. 
+ https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/ExascaleDbStorageVault/ + + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseGetExascaleDbStorageVault._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_exascale_db_storage_vault( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseGetExascaleDbStorageVault._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseGetExascaleDbStorageVault._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.GetExascaleDbStorageVault", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "GetExascaleDbStorageVault", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + OracleDatabaseRestTransport._GetExascaleDbStorageVault._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = exascale_db_storage_vault.ExascaleDbStorageVault() + pb_resp = exascale_db_storage_vault.ExascaleDbStorageVault.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_get_exascale_db_storage_vault(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + ( + resp, + _, + ) = self._interceptor.post_get_exascale_db_storage_vault_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + exascale_db_storage_vault.ExascaleDbStorageVault.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.get_exascale_db_storage_vault", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "GetExascaleDbStorageVault", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _GetOdbNetwork( + _BaseOracleDatabaseRestTransport._BaseGetOdbNetwork, OracleDatabaseRestStub + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.GetOdbNetwork") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + 
self, + request: odb_network.GetOdbNetworkRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> odb_network.OdbNetwork: + r"""Call the get odb network method over HTTP. + + Args: + request (~.odb_network.GetOdbNetworkRequest): + The request object. The request for ``OdbNetwork.Get``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.odb_network.OdbNetwork: + Represents OdbNetwork resource. + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseGetOdbNetwork._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_odb_network(request, metadata) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseGetOdbNetwork._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseGetOdbNetwork._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.GetOdbNetwork", + extra={ + "serviceName": 
"google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "GetOdbNetwork", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = OracleDatabaseRestTransport._GetOdbNetwork._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = odb_network.OdbNetwork() + pb_resp = odb_network.OdbNetwork.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_get_odb_network(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_odb_network_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = odb_network.OdbNetwork.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.get_odb_network", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "GetOdbNetwork", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _GetOdbSubnet( + _BaseOracleDatabaseRestTransport._BaseGetOdbSubnet, OracleDatabaseRestStub + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.GetOdbSubnet") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = 
dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: odb_subnet.GetOdbSubnetRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> odb_subnet.OdbSubnet: + r"""Call the get odb subnet method over HTTP. + + Args: + request (~.odb_subnet.GetOdbSubnetRequest): + The request object. The request for ``OdbSubnet.Get``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.odb_subnet.OdbSubnet: + Represents OdbSubnet resource. 
+ """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseGetOdbSubnet._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_odb_subnet(request, metadata) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseGetOdbSubnet._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseGetOdbSubnet._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.GetOdbSubnet", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "GetOdbSubnet", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = OracleDatabaseRestTransport._GetOdbSubnet._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = odb_subnet.OdbSubnet() + pb_resp = odb_subnet.OdbSubnet.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_get_odb_subnet(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_odb_subnet_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = odb_subnet.OdbSubnet.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.get_odb_subnet", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "GetOdbSubnet", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _GetPluggableDatabase( + _BaseOracleDatabaseRestTransport._BaseGetPluggableDatabase, + OracleDatabaseRestStub, + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.GetPluggableDatabase") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: pluggable_database.GetPluggableDatabaseRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: 
Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pluggable_database.PluggableDatabase: + r"""Call the get pluggable database method over HTTP. + + Args: + request (~.pluggable_database.GetPluggableDatabaseRequest): + The request object. The request for ``PluggableDatabase.Get``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.pluggable_database.PluggableDatabase: + The PluggableDatabase resource. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/PluggableDatabase/ + + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseGetPluggableDatabase._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_pluggable_database( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseGetPluggableDatabase._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseGetPluggableDatabase._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.GetPluggableDatabase", + 
extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "GetPluggableDatabase", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = OracleDatabaseRestTransport._GetPluggableDatabase._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = pluggable_database.PluggableDatabase() + pb_resp = pluggable_database.PluggableDatabase.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_get_pluggable_database(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_pluggable_database_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = pluggable_database.PluggableDatabase.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.get_pluggable_database", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "GetPluggableDatabase", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _ListAutonomousDatabaseBackups( + _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabaseBackups, + OracleDatabaseRestStub, + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.ListAutonomousDatabaseBackups") + + @staticmethod + def _get_response( + host, + 
metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: oracledatabase.ListAutonomousDatabaseBackupsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> oracledatabase.ListAutonomousDatabaseBackupsResponse: + r"""Call the list autonomous database + backups method over HTTP. + + Args: + request (~.oracledatabase.ListAutonomousDatabaseBackupsRequest): + The request object. The request for ``AutonomousDatabaseBackup.List``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.oracledatabase.ListAutonomousDatabaseBackupsResponse: + The response for ``AutonomousDatabaseBackup.List``. 
+ """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabaseBackups._get_http_options() + ) + + request, metadata = self._interceptor.pre_list_autonomous_database_backups( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabaseBackups._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabaseBackups._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListAutonomousDatabaseBackups", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "ListAutonomousDatabaseBackups", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = OracleDatabaseRestTransport._ListAutonomousDatabaseBackups._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = oracledatabase.ListAutonomousDatabaseBackupsResponse() + pb_resp = oracledatabase.ListAutonomousDatabaseBackupsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_list_autonomous_database_backups(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + ( + resp, + _, + ) = self._interceptor.post_list_autonomous_database_backups_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + oracledatabase.ListAutonomousDatabaseBackupsResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_autonomous_database_backups", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "ListAutonomousDatabaseBackups", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _ListAutonomousDatabaseCharacterSets( + _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabaseCharacterSets, + OracleDatabaseRestStub, + ): + def __hash__(self): + return hash( + "OracleDatabaseRestTransport.ListAutonomousDatabaseCharacterSets" + ) + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + 
params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: oracledatabase.ListAutonomousDatabaseCharacterSetsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> oracledatabase.ListAutonomousDatabaseCharacterSetsResponse: + r"""Call the list autonomous database + character sets method over HTTP. + + Args: + request (~.oracledatabase.ListAutonomousDatabaseCharacterSetsRequest): + The request object. The request for ``AutonomousDatabaseCharacterSet.List``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.oracledatabase.ListAutonomousDatabaseCharacterSetsResponse: + The response for + ``AutonomousDatabaseCharacterSet.List``. 
+ + """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabaseCharacterSets._get_http_options() + ) + + ( + request, + metadata, + ) = self._interceptor.pre_list_autonomous_database_character_sets( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabaseCharacterSets._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabaseCharacterSets._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListAutonomousDatabaseCharacterSets", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "ListAutonomousDatabaseCharacterSets", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = OracleDatabaseRestTransport._ListAutonomousDatabaseCharacterSets._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = oracledatabase.ListAutonomousDatabaseCharacterSetsResponse() + pb_resp = oracledatabase.ListAutonomousDatabaseCharacterSetsResponse.pb( + resp + ) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_list_autonomous_database_character_sets(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + ( + resp, + _, + ) = self._interceptor.post_list_autonomous_database_character_sets_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = oracledatabase.ListAutonomousDatabaseCharacterSetsResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_autonomous_database_character_sets", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "ListAutonomousDatabaseCharacterSets", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _ListAutonomousDatabases( + _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabases, + OracleDatabaseRestStub, + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.ListAutonomousDatabases") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + 
params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: oracledatabase.ListAutonomousDatabasesRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> oracledatabase.ListAutonomousDatabasesResponse: + r"""Call the list autonomous databases method over HTTP. + + Args: + request (~.oracledatabase.ListAutonomousDatabasesRequest): + The request object. The request for ``AutonomousDatabase.List``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.oracledatabase.ListAutonomousDatabasesResponse: + The response for ``AutonomousDatabase.List``. 
+ """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabases._get_http_options() + ) + + request, metadata = self._interceptor.pre_list_autonomous_databases( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabases._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabases._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListAutonomousDatabases", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "ListAutonomousDatabases", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + OracleDatabaseRestTransport._ListAutonomousDatabases._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = oracledatabase.ListAutonomousDatabasesResponse() + pb_resp = oracledatabase.ListAutonomousDatabasesResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_list_autonomous_databases(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_autonomous_databases_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + oracledatabase.ListAutonomousDatabasesResponse.to_json(response) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_autonomous_databases", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "ListAutonomousDatabases", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _ListAutonomousDbVersions( + _BaseOracleDatabaseRestTransport._BaseListAutonomousDbVersions, + OracleDatabaseRestStub, + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.ListAutonomousDbVersions") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + 
self, + request: oracledatabase.ListAutonomousDbVersionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> oracledatabase.ListAutonomousDbVersionsResponse: + r"""Call the list autonomous db + versions method over HTTP. + + Args: + request (~.oracledatabase.ListAutonomousDbVersionsRequest): + The request object. The request for ``AutonomousDbVersion.List``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.oracledatabase.ListAutonomousDbVersionsResponse: + The response for ``AutonomousDbVersion.List``. 
+ """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseListAutonomousDbVersions._get_http_options() + ) + + request, metadata = self._interceptor.pre_list_autonomous_db_versions( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseListAutonomousDbVersions._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseListAutonomousDbVersions._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListAutonomousDbVersions", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "ListAutonomousDbVersions", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + OracleDatabaseRestTransport._ListAutonomousDbVersions._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = oracledatabase.ListAutonomousDbVersionsResponse() + pb_resp = oracledatabase.ListAutonomousDbVersionsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_list_autonomous_db_versions(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_autonomous_db_versions_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + oracledatabase.ListAutonomousDbVersionsResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_autonomous_db_versions", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "ListAutonomousDbVersions", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _ListCloudExadataInfrastructures( + _BaseOracleDatabaseRestTransport._BaseListCloudExadataInfrastructures, + OracleDatabaseRestStub, + ): + def __hash__(self): + return hash("OracleDatabaseRestTransport.ListCloudExadataInfrastructures") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + 
return response + + def __call__( + self, + request: oracledatabase.ListCloudExadataInfrastructuresRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> oracledatabase.ListCloudExadataInfrastructuresResponse: + r"""Call the list cloud exadata + infrastructures method over HTTP. + + Args: + request (~.oracledatabase.ListCloudExadataInfrastructuresRequest): + The request object. The request for ``CloudExadataInfrastructures.List``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.oracledatabase.ListCloudExadataInfrastructuresResponse: + The response for ``CloudExadataInfrastructures.list``. 
+ """ + + http_options = ( + _BaseOracleDatabaseRestTransport._BaseListCloudExadataInfrastructures._get_http_options() + ) + + ( + request, + metadata, + ) = self._interceptor.pre_list_cloud_exadata_infrastructures( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseListCloudExadataInfrastructures._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseOracleDatabaseRestTransport._BaseListCloudExadataInfrastructures._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListCloudExadataInfrastructures", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "ListCloudExadataInfrastructures", + "httpRequest": http_request, + "metadata": http_request["headers"], }, - ], - } + ) - rest_transport = operations_v1.OperationsRestTransport( - host=self._host, - # use the credentials which are saved - credentials=self._credentials, - scopes=self._scopes, - http_options=http_options, - path_prefix="v1", + # Send the request + response = OracleDatabaseRestTransport._ListCloudExadataInfrastructures._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) - self._operations_client = operations_v1.AbstractOperationsClient( - transport=rest_transport - ) + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) - # Return the client from cache. - return self._operations_client + # Return the response + resp = oracledatabase.ListCloudExadataInfrastructuresResponse() + pb_resp = oracledatabase.ListCloudExadataInfrastructuresResponse.pb(resp) - class _CreateAutonomousDatabase( - _BaseOracleDatabaseRestTransport._BaseCreateAutonomousDatabase, + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_list_cloud_exadata_infrastructures(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + ( + resp, + _, + ) = self._interceptor.post_list_cloud_exadata_infrastructures_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + oracledatabase.ListCloudExadataInfrastructuresResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_cloud_exadata_infrastructures", + extra={ + "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", + "rpcName": "ListCloudExadataInfrastructures", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _ListCloudVmClusters( + _BaseOracleDatabaseRestTransport._BaseListCloudVmClusters, OracleDatabaseRestStub, ): def __hash__(self): - return hash("OracleDatabaseRestTransport.CreateAutonomousDatabase") + return hash("OracleDatabaseRestTransport.ListCloudVmClusters") @staticmethod def _get_response( @@ -1856,57 +8804,48 @@ def _get_response( timeout=timeout, headers=headers, params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, ) return response def __call__( self, 
- request: oracledatabase.CreateAutonomousDatabaseRequest, + request: oracledatabase.ListCloudVmClustersRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the create autonomous - database method over HTTP. - - Args: - request (~.oracledatabase.CreateAutonomousDatabaseRequest): - The request object. The request for ``AutonomousDatabase.Create``. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. + ) -> oracledatabase.ListCloudVmClustersResponse: + r"""Call the list cloud vm clusters method over HTTP. - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. + Args: + request (~.oracledatabase.ListCloudVmClustersRequest): + The request object. The request for ``CloudVmCluster.List``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.oracledatabase.ListCloudVmClustersResponse: + The response for ``CloudVmCluster.List``. 
""" http_options = ( - _BaseOracleDatabaseRestTransport._BaseCreateAutonomousDatabase._get_http_options() + _BaseOracleDatabaseRestTransport._BaseListCloudVmClusters._get_http_options() ) - request, metadata = self._interceptor.pre_create_autonomous_database( + request, metadata = self._interceptor.pre_list_cloud_vm_clusters( request, metadata ) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseCreateAutonomousDatabase._get_transcoded_request( + transcoded_request = _BaseOracleDatabaseRestTransport._BaseListCloudVmClusters._get_transcoded_request( http_options, request ) - body = _BaseOracleDatabaseRestTransport._BaseCreateAutonomousDatabase._get_request_body_json( - transcoded_request - ) - # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseCreateAutonomousDatabase._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseListCloudVmClusters._get_query_params_json( transcoded_request ) @@ -1918,7 +8857,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1928,26 +8867,23 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.CreateAutonomousDatabase", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListCloudVmClusters", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "CreateAutonomousDatabase", + "rpcName": "ListCloudVmClusters", "httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response = ( - OracleDatabaseRestTransport._CreateAutonomousDatabase._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) + response = OracleDatabaseRestTransport._ListCloudVmClusters._get_response( + 
self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1956,19 +8892,23 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = oracledatabase.ListCloudVmClustersResponse() + pb_resp = oracledatabase.ListCloudVmClustersResponse.pb(resp) - resp = self._interceptor.post_create_autonomous_database(resp) + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_list_cloud_vm_clusters(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_create_autonomous_database_with_metadata( + resp, _ = self._interceptor.post_list_cloud_vm_clusters_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER try: - response_payload = json_format.MessageToJson(resp) + response_payload = ( + oracledatabase.ListCloudVmClustersResponse.to_json(response) + ) except: response_payload = None http_response = { @@ -1977,22 +8917,22 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.create_autonomous_database", + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_cloud_vm_clusters", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "CreateAutonomousDatabase", + "rpcName": "ListCloudVmClusters", "metadata": http_response["headers"], "httpResponse": http_response, }, ) return resp - class _CreateCloudExadataInfrastructure( - _BaseOracleDatabaseRestTransport._BaseCreateCloudExadataInfrastructure, + class _ListDatabaseCharacterSets( + 
_BaseOracleDatabaseRestTransport._BaseListDatabaseCharacterSets, OracleDatabaseRestStub, ): def __hash__(self): - return hash("OracleDatabaseRestTransport.CreateCloudExadataInfrastructure") + return hash("OracleDatabaseRestTransport.ListDatabaseCharacterSets") @staticmethod def _get_response( @@ -2013,24 +8953,23 @@ def _get_response( timeout=timeout, headers=headers, params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, ) return response def __call__( self, - request: oracledatabase.CreateCloudExadataInfrastructureRequest, + request: database_character_set.ListDatabaseCharacterSetsRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the create cloud exadata - infrastructure method over HTTP. + ) -> database_character_set.ListDatabaseCharacterSetsResponse: + r"""Call the list database character + sets method over HTTP. Args: - request (~.oracledatabase.CreateCloudExadataInfrastructureRequest): - The request object. The request for ``CloudExadataInfrastructure.Create``. + request (~.database_character_set.ListDatabaseCharacterSetsRequest): + The request object. The request for ``DatabaseCharacterSet.List``. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2040,33 +8979,23 @@ def __call__( be of type `bytes`. Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. - + ~.database_character_set.ListDatabaseCharacterSetsResponse: + The response for ``DatabaseCharacterSet.List``. 
""" http_options = ( - _BaseOracleDatabaseRestTransport._BaseCreateCloudExadataInfrastructure._get_http_options() + _BaseOracleDatabaseRestTransport._BaseListDatabaseCharacterSets._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_create_cloud_exadata_infrastructure( + request, metadata = self._interceptor.pre_list_database_character_sets( request, metadata ) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseCreateCloudExadataInfrastructure._get_transcoded_request( + transcoded_request = _BaseOracleDatabaseRestTransport._BaseListDatabaseCharacterSets._get_transcoded_request( http_options, request ) - body = _BaseOracleDatabaseRestTransport._BaseCreateCloudExadataInfrastructure._get_request_body_json( - transcoded_request - ) - # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseCreateCloudExadataInfrastructure._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseListDatabaseCharacterSets._get_query_params_json( transcoded_request ) @@ -2078,7 +9007,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2088,24 +9017,25 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.CreateCloudExadataInfrastructure", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListDatabaseCharacterSets", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "CreateCloudExadataInfrastructure", + "rpcName": "ListDatabaseCharacterSets", "httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response = OracleDatabaseRestTransport._CreateCloudExadataInfrastructure._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - 
transcoded_request, - body, + response = ( + OracleDatabaseRestTransport._ListDatabaseCharacterSets._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2114,22 +9044,23 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = database_character_set.ListDatabaseCharacterSetsResponse() + pb_resp = database_character_set.ListDatabaseCharacterSetsResponse.pb(resp) - resp = self._interceptor.post_create_cloud_exadata_infrastructure(resp) + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_list_database_character_sets(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_create_cloud_exadata_infrastructure_with_metadata( + resp, _ = self._interceptor.post_list_database_character_sets_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER try: - response_payload = json_format.MessageToJson(resp) + response_payload = database_character_set.ListDatabaseCharacterSetsResponse.to_json( + response + ) except: response_payload = None http_response = { @@ -2138,22 +9069,21 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.create_cloud_exadata_infrastructure", + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_database_character_sets", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "CreateCloudExadataInfrastructure", + "rpcName": "ListDatabaseCharacterSets", "metadata": http_response["headers"], "httpResponse": http_response, 
}, ) return resp - class _CreateCloudVmCluster( - _BaseOracleDatabaseRestTransport._BaseCreateCloudVmCluster, - OracleDatabaseRestStub, + class _ListDatabases( + _BaseOracleDatabaseRestTransport._BaseListDatabases, OracleDatabaseRestStub ): def __hash__(self): - return hash("OracleDatabaseRestTransport.CreateCloudVmCluster") + return hash("OracleDatabaseRestTransport.ListDatabases") @staticmethod def _get_response( @@ -2174,23 +9104,22 @@ def _get_response( timeout=timeout, headers=headers, params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, ) return response def __call__( self, - request: oracledatabase.CreateCloudVmClusterRequest, + request: database.ListDatabasesRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the create cloud vm cluster method over HTTP. + ) -> database.ListDatabasesResponse: + r"""Call the list databases method over HTTP. Args: - request (~.oracledatabase.CreateCloudVmClusterRequest): - The request object. The request for ``CloudVmCluster.Create``. + request (~.database.ListDatabasesRequest): + The request object. The request for ``Database.List``. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2200,30 +9129,21 @@ def __call__( be of type `bytes`. Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. - + ~.database.ListDatabasesResponse: + The response for ``Database.List``. 
""" http_options = ( - _BaseOracleDatabaseRestTransport._BaseCreateCloudVmCluster._get_http_options() + _BaseOracleDatabaseRestTransport._BaseListDatabases._get_http_options() ) - request, metadata = self._interceptor.pre_create_cloud_vm_cluster( - request, metadata - ) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseCreateCloudVmCluster._get_transcoded_request( + request, metadata = self._interceptor.pre_list_databases(request, metadata) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseListDatabases._get_transcoded_request( http_options, request ) - body = _BaseOracleDatabaseRestTransport._BaseCreateCloudVmCluster._get_request_body_json( - transcoded_request - ) - # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseCreateCloudVmCluster._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseListDatabases._get_query_params_json( transcoded_request ) @@ -2235,7 +9155,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2245,24 +9165,23 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.CreateCloudVmCluster", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListDatabases", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "CreateCloudVmCluster", + "rpcName": "ListDatabases", "httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response = OracleDatabaseRestTransport._CreateCloudVmCluster._get_response( + response = OracleDatabaseRestTransport._ListDatabases._get_response( self._host, metadata, query_params, self._session, timeout, transcoded_request, - body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError 
exception @@ -2271,19 +9190,21 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = database.ListDatabasesResponse() + pb_resp = database.ListDatabasesResponse.pb(resp) - resp = self._interceptor.post_create_cloud_vm_cluster(resp) + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_list_databases(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_create_cloud_vm_cluster_with_metadata( + resp, _ = self._interceptor.post_list_databases_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER try: - response_payload = json_format.MessageToJson(resp) + response_payload = database.ListDatabasesResponse.to_json(response) except: response_payload = None http_response = { @@ -2292,22 +9213,21 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.create_cloud_vm_cluster", + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_databases", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "CreateCloudVmCluster", + "rpcName": "ListDatabases", "metadata": http_response["headers"], "httpResponse": http_response, }, ) return resp - class _DeleteAutonomousDatabase( - _BaseOracleDatabaseRestTransport._BaseDeleteAutonomousDatabase, - OracleDatabaseRestStub, + class _ListDbNodes( + _BaseOracleDatabaseRestTransport._BaseListDbNodes, OracleDatabaseRestStub ): def __hash__(self): - return hash("OracleDatabaseRestTransport.DeleteAutonomousDatabase") + return hash("OracleDatabaseRestTransport.ListDbNodes") @staticmethod def _get_response( @@ -2333,47 +9253,41 @@ def _get_response( def __call__( self, 
- request: oracledatabase.DeleteAutonomousDatabaseRequest, + request: oracledatabase.ListDbNodesRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the delete autonomous - database method over HTTP. - - Args: - request (~.oracledatabase.DeleteAutonomousDatabaseRequest): - The request object. The request for ``AutonomousDatabase.Delete``. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. + ) -> oracledatabase.ListDbNodesResponse: + r"""Call the list db nodes method over HTTP. - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. + Args: + request (~.oracledatabase.ListDbNodesRequest): + The request object. The request for ``DbNode.List``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.oracledatabase.ListDbNodesResponse: + The response for ``DbNode.List``. 
""" http_options = ( - _BaseOracleDatabaseRestTransport._BaseDeleteAutonomousDatabase._get_http_options() + _BaseOracleDatabaseRestTransport._BaseListDbNodes._get_http_options() ) - request, metadata = self._interceptor.pre_delete_autonomous_database( - request, metadata - ) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseDeleteAutonomousDatabase._get_transcoded_request( + request, metadata = self._interceptor.pre_list_db_nodes(request, metadata) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseListDbNodes._get_transcoded_request( http_options, request ) # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseDeleteAutonomousDatabase._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseListDbNodes._get_query_params_json( transcoded_request ) @@ -2385,7 +9299,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2395,25 +9309,23 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.DeleteAutonomousDatabase", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListDbNodes", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "DeleteAutonomousDatabase", + "rpcName": "ListDbNodes", "httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response = ( - OracleDatabaseRestTransport._DeleteAutonomousDatabase._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) + response = OracleDatabaseRestTransport._ListDbNodes._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError 
exception @@ -2422,19 +9334,23 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = oracledatabase.ListDbNodesResponse() + pb_resp = oracledatabase.ListDbNodesResponse.pb(resp) - resp = self._interceptor.post_delete_autonomous_database(resp) + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_list_db_nodes(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_delete_autonomous_database_with_metadata( + resp, _ = self._interceptor.post_list_db_nodes_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER try: - response_payload = json_format.MessageToJson(resp) + response_payload = oracledatabase.ListDbNodesResponse.to_json( + response + ) except: response_payload = None http_response = { @@ -2443,22 +9359,21 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.delete_autonomous_database", + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_db_nodes", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "DeleteAutonomousDatabase", + "rpcName": "ListDbNodes", "metadata": http_response["headers"], "httpResponse": http_response, }, ) return resp - class _DeleteCloudExadataInfrastructure( - _BaseOracleDatabaseRestTransport._BaseDeleteCloudExadataInfrastructure, - OracleDatabaseRestStub, + class _ListDbServers( + _BaseOracleDatabaseRestTransport._BaseListDbServers, OracleDatabaseRestStub ): def __hash__(self): - return hash("OracleDatabaseRestTransport.DeleteCloudExadataInfrastructure") + return hash("OracleDatabaseRestTransport.ListDbServers") @staticmethod def _get_response( @@ 
-2484,50 +9399,41 @@ def _get_response( def __call__( self, - request: oracledatabase.DeleteCloudExadataInfrastructureRequest, + request: oracledatabase.ListDbServersRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the delete cloud exadata - infrastructure method over HTTP. - - Args: - request (~.oracledatabase.DeleteCloudExadataInfrastructureRequest): - The request object. The request for ``CloudExadataInfrastructure.Delete``. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. + ) -> oracledatabase.ListDbServersResponse: + r"""Call the list db servers method over HTTP. - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. + Args: + request (~.oracledatabase.ListDbServersRequest): + The request object. The request for ``DbServer.List``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.oracledatabase.ListDbServersResponse: + The response for ``DbServer.List``. 
""" http_options = ( - _BaseOracleDatabaseRestTransport._BaseDeleteCloudExadataInfrastructure._get_http_options() + _BaseOracleDatabaseRestTransport._BaseListDbServers._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_delete_cloud_exadata_infrastructure( - request, metadata - ) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseDeleteCloudExadataInfrastructure._get_transcoded_request( + request, metadata = self._interceptor.pre_list_db_servers(request, metadata) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseListDbServers._get_transcoded_request( http_options, request ) # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseDeleteCloudExadataInfrastructure._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseListDbServers._get_query_params_json( transcoded_request ) @@ -2539,7 +9445,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2549,17 +9455,17 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.DeleteCloudExadataInfrastructure", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListDbServers", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "DeleteCloudExadataInfrastructure", + "rpcName": "ListDbServers", "httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response = OracleDatabaseRestTransport._DeleteCloudExadataInfrastructure._get_response( + response = OracleDatabaseRestTransport._ListDbServers._get_response( self._host, metadata, query_params, @@ -2574,22 +9480,23 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = operations_pb2.Operation() - 
json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = oracledatabase.ListDbServersResponse() + pb_resp = oracledatabase.ListDbServersResponse.pb(resp) - resp = self._interceptor.post_delete_cloud_exadata_infrastructure(resp) + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_list_db_servers(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_delete_cloud_exadata_infrastructure_with_metadata( + resp, _ = self._interceptor.post_list_db_servers_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER try: - response_payload = json_format.MessageToJson(resp) + response_payload = oracledatabase.ListDbServersResponse.to_json( + response + ) except: response_payload = None http_response = { @@ -2598,22 +9505,22 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.delete_cloud_exadata_infrastructure", + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_db_servers", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "DeleteCloudExadataInfrastructure", + "rpcName": "ListDbServers", "metadata": http_response["headers"], "httpResponse": http_response, }, ) return resp - class _DeleteCloudVmCluster( - _BaseOracleDatabaseRestTransport._BaseDeleteCloudVmCluster, + class _ListDbSystemInitialStorageSizes( + _BaseOracleDatabaseRestTransport._BaseListDbSystemInitialStorageSizes, OracleDatabaseRestStub, ): def __hash__(self): - return hash("OracleDatabaseRestTransport.DeleteCloudVmCluster") + return hash("OracleDatabaseRestTransport.ListDbSystemInitialStorageSizes") @staticmethod def _get_response( @@ -2639,46 +9546,47 @@ def _get_response( def __call__( self, - request: 
oracledatabase.DeleteCloudVmClusterRequest, + request: db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the delete cloud vm cluster method over HTTP. - - Args: - request (~.oracledatabase.DeleteCloudVmClusterRequest): - The request object. The request for ``CloudVmCluster.Delete``. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. + ) -> db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse: + r"""Call the list db system initial + storage sizes method over HTTP. - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. + Args: + request (~.db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest): + The request object. The request for ``DbSystemInitialStorageSizes.List``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse: + The response for ``DbSystemInitialStorageSizes.List``. 
""" http_options = ( - _BaseOracleDatabaseRestTransport._BaseDeleteCloudVmCluster._get_http_options() + _BaseOracleDatabaseRestTransport._BaseListDbSystemInitialStorageSizes._get_http_options() ) - request, metadata = self._interceptor.pre_delete_cloud_vm_cluster( + ( + request, + metadata, + ) = self._interceptor.pre_list_db_system_initial_storage_sizes( request, metadata ) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseDeleteCloudVmCluster._get_transcoded_request( + transcoded_request = _BaseOracleDatabaseRestTransport._BaseListDbSystemInitialStorageSizes._get_transcoded_request( http_options, request ) # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseDeleteCloudVmCluster._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseListDbSystemInitialStorageSizes._get_query_params_json( transcoded_request ) @@ -2690,7 +9598,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2700,17 +9608,17 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.DeleteCloudVmCluster", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListDbSystemInitialStorageSizes", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "DeleteCloudVmCluster", + "rpcName": "ListDbSystemInitialStorageSizes", "httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response = OracleDatabaseRestTransport._DeleteCloudVmCluster._get_response( + response = OracleDatabaseRestTransport._ListDbSystemInitialStorageSizes._get_response( self._host, metadata, query_params, @@ -2725,19 +9633,30 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = 
operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse() + ) + pb_resp = db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse.pb( + resp + ) - resp = self._interceptor.post_delete_cloud_vm_cluster(resp) + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_list_db_system_initial_storage_sizes(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_delete_cloud_vm_cluster_with_metadata( + ( + resp, + _, + ) = self._interceptor.post_list_db_system_initial_storage_sizes_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER try: - response_payload = json_format.MessageToJson(resp) + response_payload = db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse.to_json( + response + ) except: response_payload = None http_response = { @@ -2746,22 +9665,21 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.delete_cloud_vm_cluster", + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_db_system_initial_storage_sizes", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "DeleteCloudVmCluster", + "rpcName": "ListDbSystemInitialStorageSizes", "metadata": http_response["headers"], "httpResponse": http_response, }, ) return resp - class _GenerateAutonomousDatabaseWallet( - _BaseOracleDatabaseRestTransport._BaseGenerateAutonomousDatabaseWallet, - OracleDatabaseRestStub, + class _ListDbSystems( + _BaseOracleDatabaseRestTransport._BaseListDbSystems, OracleDatabaseRestStub ): def __hash__(self): - return hash("OracleDatabaseRestTransport.GenerateAutonomousDatabaseWallet") + return 
hash("OracleDatabaseRestTransport.ListDbSystems") @staticmethod def _get_response( @@ -2782,57 +9700,46 @@ def _get_response( timeout=timeout, headers=headers, params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, ) return response def __call__( self, - request: oracledatabase.GenerateAutonomousDatabaseWalletRequest, + request: db_system.ListDbSystemsRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> oracledatabase.GenerateAutonomousDatabaseWalletResponse: - r"""Call the generate autonomous - database wallet method over HTTP. + ) -> db_system.ListDbSystemsResponse: + r"""Call the list db systems method over HTTP. - Args: - request (~.oracledatabase.GenerateAutonomousDatabaseWalletRequest): - The request object. The request for ``AutonomousDatabase.GenerateWallet``. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. + Args: + request (~.db_system.ListDbSystemsRequest): + The request object. The request for ``DbSystem.List``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. - Returns: - ~.oracledatabase.GenerateAutonomousDatabaseWalletResponse: - The response for ``AutonomousDatabase.GenerateWallet``. 
+ Returns: + ~.db_system.ListDbSystemsResponse: + The response for ``DbSystem.List``. """ http_options = ( - _BaseOracleDatabaseRestTransport._BaseGenerateAutonomousDatabaseWallet._get_http_options() + _BaseOracleDatabaseRestTransport._BaseListDbSystems._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_generate_autonomous_database_wallet( - request, metadata - ) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseGenerateAutonomousDatabaseWallet._get_transcoded_request( + request, metadata = self._interceptor.pre_list_db_systems(request, metadata) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseListDbSystems._get_transcoded_request( http_options, request ) - body = _BaseOracleDatabaseRestTransport._BaseGenerateAutonomousDatabaseWallet._get_request_body_json( - transcoded_request - ) - # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseGenerateAutonomousDatabaseWallet._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseListDbSystems._get_query_params_json( transcoded_request ) @@ -2854,24 +9761,23 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.GenerateAutonomousDatabaseWallet", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListDbSystems", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "GenerateAutonomousDatabaseWallet", + "rpcName": "ListDbSystems", "httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response = OracleDatabaseRestTransport._GenerateAutonomousDatabaseWallet._get_response( + response = OracleDatabaseRestTransport._ListDbSystems._get_response( self._host, metadata, query_params, self._session, timeout, transcoded_request, - body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2880,28 +9786,21 @@ def __call__( 
raise core_exceptions.from_http_response(response) # Return the response - resp = oracledatabase.GenerateAutonomousDatabaseWalletResponse() - pb_resp = oracledatabase.GenerateAutonomousDatabaseWalletResponse.pb(resp) + resp = db_system.ListDbSystemsResponse() + pb_resp = db_system.ListDbSystemsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_generate_autonomous_database_wallet(resp) + resp = self._interceptor.post_list_db_systems(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_generate_autonomous_database_wallet_with_metadata( + resp, _ = self._interceptor.post_list_db_systems_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER try: - response_payload = ( - oracledatabase.GenerateAutonomousDatabaseWalletResponse.to_json( - response - ) - ) + response_payload = db_system.ListDbSystemsResponse.to_json(response) except: response_payload = None http_response = { @@ -2910,22 +9809,21 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.generate_autonomous_database_wallet", + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_db_systems", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "GenerateAutonomousDatabaseWallet", + "rpcName": "ListDbSystems", "metadata": http_response["headers"], "httpResponse": http_response, }, ) return resp - class _GetAutonomousDatabase( - _BaseOracleDatabaseRestTransport._BaseGetAutonomousDatabase, - OracleDatabaseRestStub, + class _ListDbSystemShapes( + _BaseOracleDatabaseRestTransport._BaseListDbSystemShapes, OracleDatabaseRestStub ): def __hash__(self): - return hash("OracleDatabaseRestTransport.GetAutonomousDatabase") + return 
hash("OracleDatabaseRestTransport.ListDbSystemShapes") @staticmethod def _get_response( @@ -2951,17 +9849,17 @@ def _get_response( def __call__( self, - request: oracledatabase.GetAutonomousDatabaseRequest, + request: oracledatabase.ListDbSystemShapesRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> autonomous_database.AutonomousDatabase: - r"""Call the get autonomous database method over HTTP. + ) -> oracledatabase.ListDbSystemShapesResponse: + r"""Call the list db system shapes method over HTTP. Args: - request (~.oracledatabase.GetAutonomousDatabaseRequest): - The request object. The request for ``AutonomousDatabase.Get``. + request (~.oracledatabase.ListDbSystemShapesRequest): + The request object. The request for ``DbSystemShape.List``. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2971,26 +9869,23 @@ def __call__( be of type `bytes`. Returns: - ~.autonomous_database.AutonomousDatabase: - Details of the Autonomous Database - resource. - https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/AutonomousDatabase/ - + ~.oracledatabase.ListDbSystemShapesResponse: + The response for ``DbSystemShape.List``. 
""" http_options = ( - _BaseOracleDatabaseRestTransport._BaseGetAutonomousDatabase._get_http_options() + _BaseOracleDatabaseRestTransport._BaseListDbSystemShapes._get_http_options() ) - request, metadata = self._interceptor.pre_get_autonomous_database( + request, metadata = self._interceptor.pre_list_db_system_shapes( request, metadata ) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseGetAutonomousDatabase._get_transcoded_request( + transcoded_request = _BaseOracleDatabaseRestTransport._BaseListDbSystemShapes._get_transcoded_request( http_options, request ) # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseGetAutonomousDatabase._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseListDbSystemShapes._get_query_params_json( transcoded_request ) @@ -3012,17 +9907,17 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.GetAutonomousDatabase", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListDbSystemShapes", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "GetAutonomousDatabase", + "rpcName": "ListDbSystemShapes", "httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response = OracleDatabaseRestTransport._GetAutonomousDatabase._get_response( + response = OracleDatabaseRestTransport._ListDbSystemShapes._get_response( self._host, metadata, query_params, @@ -3037,22 +9932,22 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = autonomous_database.AutonomousDatabase() - pb_resp = autonomous_database.AutonomousDatabase.pb(resp) + resp = oracledatabase.ListDbSystemShapesResponse() + pb_resp = oracledatabase.ListDbSystemShapesResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = 
self._interceptor.post_get_autonomous_database(resp) + resp = self._interceptor.post_list_db_system_shapes(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_get_autonomous_database_with_metadata( + resp, _ = self._interceptor.post_list_db_system_shapes_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER try: - response_payload = autonomous_database.AutonomousDatabase.to_json( - response + response_payload = ( + oracledatabase.ListDbSystemShapesResponse.to_json(response) ) except: response_payload = None @@ -3062,22 +9957,21 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.get_autonomous_database", + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_db_system_shapes", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "GetAutonomousDatabase", + "rpcName": "ListDbSystemShapes", "metadata": http_response["headers"], "httpResponse": http_response, }, ) return resp - class _GetCloudExadataInfrastructure( - _BaseOracleDatabaseRestTransport._BaseGetCloudExadataInfrastructure, - OracleDatabaseRestStub, + class _ListDbVersions( + _BaseOracleDatabaseRestTransport._BaseListDbVersions, OracleDatabaseRestStub ): def __hash__(self): - return hash("OracleDatabaseRestTransport.GetCloudExadataInfrastructure") + return hash("OracleDatabaseRestTransport.ListDbVersions") @staticmethod def _get_response( @@ -3103,47 +9997,43 @@ def _get_response( def __call__( self, - request: oracledatabase.GetCloudExadataInfrastructureRequest, + request: db_version.ListDbVersionsRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> exadata_infra.CloudExadataInfrastructure: - r"""Call the get cloud exadata - 
infrastructure method over HTTP. - - Args: - request (~.oracledatabase.GetCloudExadataInfrastructureRequest): - The request object. The request for ``CloudExadataInfrastructure.Get``. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. + ) -> db_version.ListDbVersionsResponse: + r"""Call the list db versions method over HTTP. - Returns: - ~.exadata_infra.CloudExadataInfrastructure: - Represents CloudExadataInfrastructure - resource. - https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/CloudExadataInfrastructure/ + Args: + request (~.db_version.ListDbVersionsRequest): + The request object. The request for ``DbVersions.List``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.db_version.ListDbVersionsResponse: + The response for ``DbVersions.List``. 
""" http_options = ( - _BaseOracleDatabaseRestTransport._BaseGetCloudExadataInfrastructure._get_http_options() + _BaseOracleDatabaseRestTransport._BaseListDbVersions._get_http_options() ) - request, metadata = self._interceptor.pre_get_cloud_exadata_infrastructure( + request, metadata = self._interceptor.pre_list_db_versions( request, metadata ) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseGetCloudExadataInfrastructure._get_transcoded_request( + transcoded_request = _BaseOracleDatabaseRestTransport._BaseListDbVersions._get_transcoded_request( http_options, request ) # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseGetCloudExadataInfrastructure._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseListDbVersions._get_query_params_json( transcoded_request ) @@ -3165,17 +10055,17 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.GetCloudExadataInfrastructure", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListDbVersions", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "GetCloudExadataInfrastructure", + "rpcName": "ListDbVersions", "httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response = OracleDatabaseRestTransport._GetCloudExadataInfrastructure._get_response( + response = OracleDatabaseRestTransport._ListDbVersions._get_response( self._host, metadata, query_params, @@ -3190,24 +10080,21 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = exadata_infra.CloudExadataInfrastructure() - pb_resp = exadata_infra.CloudExadataInfrastructure.pb(resp) + resp = db_version.ListDbVersionsResponse() + pb_resp = db_version.ListDbVersionsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = 
self._interceptor.post_get_cloud_exadata_infrastructure(resp) + resp = self._interceptor.post_list_db_versions(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_get_cloud_exadata_infrastructure_with_metadata( + resp, _ = self._interceptor.post_list_db_versions_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER try: - response_payload = exadata_infra.CloudExadataInfrastructure.to_json( + response_payload = db_version.ListDbVersionsResponse.to_json( response ) except: @@ -3218,21 +10105,21 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.get_cloud_exadata_infrastructure", + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_db_versions", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "GetCloudExadataInfrastructure", + "rpcName": "ListDbVersions", "metadata": http_response["headers"], "httpResponse": http_response, }, ) return resp - class _GetCloudVmCluster( - _BaseOracleDatabaseRestTransport._BaseGetCloudVmCluster, OracleDatabaseRestStub + class _ListEntitlements( + _BaseOracleDatabaseRestTransport._BaseListEntitlements, OracleDatabaseRestStub ): def __hash__(self): - return hash("OracleDatabaseRestTransport.GetCloudVmCluster") + return hash("OracleDatabaseRestTransport.ListEntitlements") @staticmethod def _get_response( @@ -3258,17 +10145,17 @@ def _get_response( def __call__( self, - request: oracledatabase.GetCloudVmClusterRequest, + request: oracledatabase.ListEntitlementsRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> vm_cluster.CloudVmCluster: - r"""Call the get cloud vm cluster method over HTTP. 
+ ) -> oracledatabase.ListEntitlementsResponse: + r"""Call the list entitlements method over HTTP. Args: - request (~.oracledatabase.GetCloudVmClusterRequest): - The request object. The request for ``CloudVmCluster.Get``. + request (~.oracledatabase.ListEntitlementsRequest): + The request object. The request for ``Entitlement.List``. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -3278,26 +10165,23 @@ def __call__( be of type `bytes`. Returns: - ~.vm_cluster.CloudVmCluster: - Details of the Cloud VM Cluster - resource. - https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/CloudVmCluster/ - + ~.oracledatabase.ListEntitlementsResponse: + The response for ``Entitlement.List``. """ http_options = ( - _BaseOracleDatabaseRestTransport._BaseGetCloudVmCluster._get_http_options() + _BaseOracleDatabaseRestTransport._BaseListEntitlements._get_http_options() ) - request, metadata = self._interceptor.pre_get_cloud_vm_cluster( + request, metadata = self._interceptor.pre_list_entitlements( request, metadata ) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseGetCloudVmCluster._get_transcoded_request( + transcoded_request = _BaseOracleDatabaseRestTransport._BaseListEntitlements._get_transcoded_request( http_options, request ) # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseGetCloudVmCluster._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseListEntitlements._get_query_params_json( transcoded_request ) @@ -3319,17 +10203,17 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.GetCloudVmCluster", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListEntitlements", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "GetCloudVmCluster", + "rpcName": "ListEntitlements", 
"httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response = OracleDatabaseRestTransport._GetCloudVmCluster._get_response( + response = OracleDatabaseRestTransport._ListEntitlements._get_response( self._host, metadata, query_params, @@ -3344,21 +10228,23 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = vm_cluster.CloudVmCluster() - pb_resp = vm_cluster.CloudVmCluster.pb(resp) + resp = oracledatabase.ListEntitlementsResponse() + pb_resp = oracledatabase.ListEntitlementsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_get_cloud_vm_cluster(resp) + resp = self._interceptor.post_list_entitlements(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_get_cloud_vm_cluster_with_metadata( + resp, _ = self._interceptor.post_list_entitlements_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER try: - response_payload = vm_cluster.CloudVmCluster.to_json(response) + response_payload = oracledatabase.ListEntitlementsResponse.to_json( + response + ) except: response_payload = None http_response = { @@ -3367,22 +10253,22 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.get_cloud_vm_cluster", + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_entitlements", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "GetCloudVmCluster", + "rpcName": "ListEntitlements", "metadata": http_response["headers"], "httpResponse": http_response, }, ) return resp - class _ListAutonomousDatabaseBackups( - _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabaseBackups, + class _ListExadbVmClusters( + 
_BaseOracleDatabaseRestTransport._BaseListExadbVmClusters, OracleDatabaseRestStub, ): def __hash__(self): - return hash("OracleDatabaseRestTransport.ListAutonomousDatabaseBackups") + return hash("OracleDatabaseRestTransport.ListExadbVmClusters") @staticmethod def _get_response( @@ -3408,44 +10294,43 @@ def _get_response( def __call__( self, - request: oracledatabase.ListAutonomousDatabaseBackupsRequest, + request: oracledatabase.ListExadbVmClustersRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> oracledatabase.ListAutonomousDatabaseBackupsResponse: - r"""Call the list autonomous database - backups method over HTTP. + ) -> oracledatabase.ListExadbVmClustersResponse: + r"""Call the list exadb vm clusters method over HTTP. - Args: - request (~.oracledatabase.ListAutonomousDatabaseBackupsRequest): - The request object. The request for ``AutonomousDatabaseBackup.List``. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. + Args: + request (~.oracledatabase.ListExadbVmClustersRequest): + The request object. The request for ``ExadbVmCluster.List``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
- Returns: - ~.oracledatabase.ListAutonomousDatabaseBackupsResponse: - The response for ``AutonomousDatabaseBackup.List``. + Returns: + ~.oracledatabase.ListExadbVmClustersResponse: + The response for ``ExadbVmCluster.List``. """ http_options = ( - _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabaseBackups._get_http_options() + _BaseOracleDatabaseRestTransport._BaseListExadbVmClusters._get_http_options() ) - request, metadata = self._interceptor.pre_list_autonomous_database_backups( + request, metadata = self._interceptor.pre_list_exadb_vm_clusters( request, metadata ) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabaseBackups._get_transcoded_request( + transcoded_request = _BaseOracleDatabaseRestTransport._BaseListExadbVmClusters._get_transcoded_request( http_options, request ) # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabaseBackups._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseListExadbVmClusters._get_query_params_json( transcoded_request ) @@ -3467,17 +10352,17 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListAutonomousDatabaseBackups", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListExadbVmClusters", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "ListAutonomousDatabaseBackups", + "rpcName": "ListExadbVmClusters", "httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response = OracleDatabaseRestTransport._ListAutonomousDatabaseBackups._get_response( + response = OracleDatabaseRestTransport._ListExadbVmClusters._get_response( self._host, metadata, query_params, @@ -3492,17 +10377,14 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = oracledatabase.ListAutonomousDatabaseBackupsResponse() - 
pb_resp = oracledatabase.ListAutonomousDatabaseBackupsResponse.pb(resp) + resp = oracledatabase.ListExadbVmClustersResponse() + pb_resp = oracledatabase.ListExadbVmClustersResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_autonomous_database_backups(resp) + resp = self._interceptor.post_list_exadb_vm_clusters(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_list_autonomous_database_backups_with_metadata( + resp, _ = self._interceptor.post_list_exadb_vm_clusters_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( @@ -3510,9 +10392,7 @@ def __call__( ): # pragma: NO COVER try: response_payload = ( - oracledatabase.ListAutonomousDatabaseBackupsResponse.to_json( - response - ) + oracledatabase.ListExadbVmClustersResponse.to_json(response) ) except: response_payload = None @@ -3522,24 +10402,22 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_autonomous_database_backups", + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_exadb_vm_clusters", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "ListAutonomousDatabaseBackups", + "rpcName": "ListExadbVmClusters", "metadata": http_response["headers"], "httpResponse": http_response, }, ) return resp - class _ListAutonomousDatabaseCharacterSets( - _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabaseCharacterSets, + class _ListExascaleDbStorageVaults( + _BaseOracleDatabaseRestTransport._BaseListExascaleDbStorageVaults, OracleDatabaseRestStub, ): def __hash__(self): - return hash( - "OracleDatabaseRestTransport.ListAutonomousDatabaseCharacterSets" - ) + return hash("OracleDatabaseRestTransport.ListExascaleDbStorageVaults") @staticmethod def _get_response( @@ -3565,18 
+10443,18 @@ def _get_response( def __call__( self, - request: oracledatabase.ListAutonomousDatabaseCharacterSetsRequest, + request: exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> oracledatabase.ListAutonomousDatabaseCharacterSetsResponse: - r"""Call the list autonomous database - character sets method over HTTP. + ) -> exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse: + r"""Call the list exascale db storage + vaults method over HTTP. Args: - request (~.oracledatabase.ListAutonomousDatabaseCharacterSetsRequest): - The request object. The request for ``AutonomousDatabaseCharacterSet.List``. + request (~.exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest): + The request object. The request for ``ExascaleDbStorageVault.List``. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -3586,28 +10464,23 @@ def __call__( be of type `bytes`. Returns: - ~.oracledatabase.ListAutonomousDatabaseCharacterSetsResponse: - The response for - ``AutonomousDatabaseCharacterSet.List``. - + ~.exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse: + The response for ``ExascaleDbStorageVault.List``. 
""" http_options = ( - _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabaseCharacterSets._get_http_options() + _BaseOracleDatabaseRestTransport._BaseListExascaleDbStorageVaults._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_list_autonomous_database_character_sets( + request, metadata = self._interceptor.pre_list_exascale_db_storage_vaults( request, metadata ) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabaseCharacterSets._get_transcoded_request( + transcoded_request = _BaseOracleDatabaseRestTransport._BaseListExascaleDbStorageVaults._get_transcoded_request( http_options, request ) # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabaseCharacterSets._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseListExascaleDbStorageVaults._get_query_params_json( transcoded_request ) @@ -3629,23 +10502,25 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListAutonomousDatabaseCharacterSets", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListExascaleDbStorageVaults", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "ListAutonomousDatabaseCharacterSets", + "rpcName": "ListExascaleDbStorageVaults", "httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response = OracleDatabaseRestTransport._ListAutonomousDatabaseCharacterSets._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, + response = ( + OracleDatabaseRestTransport._ListExascaleDbStorageVaults._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -3654,26 +10529,26 @@ def __call__( raise 
core_exceptions.from_http_response(response) # Return the response - resp = oracledatabase.ListAutonomousDatabaseCharacterSetsResponse() - pb_resp = oracledatabase.ListAutonomousDatabaseCharacterSetsResponse.pb( + resp = exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse() + pb_resp = exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse.pb( resp ) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_autonomous_database_character_sets(resp) + resp = self._interceptor.post_list_exascale_db_storage_vaults(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] ( resp, _, - ) = self._interceptor.post_list_autonomous_database_character_sets_with_metadata( + ) = self._interceptor.post_list_exascale_db_storage_vaults_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER try: - response_payload = oracledatabase.ListAutonomousDatabaseCharacterSetsResponse.to_json( + response_payload = exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse.to_json( response ) except: @@ -3684,22 +10559,21 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_autonomous_database_character_sets", + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_exascale_db_storage_vaults", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "ListAutonomousDatabaseCharacterSets", + "rpcName": "ListExascaleDbStorageVaults", "metadata": http_response["headers"], "httpResponse": http_response, }, ) return resp - class _ListAutonomousDatabases( - _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabases, - OracleDatabaseRestStub, + class _ListGiVersions( + _BaseOracleDatabaseRestTransport._BaseListGiVersions, OracleDatabaseRestStub ): def __hash__(self): - return 
hash("OracleDatabaseRestTransport.ListAutonomousDatabases") + return hash("OracleDatabaseRestTransport.ListGiVersions") @staticmethod def _get_response( @@ -3725,17 +10599,17 @@ def _get_response( def __call__( self, - request: oracledatabase.ListAutonomousDatabasesRequest, + request: oracledatabase.ListGiVersionsRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> oracledatabase.ListAutonomousDatabasesResponse: - r"""Call the list autonomous databases method over HTTP. + ) -> oracledatabase.ListGiVersionsResponse: + r"""Call the list gi versions method over HTTP. Args: - request (~.oracledatabase.ListAutonomousDatabasesRequest): - The request object. The request for ``AutonomousDatabase.List``. + request (~.oracledatabase.ListGiVersionsRequest): + The request object. The request for ``GiVersion.List``. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -3745,23 +10619,23 @@ def __call__( be of type `bytes`. Returns: - ~.oracledatabase.ListAutonomousDatabasesResponse: - The response for ``AutonomousDatabase.List``. + ~.oracledatabase.ListGiVersionsResponse: + The response for ``GiVersion.List``. 
""" http_options = ( - _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabases._get_http_options() + _BaseOracleDatabaseRestTransport._BaseListGiVersions._get_http_options() ) - request, metadata = self._interceptor.pre_list_autonomous_databases( + request, metadata = self._interceptor.pre_list_gi_versions( request, metadata ) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabases._get_transcoded_request( + transcoded_request = _BaseOracleDatabaseRestTransport._BaseListGiVersions._get_transcoded_request( http_options, request ) # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabases._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseListGiVersions._get_query_params_json( transcoded_request ) @@ -3783,25 +10657,23 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListAutonomousDatabases", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListGiVersions", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "ListAutonomousDatabases", + "rpcName": "ListGiVersions", "httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response = ( - OracleDatabaseRestTransport._ListAutonomousDatabases._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) + response = OracleDatabaseRestTransport._ListGiVersions._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -3810,22 +10682,22 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = oracledatabase.ListAutonomousDatabasesResponse() - pb_resp = oracledatabase.ListAutonomousDatabasesResponse.pb(resp) + 
resp = oracledatabase.ListGiVersionsResponse() + pb_resp = oracledatabase.ListGiVersionsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_autonomous_databases(resp) + resp = self._interceptor.post_list_gi_versions(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_list_autonomous_databases_with_metadata( + resp, _ = self._interceptor.post_list_gi_versions_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER try: - response_payload = ( - oracledatabase.ListAutonomousDatabasesResponse.to_json(response) + response_payload = oracledatabase.ListGiVersionsResponse.to_json( + response ) except: response_payload = None @@ -3835,22 +10707,21 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_autonomous_databases", + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_gi_versions", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "ListAutonomousDatabases", + "rpcName": "ListGiVersions", "metadata": http_response["headers"], "httpResponse": http_response, }, ) return resp - class _ListAutonomousDbVersions( - _BaseOracleDatabaseRestTransport._BaseListAutonomousDbVersions, - OracleDatabaseRestStub, + class _ListMinorVersions( + _BaseOracleDatabaseRestTransport._BaseListMinorVersions, OracleDatabaseRestStub ): def __hash__(self): - return hash("OracleDatabaseRestTransport.ListAutonomousDbVersions") + return hash("OracleDatabaseRestTransport.ListMinorVersions") @staticmethod def _get_response( @@ -3876,44 +10747,43 @@ def _get_response( def __call__( self, - request: oracledatabase.ListAutonomousDbVersionsRequest, + request: minor_version.ListMinorVersionsRequest, *, retry: OptionalRetry = 
gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> oracledatabase.ListAutonomousDbVersionsResponse: - r"""Call the list autonomous db - versions method over HTTP. + ) -> minor_version.ListMinorVersionsResponse: + r"""Call the list minor versions method over HTTP. - Args: - request (~.oracledatabase.ListAutonomousDbVersionsRequest): - The request object. The request for ``AutonomousDbVersion.List``. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. + Args: + request (~.minor_version.ListMinorVersionsRequest): + The request object. The request for ``MinorVersion.List``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. - Returns: - ~.oracledatabase.ListAutonomousDbVersionsResponse: - The response for ``AutonomousDbVersion.List``. + Returns: + ~.minor_version.ListMinorVersionsResponse: + The response for ``MinorVersion.List``. 
""" http_options = ( - _BaseOracleDatabaseRestTransport._BaseListAutonomousDbVersions._get_http_options() + _BaseOracleDatabaseRestTransport._BaseListMinorVersions._get_http_options() ) - request, metadata = self._interceptor.pre_list_autonomous_db_versions( + request, metadata = self._interceptor.pre_list_minor_versions( request, metadata ) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseListAutonomousDbVersions._get_transcoded_request( + transcoded_request = _BaseOracleDatabaseRestTransport._BaseListMinorVersions._get_transcoded_request( http_options, request ) # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseListAutonomousDbVersions._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseListMinorVersions._get_query_params_json( transcoded_request ) @@ -3935,25 +10805,23 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListAutonomousDbVersions", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListMinorVersions", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "ListAutonomousDbVersions", + "rpcName": "ListMinorVersions", "httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response = ( - OracleDatabaseRestTransport._ListAutonomousDbVersions._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) + response = OracleDatabaseRestTransport._ListMinorVersions._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -3962,24 +10830,22 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = oracledatabase.ListAutonomousDbVersionsResponse() - pb_resp = 
oracledatabase.ListAutonomousDbVersionsResponse.pb(resp) + resp = minor_version.ListMinorVersionsResponse() + pb_resp = minor_version.ListMinorVersionsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_autonomous_db_versions(resp) + resp = self._interceptor.post_list_minor_versions(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_list_autonomous_db_versions_with_metadata( + resp, _ = self._interceptor.post_list_minor_versions_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER try: - response_payload = ( - oracledatabase.ListAutonomousDbVersionsResponse.to_json( - response - ) + response_payload = minor_version.ListMinorVersionsResponse.to_json( + response ) except: response_payload = None @@ -3989,22 +10855,21 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_autonomous_db_versions", + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_minor_versions", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "ListAutonomousDbVersions", + "rpcName": "ListMinorVersions", "metadata": http_response["headers"], "httpResponse": http_response, }, ) return resp - class _ListCloudExadataInfrastructures( - _BaseOracleDatabaseRestTransport._BaseListCloudExadataInfrastructures, - OracleDatabaseRestStub, + class _ListOdbNetworks( + _BaseOracleDatabaseRestTransport._BaseListOdbNetworks, OracleDatabaseRestStub ): def __hash__(self): - return hash("OracleDatabaseRestTransport.ListCloudExadataInfrastructures") + return hash("OracleDatabaseRestTransport.ListOdbNetworks") @staticmethod def _get_response( @@ -4030,47 +10895,43 @@ def _get_response( def __call__( self, - request: 
oracledatabase.ListCloudExadataInfrastructuresRequest, + request: odb_network.ListOdbNetworksRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> oracledatabase.ListCloudExadataInfrastructuresResponse: - r"""Call the list cloud exadata - infrastructures method over HTTP. + ) -> odb_network.ListOdbNetworksResponse: + r"""Call the list odb networks method over HTTP. - Args: - request (~.oracledatabase.ListCloudExadataInfrastructuresRequest): - The request object. The request for ``CloudExadataInfrastructures.List``. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. + Args: + request (~.odb_network.ListOdbNetworksRequest): + The request object. The request for ``OdbNetwork.List``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. - Returns: - ~.oracledatabase.ListCloudExadataInfrastructuresResponse: - The response for ``CloudExadataInfrastructures.list``. + Returns: + ~.odb_network.ListOdbNetworksResponse: + The response for ``OdbNetwork.List``. 
""" http_options = ( - _BaseOracleDatabaseRestTransport._BaseListCloudExadataInfrastructures._get_http_options() + _BaseOracleDatabaseRestTransport._BaseListOdbNetworks._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_list_cloud_exadata_infrastructures( + request, metadata = self._interceptor.pre_list_odb_networks( request, metadata ) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseListCloudExadataInfrastructures._get_transcoded_request( + transcoded_request = _BaseOracleDatabaseRestTransport._BaseListOdbNetworks._get_transcoded_request( http_options, request ) # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseListCloudExadataInfrastructures._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseListOdbNetworks._get_query_params_json( transcoded_request ) @@ -4092,17 +10953,17 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListCloudExadataInfrastructures", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListOdbNetworks", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "ListCloudExadataInfrastructures", + "rpcName": "ListOdbNetworks", "httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response = OracleDatabaseRestTransport._ListCloudExadataInfrastructures._get_response( + response = OracleDatabaseRestTransport._ListOdbNetworks._get_response( self._host, metadata, query_params, @@ -4117,27 +10978,22 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = oracledatabase.ListCloudExadataInfrastructuresResponse() - pb_resp = oracledatabase.ListCloudExadataInfrastructuresResponse.pb(resp) + resp = odb_network.ListOdbNetworksResponse() + pb_resp = odb_network.ListOdbNetworksResponse.pb(resp) json_format.Parse(response.content, pb_resp, 
ignore_unknown_fields=True) - resp = self._interceptor.post_list_cloud_exadata_infrastructures(resp) + resp = self._interceptor.post_list_odb_networks(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_list_cloud_exadata_infrastructures_with_metadata( + resp, _ = self._interceptor.post_list_odb_networks_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER try: - response_payload = ( - oracledatabase.ListCloudExadataInfrastructuresResponse.to_json( - response - ) + response_payload = odb_network.ListOdbNetworksResponse.to_json( + response ) except: response_payload = None @@ -4147,22 +11003,21 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_cloud_exadata_infrastructures", + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_odb_networks", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "ListCloudExadataInfrastructures", + "rpcName": "ListOdbNetworks", "metadata": http_response["headers"], "httpResponse": http_response, }, ) return resp - class _ListCloudVmClusters( - _BaseOracleDatabaseRestTransport._BaseListCloudVmClusters, - OracleDatabaseRestStub, + class _ListOdbSubnets( + _BaseOracleDatabaseRestTransport._BaseListOdbSubnets, OracleDatabaseRestStub ): def __hash__(self): - return hash("OracleDatabaseRestTransport.ListCloudVmClusters") + return hash("OracleDatabaseRestTransport.ListOdbSubnets") @staticmethod def _get_response( @@ -4188,17 +11043,17 @@ def _get_response( def __call__( self, - request: oracledatabase.ListCloudVmClustersRequest, + request: odb_subnet.ListOdbSubnetsRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> 
oracledatabase.ListCloudVmClustersResponse: - r"""Call the list cloud vm clusters method over HTTP. + ) -> odb_subnet.ListOdbSubnetsResponse: + r"""Call the list odb subnets method over HTTP. Args: - request (~.oracledatabase.ListCloudVmClustersRequest): - The request object. The request for ``CloudVmCluster.List``. + request (~.odb_subnet.ListOdbSubnetsRequest): + The request object. The request for ``OdbSubnet.List``. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -4208,23 +11063,23 @@ def __call__( be of type `bytes`. Returns: - ~.oracledatabase.ListCloudVmClustersResponse: - The response for ``CloudVmCluster.List``. + ~.odb_subnet.ListOdbSubnetsResponse: + The response for ``OdbSubnet.List``. """ http_options = ( - _BaseOracleDatabaseRestTransport._BaseListCloudVmClusters._get_http_options() + _BaseOracleDatabaseRestTransport._BaseListOdbSubnets._get_http_options() ) - request, metadata = self._interceptor.pre_list_cloud_vm_clusters( + request, metadata = self._interceptor.pre_list_odb_subnets( request, metadata ) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseListCloudVmClusters._get_transcoded_request( + transcoded_request = _BaseOracleDatabaseRestTransport._BaseListOdbSubnets._get_transcoded_request( http_options, request ) # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseListCloudVmClusters._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseListOdbSubnets._get_query_params_json( transcoded_request ) @@ -4246,17 +11101,17 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListCloudVmClusters", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListOdbSubnets", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "ListCloudVmClusters", + "rpcName": 
"ListOdbSubnets", "httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response = OracleDatabaseRestTransport._ListCloudVmClusters._get_response( + response = OracleDatabaseRestTransport._ListOdbSubnets._get_response( self._host, metadata, query_params, @@ -4271,22 +11126,22 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = oracledatabase.ListCloudVmClustersResponse() - pb_resp = oracledatabase.ListCloudVmClustersResponse.pb(resp) + resp = odb_subnet.ListOdbSubnetsResponse() + pb_resp = odb_subnet.ListOdbSubnetsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_cloud_vm_clusters(resp) + resp = self._interceptor.post_list_odb_subnets(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_list_cloud_vm_clusters_with_metadata( + resp, _ = self._interceptor.post_list_odb_subnets_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER try: - response_payload = ( - oracledatabase.ListCloudVmClustersResponse.to_json(response) + response_payload = odb_subnet.ListOdbSubnetsResponse.to_json( + response ) except: response_payload = None @@ -4296,21 +11151,22 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_cloud_vm_clusters", + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_odb_subnets", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "ListCloudVmClusters", + "rpcName": "ListOdbSubnets", "metadata": http_response["headers"], "httpResponse": http_response, }, ) return resp - class _ListDbNodes( - _BaseOracleDatabaseRestTransport._BaseListDbNodes, OracleDatabaseRestStub + class _ListPluggableDatabases( + 
_BaseOracleDatabaseRestTransport._BaseListPluggableDatabases, + OracleDatabaseRestStub, ): def __hash__(self): - return hash("OracleDatabaseRestTransport.ListDbNodes") + return hash("OracleDatabaseRestTransport.ListPluggableDatabases") @staticmethod def _get_response( @@ -4336,17 +11192,17 @@ def _get_response( def __call__( self, - request: oracledatabase.ListDbNodesRequest, + request: pluggable_database.ListPluggableDatabasesRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> oracledatabase.ListDbNodesResponse: - r"""Call the list db nodes method over HTTP. + ) -> pluggable_database.ListPluggableDatabasesResponse: + r"""Call the list pluggable databases method over HTTP. Args: - request (~.oracledatabase.ListDbNodesRequest): - The request object. The request for ``DbNode.List``. + request (~.pluggable_database.ListPluggableDatabasesRequest): + The request object. The request for ``PluggableDatabase.List``. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -4356,21 +11212,23 @@ def __call__( be of type `bytes`. Returns: - ~.oracledatabase.ListDbNodesResponse: - The response for ``DbNode.List``. + ~.pluggable_database.ListPluggableDatabasesResponse: + The response for ``PluggableDatabase.List``. 
""" http_options = ( - _BaseOracleDatabaseRestTransport._BaseListDbNodes._get_http_options() + _BaseOracleDatabaseRestTransport._BaseListPluggableDatabases._get_http_options() ) - request, metadata = self._interceptor.pre_list_db_nodes(request, metadata) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseListDbNodes._get_transcoded_request( + request, metadata = self._interceptor.pre_list_pluggable_databases( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseListPluggableDatabases._get_transcoded_request( http_options, request ) # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseListDbNodes._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseListPluggableDatabases._get_query_params_json( transcoded_request ) @@ -4392,23 +11250,25 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListDbNodes", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListPluggableDatabases", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "ListDbNodes", + "rpcName": "ListPluggableDatabases", "httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response = OracleDatabaseRestTransport._ListDbNodes._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, + response = ( + OracleDatabaseRestTransport._ListPluggableDatabases._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -4417,22 +11277,24 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = oracledatabase.ListDbNodesResponse() - pb_resp = oracledatabase.ListDbNodesResponse.pb(resp) + resp = 
pluggable_database.ListPluggableDatabasesResponse() + pb_resp = pluggable_database.ListPluggableDatabasesResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_db_nodes(resp) + resp = self._interceptor.post_list_pluggable_databases(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_list_db_nodes_with_metadata( + resp, _ = self._interceptor.post_list_pluggable_databases_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER try: - response_payload = oracledatabase.ListDbNodesResponse.to_json( - response + response_payload = ( + pluggable_database.ListPluggableDatabasesResponse.to_json( + response + ) ) except: response_payload = None @@ -4442,21 +11304,24 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_db_nodes", + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_pluggable_databases", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "ListDbNodes", + "rpcName": "ListPluggableDatabases", "metadata": http_response["headers"], "httpResponse": http_response, }, ) return resp - class _ListDbServers( - _BaseOracleDatabaseRestTransport._BaseListDbServers, OracleDatabaseRestStub + class _RemoveVirtualMachineExadbVmCluster( + _BaseOracleDatabaseRestTransport._BaseRemoveVirtualMachineExadbVmCluster, + OracleDatabaseRestStub, ): def __hash__(self): - return hash("OracleDatabaseRestTransport.ListDbServers") + return hash( + "OracleDatabaseRestTransport.RemoveVirtualMachineExadbVmCluster" + ) @staticmethod def _get_response( @@ -4477,46 +11342,60 @@ def _get_response( timeout=timeout, headers=headers, params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, ) return response def 
__call__( self, - request: oracledatabase.ListDbServersRequest, + request: oracledatabase.RemoveVirtualMachineExadbVmClusterRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> oracledatabase.ListDbServersResponse: - r"""Call the list db servers method over HTTP. + ) -> operations_pb2.Operation: + r"""Call the remove virtual machine + exadb vm cluster method over HTTP. - Args: - request (~.oracledatabase.ListDbServersRequest): - The request object. The request for ``DbServer.List``. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. + Args: + request (~.oracledatabase.RemoveVirtualMachineExadbVmClusterRequest): + The request object. The request for ``ExadbVmCluster.RemoveVirtualMachine``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. - Returns: - ~.oracledatabase.ListDbServersResponse: - The response for ``DbServer.List``. 
""" http_options = ( - _BaseOracleDatabaseRestTransport._BaseListDbServers._get_http_options() + _BaseOracleDatabaseRestTransport._BaseRemoveVirtualMachineExadbVmCluster._get_http_options() ) - request, metadata = self._interceptor.pre_list_db_servers(request, metadata) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseListDbServers._get_transcoded_request( + ( + request, + metadata, + ) = self._interceptor.pre_remove_virtual_machine_exadb_vm_cluster( + request, metadata + ) + transcoded_request = _BaseOracleDatabaseRestTransport._BaseRemoveVirtualMachineExadbVmCluster._get_transcoded_request( http_options, request ) + body = _BaseOracleDatabaseRestTransport._BaseRemoveVirtualMachineExadbVmCluster._get_request_body_json( + transcoded_request + ) + # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseListDbServers._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseRemoveVirtualMachineExadbVmCluster._get_query_params_json( transcoded_request ) @@ -4528,7 +11407,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = type(request).to_json(request) + request_payload = json_format.MessageToJson(request) except: request_payload = None http_request = { @@ -4538,23 +11417,24 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListDbServers", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.RemoveVirtualMachineExadbVmCluster", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "ListDbServers", + "rpcName": "RemoveVirtualMachineExadbVmCluster", "httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response = OracleDatabaseRestTransport._ListDbServers._get_response( + response = OracleDatabaseRestTransport._RemoveVirtualMachineExadbVmCluster._get_response( self._host, metadata, query_params, 
self._session, timeout, transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -4563,23 +11443,22 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = oracledatabase.ListDbServersResponse() - pb_resp = oracledatabase.ListDbServersResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_db_servers(resp) + resp = self._interceptor.post_remove_virtual_machine_exadb_vm_cluster(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_list_db_servers_with_metadata( + ( + resp, + _, + ) = self._interceptor.post_remove_virtual_machine_exadb_vm_cluster_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER try: - response_payload = oracledatabase.ListDbServersResponse.to_json( - response - ) + response_payload = json_format.MessageToJson(resp) except: response_payload = None http_response = { @@ -4588,21 +11467,22 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_db_servers", + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.remove_virtual_machine_exadb_vm_cluster", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "ListDbServers", + "rpcName": "RemoveVirtualMachineExadbVmCluster", "metadata": http_response["headers"], "httpResponse": http_response, }, ) return resp - class _ListDbSystemShapes( - _BaseOracleDatabaseRestTransport._BaseListDbSystemShapes, OracleDatabaseRestStub + class _RestartAutonomousDatabase( + _BaseOracleDatabaseRestTransport._BaseRestartAutonomousDatabase, + 
OracleDatabaseRestStub, ): def __hash__(self): - return hash("OracleDatabaseRestTransport.ListDbSystemShapes") + return hash("OracleDatabaseRestTransport.RestartAutonomousDatabase") @staticmethod def _get_response( @@ -4623,48 +11503,57 @@ def _get_response( timeout=timeout, headers=headers, params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, ) return response def __call__( self, - request: oracledatabase.ListDbSystemShapesRequest, + request: oracledatabase.RestartAutonomousDatabaseRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> oracledatabase.ListDbSystemShapesResponse: - r"""Call the list db system shapes method over HTTP. + ) -> operations_pb2.Operation: + r"""Call the restart autonomous + database method over HTTP. - Args: - request (~.oracledatabase.ListDbSystemShapesRequest): - The request object. The request for ``DbSystemShape.List``. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. + Args: + request (~.oracledatabase.RestartAutonomousDatabaseRequest): + The request object. The request for ``AutonomousDatabase.Restart``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. - Returns: - ~.oracledatabase.ListDbSystemShapesResponse: - The response for ``DbSystemShape.List``. """ http_options = ( - _BaseOracleDatabaseRestTransport._BaseListDbSystemShapes._get_http_options() + _BaseOracleDatabaseRestTransport._BaseRestartAutonomousDatabase._get_http_options() ) - request, metadata = self._interceptor.pre_list_db_system_shapes( + request, metadata = self._interceptor.pre_restart_autonomous_database( request, metadata ) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseListDbSystemShapes._get_transcoded_request( + transcoded_request = _BaseOracleDatabaseRestTransport._BaseRestartAutonomousDatabase._get_transcoded_request( http_options, request ) + body = _BaseOracleDatabaseRestTransport._BaseRestartAutonomousDatabase._get_request_body_json( + transcoded_request + ) + # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseListDbSystemShapes._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseRestartAutonomousDatabase._get_query_params_json( transcoded_request ) @@ -4676,7 +11565,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = type(request).to_json(request) + request_payload = json_format.MessageToJson(request) except: request_payload = None http_request = { @@ -4686,23 +11575,26 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListDbSystemShapes", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.RestartAutonomousDatabase", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "ListDbSystemShapes", + "rpcName": "RestartAutonomousDatabase", "httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response = 
OracleDatabaseRestTransport._ListDbSystemShapes._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, + response = ( + OracleDatabaseRestTransport._RestartAutonomousDatabase._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -4711,23 +11603,19 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = oracledatabase.ListDbSystemShapesResponse() - pb_resp = oracledatabase.ListDbSystemShapesResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_db_system_shapes(resp) + resp = self._interceptor.post_restart_autonomous_database(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_list_db_system_shapes_with_metadata( + resp, _ = self._interceptor.post_restart_autonomous_database_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER try: - response_payload = ( - oracledatabase.ListDbSystemShapesResponse.to_json(response) - ) + response_payload = json_format.MessageToJson(resp) except: response_payload = None http_response = { @@ -4736,21 +11624,22 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_db_system_shapes", + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.restart_autonomous_database", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "ListDbSystemShapes", + "rpcName": "RestartAutonomousDatabase", "metadata": http_response["headers"], 
"httpResponse": http_response, }, ) return resp - class _ListEntitlements( - _BaseOracleDatabaseRestTransport._BaseListEntitlements, OracleDatabaseRestStub + class _RestoreAutonomousDatabase( + _BaseOracleDatabaseRestTransport._BaseRestoreAutonomousDatabase, + OracleDatabaseRestStub, ): def __hash__(self): - return hash("OracleDatabaseRestTransport.ListEntitlements") + return hash("OracleDatabaseRestTransport.RestoreAutonomousDatabase") @staticmethod def _get_response( @@ -4771,48 +11660,57 @@ def _get_response( timeout=timeout, headers=headers, params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, ) return response def __call__( self, - request: oracledatabase.ListEntitlementsRequest, + request: oracledatabase.RestoreAutonomousDatabaseRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> oracledatabase.ListEntitlementsResponse: - r"""Call the list entitlements method over HTTP. + ) -> operations_pb2.Operation: + r"""Call the restore autonomous + database method over HTTP. - Args: - request (~.oracledatabase.ListEntitlementsRequest): - The request object. The request for ``Entitlement.List``. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. + Args: + request (~.oracledatabase.RestoreAutonomousDatabaseRequest): + The request object. The request for ``AutonomousDatabase.Restore``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. - Returns: - ~.oracledatabase.ListEntitlementsResponse: - The response for ``Entitlement.List``. """ http_options = ( - _BaseOracleDatabaseRestTransport._BaseListEntitlements._get_http_options() + _BaseOracleDatabaseRestTransport._BaseRestoreAutonomousDatabase._get_http_options() ) - request, metadata = self._interceptor.pre_list_entitlements( + request, metadata = self._interceptor.pre_restore_autonomous_database( request, metadata ) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseListEntitlements._get_transcoded_request( + transcoded_request = _BaseOracleDatabaseRestTransport._BaseRestoreAutonomousDatabase._get_transcoded_request( http_options, request ) + body = _BaseOracleDatabaseRestTransport._BaseRestoreAutonomousDatabase._get_request_body_json( + transcoded_request + ) + # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseListEntitlements._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseRestoreAutonomousDatabase._get_query_params_json( transcoded_request ) @@ -4824,7 +11722,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = type(request).to_json(request) + request_payload = json_format.MessageToJson(request) except: request_payload = None http_request = { @@ -4834,23 +11732,26 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListEntitlements", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.RestoreAutonomousDatabase", extra={ 
"serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "ListEntitlements", + "rpcName": "RestoreAutonomousDatabase", "httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response = OracleDatabaseRestTransport._ListEntitlements._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, + response = ( + OracleDatabaseRestTransport._RestoreAutonomousDatabase._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -4859,23 +11760,19 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = oracledatabase.ListEntitlementsResponse() - pb_resp = oracledatabase.ListEntitlementsResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_entitlements(resp) + resp = self._interceptor.post_restore_autonomous_database(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_list_entitlements_with_metadata( + resp, _ = self._interceptor.post_restore_autonomous_database_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER try: - response_payload = oracledatabase.ListEntitlementsResponse.to_json( - response - ) + response_payload = json_format.MessageToJson(resp) except: response_payload = None http_response = { @@ -4884,21 +11781,22 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_entitlements", + "Received response for 
google.cloud.oracledatabase_v1.OracleDatabaseClient.restore_autonomous_database", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "ListEntitlements", + "rpcName": "RestoreAutonomousDatabase", "metadata": http_response["headers"], "httpResponse": http_response, }, ) return resp - class _ListGiVersions( - _BaseOracleDatabaseRestTransport._BaseListGiVersions, OracleDatabaseRestStub + class _StartAutonomousDatabase( + _BaseOracleDatabaseRestTransport._BaseStartAutonomousDatabase, + OracleDatabaseRestStub, ): def __hash__(self): - return hash("OracleDatabaseRestTransport.ListGiVersions") + return hash("OracleDatabaseRestTransport.StartAutonomousDatabase") @staticmethod def _get_response( @@ -4919,22 +11817,23 @@ def _get_response( timeout=timeout, headers=headers, params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, ) return response def __call__( self, - request: oracledatabase.ListGiVersionsRequest, + request: oracledatabase.StartAutonomousDatabaseRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> oracledatabase.ListGiVersionsResponse: - r"""Call the list gi versions method over HTTP. + ) -> operations_pb2.Operation: + r"""Call the start autonomous database method over HTTP. Args: - request (~.oracledatabase.ListGiVersionsRequest): - The request object. The request for ``GiVersion.List``. + request (~.oracledatabase.StartAutonomousDatabaseRequest): + The request object. The request for ``AutonomousDatabase.Start``. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -4944,23 +11843,30 @@ def __call__( be of type `bytes`. Returns: - ~.oracledatabase.ListGiVersionsResponse: - The response for ``GiVersion.List``. 
+ ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + """ http_options = ( - _BaseOracleDatabaseRestTransport._BaseListGiVersions._get_http_options() + _BaseOracleDatabaseRestTransport._BaseStartAutonomousDatabase._get_http_options() ) - request, metadata = self._interceptor.pre_list_gi_versions( + request, metadata = self._interceptor.pre_start_autonomous_database( request, metadata ) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseListGiVersions._get_transcoded_request( + transcoded_request = _BaseOracleDatabaseRestTransport._BaseStartAutonomousDatabase._get_transcoded_request( http_options, request ) + body = _BaseOracleDatabaseRestTransport._BaseStartAutonomousDatabase._get_request_body_json( + transcoded_request + ) + # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseListGiVersions._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseStartAutonomousDatabase._get_query_params_json( transcoded_request ) @@ -4972,7 +11878,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = type(request).to_json(request) + request_payload = json_format.MessageToJson(request) except: request_payload = None http_request = { @@ -4982,23 +11888,26 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.ListGiVersions", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.StartAutonomousDatabase", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "ListGiVersions", + "rpcName": "StartAutonomousDatabase", "httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response = OracleDatabaseRestTransport._ListGiVersions._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, + response = ( + 
OracleDatabaseRestTransport._StartAutonomousDatabase._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -5007,23 +11916,19 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = oracledatabase.ListGiVersionsResponse() - pb_resp = oracledatabase.ListGiVersionsResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_gi_versions(resp) + resp = self._interceptor.post_start_autonomous_database(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_list_gi_versions_with_metadata( + resp, _ = self._interceptor.post_start_autonomous_database_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER try: - response_payload = oracledatabase.ListGiVersionsResponse.to_json( - response - ) + response_payload = json_format.MessageToJson(resp) except: response_payload = None http_response = { @@ -5032,22 +11937,22 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.list_gi_versions", + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.start_autonomous_database", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "ListGiVersions", + "rpcName": "StartAutonomousDatabase", "metadata": http_response["headers"], "httpResponse": http_response, }, ) return resp - class _RestartAutonomousDatabase( - _BaseOracleDatabaseRestTransport._BaseRestartAutonomousDatabase, + class _StopAutonomousDatabase( + 
_BaseOracleDatabaseRestTransport._BaseStopAutonomousDatabase, OracleDatabaseRestStub, ): def __hash__(self): - return hash("OracleDatabaseRestTransport.RestartAutonomousDatabase") + return hash("OracleDatabaseRestTransport.StopAutonomousDatabase") @staticmethod def _get_response( @@ -5074,51 +11979,50 @@ def _get_response( def __call__( self, - request: oracledatabase.RestartAutonomousDatabaseRequest, + request: oracledatabase.StopAutonomousDatabaseRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the restart autonomous - database method over HTTP. + r"""Call the stop autonomous database method over HTTP. - Args: - request (~.oracledatabase.RestartAutonomousDatabaseRequest): - The request object. The request for ``AutonomousDatabase.Restart``. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. + Args: + request (~.oracledatabase.StopAutonomousDatabaseRequest): + The request object. The request for ``AutonomousDatabase.Stop``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
- Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. """ http_options = ( - _BaseOracleDatabaseRestTransport._BaseRestartAutonomousDatabase._get_http_options() + _BaseOracleDatabaseRestTransport._BaseStopAutonomousDatabase._get_http_options() ) - request, metadata = self._interceptor.pre_restart_autonomous_database( + request, metadata = self._interceptor.pre_stop_autonomous_database( request, metadata ) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseRestartAutonomousDatabase._get_transcoded_request( + transcoded_request = _BaseOracleDatabaseRestTransport._BaseStopAutonomousDatabase._get_transcoded_request( http_options, request ) - body = _BaseOracleDatabaseRestTransport._BaseRestartAutonomousDatabase._get_request_body_json( + body = _BaseOracleDatabaseRestTransport._BaseStopAutonomousDatabase._get_request_body_json( transcoded_request ) # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseRestartAutonomousDatabase._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseStopAutonomousDatabase._get_query_params_json( transcoded_request ) @@ -5140,10 +12044,10 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.RestartAutonomousDatabase", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.StopAutonomousDatabase", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "RestartAutonomousDatabase", + "rpcName": "StopAutonomousDatabase", "httpRequest": http_request, "metadata": http_request["headers"], }, @@ -5151,7 +12055,7 @@ def __call__( # Send the request response = ( - OracleDatabaseRestTransport._RestartAutonomousDatabase._get_response( + 
OracleDatabaseRestTransport._StopAutonomousDatabase._get_response( self._host, metadata, query_params, @@ -5171,9 +12075,9 @@ def __call__( resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_restart_autonomous_database(resp) + resp = self._interceptor.post_stop_autonomous_database(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_restart_autonomous_database_with_metadata( + resp, _ = self._interceptor.post_stop_autonomous_database_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( @@ -5189,22 +12093,22 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.restart_autonomous_database", + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.stop_autonomous_database", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "RestartAutonomousDatabase", + "rpcName": "StopAutonomousDatabase", "metadata": http_response["headers"], "httpResponse": http_response, }, ) return resp - class _RestoreAutonomousDatabase( - _BaseOracleDatabaseRestTransport._BaseRestoreAutonomousDatabase, + class _SwitchoverAutonomousDatabase( + _BaseOracleDatabaseRestTransport._BaseSwitchoverAutonomousDatabase, OracleDatabaseRestStub, ): def __hash__(self): - return hash("OracleDatabaseRestTransport.RestoreAutonomousDatabase") + return hash("OracleDatabaseRestTransport.SwitchoverAutonomousDatabase") @staticmethod def _get_response( @@ -5231,18 +12135,19 @@ def _get_response( def __call__( self, - request: oracledatabase.RestoreAutonomousDatabaseRequest, + request: oracledatabase.SwitchoverAutonomousDatabaseRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> 
operations_pb2.Operation: - r"""Call the restore autonomous + r"""Call the switchover autonomous database method over HTTP. Args: - request (~.oracledatabase.RestoreAutonomousDatabaseRequest): - The request object. The request for ``AutonomousDatabase.Restore``. + request (~.oracledatabase.SwitchoverAutonomousDatabaseRequest): + The request object. The request for + ``OracleDatabase.SwitchoverAutonomousDatabase``. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -5260,22 +12165,22 @@ def __call__( """ http_options = ( - _BaseOracleDatabaseRestTransport._BaseRestoreAutonomousDatabase._get_http_options() + _BaseOracleDatabaseRestTransport._BaseSwitchoverAutonomousDatabase._get_http_options() ) - request, metadata = self._interceptor.pre_restore_autonomous_database( + request, metadata = self._interceptor.pre_switchover_autonomous_database( request, metadata ) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseRestoreAutonomousDatabase._get_transcoded_request( + transcoded_request = _BaseOracleDatabaseRestTransport._BaseSwitchoverAutonomousDatabase._get_transcoded_request( http_options, request ) - body = _BaseOracleDatabaseRestTransport._BaseRestoreAutonomousDatabase._get_request_body_json( + body = _BaseOracleDatabaseRestTransport._BaseSwitchoverAutonomousDatabase._get_request_body_json( transcoded_request ) # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseRestoreAutonomousDatabase._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseSwitchoverAutonomousDatabase._get_query_params_json( transcoded_request ) @@ -5297,10 +12202,10 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.RestoreAutonomousDatabase", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.SwitchoverAutonomousDatabase", extra={ 
"serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "RestoreAutonomousDatabase", + "rpcName": "SwitchoverAutonomousDatabase", "httpRequest": http_request, "metadata": http_request["headers"], }, @@ -5308,7 +12213,7 @@ def __call__( # Send the request response = ( - OracleDatabaseRestTransport._RestoreAutonomousDatabase._get_response( + OracleDatabaseRestTransport._SwitchoverAutonomousDatabase._get_response( self._host, metadata, query_params, @@ -5328,9 +12233,12 @@ def __call__( resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_restore_autonomous_database(resp) + resp = self._interceptor.post_switchover_autonomous_database(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_restore_autonomous_database_with_metadata( + ( + resp, + _, + ) = self._interceptor.post_switchover_autonomous_database_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( @@ -5346,22 +12254,22 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.restore_autonomous_database", + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.switchover_autonomous_database", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "RestoreAutonomousDatabase", + "rpcName": "SwitchoverAutonomousDatabase", "metadata": http_response["headers"], "httpResponse": http_response, }, ) return resp - class _StartAutonomousDatabase( - _BaseOracleDatabaseRestTransport._BaseStartAutonomousDatabase, + class _UpdateAutonomousDatabase( + _BaseOracleDatabaseRestTransport._BaseUpdateAutonomousDatabase, OracleDatabaseRestStub, ): def __hash__(self): - return hash("OracleDatabaseRestTransport.StartAutonomousDatabase") + return 
hash("OracleDatabaseRestTransport.UpdateAutonomousDatabase") @staticmethod def _get_response( @@ -5388,50 +12296,51 @@ def _get_response( def __call__( self, - request: oracledatabase.StartAutonomousDatabaseRequest, + request: oracledatabase.UpdateAutonomousDatabaseRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the start autonomous database method over HTTP. + r"""Call the update autonomous + database method over HTTP. - Args: - request (~.oracledatabase.StartAutonomousDatabaseRequest): - The request object. The request for ``AutonomousDatabase.Start``. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. + Args: + request (~.oracledatabase.UpdateAutonomousDatabaseRequest): + The request object. The request for ``AutonomousDatabase.Update``. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
""" http_options = ( - _BaseOracleDatabaseRestTransport._BaseStartAutonomousDatabase._get_http_options() + _BaseOracleDatabaseRestTransport._BaseUpdateAutonomousDatabase._get_http_options() ) - request, metadata = self._interceptor.pre_start_autonomous_database( + request, metadata = self._interceptor.pre_update_autonomous_database( request, metadata ) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseStartAutonomousDatabase._get_transcoded_request( + transcoded_request = _BaseOracleDatabaseRestTransport._BaseUpdateAutonomousDatabase._get_transcoded_request( http_options, request ) - body = _BaseOracleDatabaseRestTransport._BaseStartAutonomousDatabase._get_request_body_json( + body = _BaseOracleDatabaseRestTransport._BaseUpdateAutonomousDatabase._get_request_body_json( transcoded_request ) # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseStartAutonomousDatabase._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseUpdateAutonomousDatabase._get_query_params_json( transcoded_request ) @@ -5453,10 +12362,10 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.StartAutonomousDatabase", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.UpdateAutonomousDatabase", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "StartAutonomousDatabase", + "rpcName": "UpdateAutonomousDatabase", "httpRequest": http_request, "metadata": http_request["headers"], }, @@ -5464,7 +12373,7 @@ def __call__( # Send the request response = ( - OracleDatabaseRestTransport._StartAutonomousDatabase._get_response( + OracleDatabaseRestTransport._UpdateAutonomousDatabase._get_response( self._host, metadata, query_params, @@ -5484,9 +12393,9 @@ def __call__( resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = 
self._interceptor.post_start_autonomous_database(resp) + resp = self._interceptor.post_update_autonomous_database(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_start_autonomous_database_with_metadata( + resp, _ = self._interceptor.post_update_autonomous_database_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( @@ -5502,22 +12411,22 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.start_autonomous_database", + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.update_autonomous_database", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "StartAutonomousDatabase", + "rpcName": "UpdateAutonomousDatabase", "metadata": http_response["headers"], "httpResponse": http_response, }, ) return resp - class _StopAutonomousDatabase( - _BaseOracleDatabaseRestTransport._BaseStopAutonomousDatabase, + class _UpdateExadbVmCluster( + _BaseOracleDatabaseRestTransport._BaseUpdateExadbVmCluster, OracleDatabaseRestStub, ): def __hash__(self): - return hash("OracleDatabaseRestTransport.StopAutonomousDatabase") + return hash("OracleDatabaseRestTransport.UpdateExadbVmCluster") @staticmethod def _get_response( @@ -5544,17 +12453,20 @@ def _get_response( def __call__( self, - request: oracledatabase.StopAutonomousDatabaseRequest, + request: oracledatabase.UpdateExadbVmClusterRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the stop autonomous database method over HTTP. + r"""Call the update exadb vm cluster method over HTTP. Args: - request (~.oracledatabase.StopAutonomousDatabaseRequest): - The request object. The request for ``AutonomousDatabase.Stop``. 
+ request (~.oracledatabase.UpdateExadbVmClusterRequest): + The request object. The request for ``ExadbVmCluster.Update``. We only + support adding the Virtual Machine to the + ExadbVmCluster. Rest of the fields in ExadbVmCluster are + immutable. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -5572,22 +12484,22 @@ def __call__( """ http_options = ( - _BaseOracleDatabaseRestTransport._BaseStopAutonomousDatabase._get_http_options() + _BaseOracleDatabaseRestTransport._BaseUpdateExadbVmCluster._get_http_options() ) - request, metadata = self._interceptor.pre_stop_autonomous_database( + request, metadata = self._interceptor.pre_update_exadb_vm_cluster( request, metadata ) - transcoded_request = _BaseOracleDatabaseRestTransport._BaseStopAutonomousDatabase._get_transcoded_request( + transcoded_request = _BaseOracleDatabaseRestTransport._BaseUpdateExadbVmCluster._get_transcoded_request( http_options, request ) - body = _BaseOracleDatabaseRestTransport._BaseStopAutonomousDatabase._get_request_body_json( + body = _BaseOracleDatabaseRestTransport._BaseUpdateExadbVmCluster._get_request_body_json( transcoded_request ) # Jsonify the query params - query_params = _BaseOracleDatabaseRestTransport._BaseStopAutonomousDatabase._get_query_params_json( + query_params = _BaseOracleDatabaseRestTransport._BaseUpdateExadbVmCluster._get_query_params_json( transcoded_request ) @@ -5609,26 +12521,24 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.StopAutonomousDatabase", + f"Sending request for google.cloud.oracledatabase_v1.OracleDatabaseClient.UpdateExadbVmCluster", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "StopAutonomousDatabase", + "rpcName": "UpdateExadbVmCluster", "httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response 
= ( - OracleDatabaseRestTransport._StopAutonomousDatabase._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) + response = OracleDatabaseRestTransport._UpdateExadbVmCluster._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -5640,9 +12550,9 @@ def __call__( resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_stop_autonomous_database(resp) + resp = self._interceptor.post_update_exadb_vm_cluster(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_stop_autonomous_database_with_metadata( + resp, _ = self._interceptor.post_update_exadb_vm_cluster_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( @@ -5658,10 +12568,10 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.stop_autonomous_database", + "Received response for google.cloud.oracledatabase_v1.OracleDatabaseClient.update_exadb_vm_cluster", extra={ "serviceName": "google.cloud.oracledatabase.v1.OracleDatabase", - "rpcName": "StopAutonomousDatabase", + "rpcName": "UpdateExadbVmCluster", "metadata": http_response["headers"], "httpResponse": http_response, }, @@ -5699,6 +12609,51 @@ def create_cloud_vm_cluster( # In C++ this would require a dynamic_cast return self._CreateCloudVmCluster(self._session, self._host, self._interceptor) # type: ignore + @property + def create_db_system( + self, + ) -> Callable[[gco_db_system.CreateDbSystemRequest], operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._CreateDbSystem(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_exadb_vm_cluster( + self, + ) -> Callable[ + [oracledatabase.CreateExadbVmClusterRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateExadbVmCluster(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_exascale_db_storage_vault( + self, + ) -> Callable[ + [gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest], + operations_pb2.Operation, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateExascaleDbStorageVault(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_odb_network( + self, + ) -> Callable[[gco_odb_network.CreateOdbNetworkRequest], operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateOdbNetwork(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_odb_subnet( + self, + ) -> Callable[[gco_odb_subnet.CreateOdbSubnetRequest], operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._CreateOdbSubnet(self._session, self._host, self._interceptor) # type: ignore + @property def delete_autonomous_database( self, @@ -5730,6 +12685,61 @@ def delete_cloud_vm_cluster( # In C++ this would require a dynamic_cast return self._DeleteCloudVmCluster(self._session, self._host, self._interceptor) # type: ignore + @property + def delete_db_system( + self, + ) -> Callable[[db_system.DeleteDbSystemRequest], operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteDbSystem(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_exadb_vm_cluster( + self, + ) -> Callable[ + [oracledatabase.DeleteExadbVmClusterRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteExadbVmCluster(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_exascale_db_storage_vault( + self, + ) -> Callable[ + [exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest], + operations_pb2.Operation, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteExascaleDbStorageVault(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_odb_network( + self, + ) -> Callable[[odb_network.DeleteOdbNetworkRequest], operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._DeleteOdbNetwork(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_odb_subnet( + self, + ) -> Callable[[odb_subnet.DeleteOdbSubnetRequest], operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteOdbSubnet(self._session, self._host, self._interceptor) # type: ignore + + @property + def failover_autonomous_database( + self, + ) -> Callable[ + [oracledatabase.FailoverAutonomousDatabaseRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._FailoverAutonomousDatabase(self._session, self._host, self._interceptor) # type: ignore + @property def generate_autonomous_database_wallet( self, @@ -5771,6 +12781,70 @@ def get_cloud_vm_cluster( # In C++ this would require a dynamic_cast return self._GetCloudVmCluster(self._session, self._host, self._interceptor) # type: ignore + @property + def get_database( + self, + ) -> Callable[[database.GetDatabaseRequest], database.Database]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetDatabase(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_db_system( + self, + ) -> Callable[[db_system.GetDbSystemRequest], db_system.DbSystem]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._GetDbSystem(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_exadb_vm_cluster( + self, + ) -> Callable[ + [oracledatabase.GetExadbVmClusterRequest], exadb_vm_cluster.ExadbVmCluster + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetExadbVmCluster(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_exascale_db_storage_vault( + self, + ) -> Callable[ + [exascale_db_storage_vault.GetExascaleDbStorageVaultRequest], + exascale_db_storage_vault.ExascaleDbStorageVault, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetExascaleDbStorageVault(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_odb_network( + self, + ) -> Callable[[odb_network.GetOdbNetworkRequest], odb_network.OdbNetwork]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetOdbNetwork(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_odb_subnet( + self, + ) -> Callable[[odb_subnet.GetOdbSubnetRequest], odb_subnet.OdbSubnet]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetOdbSubnet(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_pluggable_database( + self, + ) -> Callable[ + [pluggable_database.GetPluggableDatabaseRequest], + pluggable_database.PluggableDatabase, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._GetPluggableDatabase(self._session, self._host, self._interceptor) # type: ignore + @property def list_autonomous_database_backups( self, @@ -5837,6 +12911,25 @@ def list_cloud_vm_clusters( # In C++ this would require a dynamic_cast return self._ListCloudVmClusters(self._session, self._host, self._interceptor) # type: ignore + @property + def list_database_character_sets( + self, + ) -> Callable[ + [database_character_set.ListDatabaseCharacterSetsRequest], + database_character_set.ListDatabaseCharacterSetsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListDatabaseCharacterSets(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_databases( + self, + ) -> Callable[[database.ListDatabasesRequest], database.ListDatabasesResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListDatabases(self._session, self._host, self._interceptor) # type: ignore + @property def list_db_nodes( self, @@ -5857,6 +12950,25 @@ def list_db_servers( # In C++ this would require a dynamic_cast return self._ListDbServers(self._session, self._host, self._interceptor) # type: ignore + @property + def list_db_system_initial_storage_sizes( + self, + ) -> Callable[ + [db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest], + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._ListDbSystemInitialStorageSizes(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_db_systems( + self, + ) -> Callable[[db_system.ListDbSystemsRequest], db_system.ListDbSystemsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListDbSystems(self._session, self._host, self._interceptor) # type: ignore + @property def list_db_system_shapes( self, @@ -5868,6 +12980,16 @@ def list_db_system_shapes( # In C++ this would require a dynamic_cast return self._ListDbSystemShapes(self._session, self._host, self._interceptor) # type: ignore + @property + def list_db_versions( + self, + ) -> Callable[ + [db_version.ListDbVersionsRequest], db_version.ListDbVersionsResponse + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListDbVersions(self._session, self._host, self._interceptor) # type: ignore + @property def list_entitlements( self, @@ -5879,6 +13001,28 @@ def list_entitlements( # In C++ this would require a dynamic_cast return self._ListEntitlements(self._session, self._host, self._interceptor) # type: ignore + @property + def list_exadb_vm_clusters( + self, + ) -> Callable[ + [oracledatabase.ListExadbVmClustersRequest], + oracledatabase.ListExadbVmClustersResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._ListExadbVmClusters(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_exascale_db_storage_vaults( + self, + ) -> Callable[ + [exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest], + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListExascaleDbStorageVaults(self._session, self._host, self._interceptor) # type: ignore + @property def list_gi_versions( self, @@ -5889,6 +13033,59 @@ def list_gi_versions( # In C++ this would require a dynamic_cast return self._ListGiVersions(self._session, self._host, self._interceptor) # type: ignore + @property + def list_minor_versions( + self, + ) -> Callable[ + [minor_version.ListMinorVersionsRequest], + minor_version.ListMinorVersionsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListMinorVersions(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_odb_networks( + self, + ) -> Callable[ + [odb_network.ListOdbNetworksRequest], odb_network.ListOdbNetworksResponse + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListOdbNetworks(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_odb_subnets( + self, + ) -> Callable[ + [odb_subnet.ListOdbSubnetsRequest], odb_subnet.ListOdbSubnetsResponse + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._ListOdbSubnets(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_pluggable_databases( + self, + ) -> Callable[ + [pluggable_database.ListPluggableDatabasesRequest], + pluggable_database.ListPluggableDatabasesResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListPluggableDatabases(self._session, self._host, self._interceptor) # type: ignore + + @property + def remove_virtual_machine_exadb_vm_cluster( + self, + ) -> Callable[ + [oracledatabase.RemoveVirtualMachineExadbVmClusterRequest], + operations_pb2.Operation, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._RemoveVirtualMachineExadbVmCluster(self._session, self._host, self._interceptor) # type: ignore + @property def restart_autonomous_database( self, @@ -5929,6 +13126,36 @@ def stop_autonomous_database( # In C++ this would require a dynamic_cast return self._StopAutonomousDatabase(self._session, self._host, self._interceptor) # type: ignore + @property + def switchover_autonomous_database( + self, + ) -> Callable[ + [oracledatabase.SwitchoverAutonomousDatabaseRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._SwitchoverAutonomousDatabase(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_autonomous_database( + self, + ) -> Callable[ + [oracledatabase.UpdateAutonomousDatabaseRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._UpdateAutonomousDatabase(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_exadb_vm_cluster( + self, + ) -> Callable[ + [oracledatabase.UpdateExadbVmClusterRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateExadbVmCluster(self._session, self._host, self._interceptor) # type: ignore + @property def get_location(self): return self._GetLocation(self._session, self._host, self._interceptor) # type: ignore diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/transports/rest_base.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/transports/rest_base.py index 46169f687141..89a149ddc106 100644 --- a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/transports/rest_base.py +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/services/oracle_database/transports/rest_base.py @@ -23,11 +23,32 @@ from google.protobuf import json_format from google.cloud.oracledatabase_v1.types import ( - autonomous_database, + db_system_initial_storage_size, + db_version, exadata_infra, + exadb_vm_cluster, +) +from google.cloud.oracledatabase_v1.types import ( + autonomous_database, + database, + database_character_set, +) +from google.cloud.oracledatabase_v1.types import ( oracledatabase, + pluggable_database, vm_cluster, ) +from google.cloud.oracledatabase_v1.types import ( + exascale_db_storage_vault as gco_exascale_db_storage_vault, +) +from google.cloud.oracledatabase_v1.types import db_system +from google.cloud.oracledatabase_v1.types import db_system as gco_db_system +from google.cloud.oracledatabase_v1.types import exascale_db_storage_vault +from 
google.cloud.oracledatabase_v1.types import minor_version +from google.cloud.oracledatabase_v1.types import odb_network +from google.cloud.oracledatabase_v1.types import odb_network as gco_odb_network +from google.cloud.oracledatabase_v1.types import odb_subnet +from google.cloud.oracledatabase_v1.types import odb_subnet as gco_odb_subnet from .base import DEFAULT_CLIENT_INFO, OracleDatabaseTransport @@ -273,11 +294,13 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params - class _BaseDeleteAutonomousDatabase: + class _BaseCreateDbSystem: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "dbSystemId": "", + } @classmethod def _get_unset_required_fields(cls, message_dict): @@ -291,18 +314,28 @@ def _get_unset_required_fields(cls, message_dict): def _get_http_options(): http_options: List[Dict[str, str]] = [ { - "method": "delete", - "uri": "/v1/{name=projects/*/locations/*/autonomousDatabases/*}", + "method": "post", + "uri": "/v1/{parent=projects/*/locations/*}/dbSystems", + "body": "db_system", }, ] return http_options @staticmethod def _get_transcoded_request(http_options, request): - pb_request = oracledatabase.DeleteAutonomousDatabaseRequest.pb(request) + pb_request = gco_db_system.CreateDbSystemRequest.pb(request) transcoded_request = path_template.transcode(http_options, pb_request) return transcoded_request + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + @staticmethod def _get_query_params_json(transcoded_request): query_params = json.loads( @@ -312,7 +345,7 @@ def _get_query_params_json(transcoded_request): ) ) query_params.update( - 
_BaseOracleDatabaseRestTransport._BaseDeleteAutonomousDatabase._get_unset_required_fields( + _BaseOracleDatabaseRestTransport._BaseCreateDbSystem._get_unset_required_fields( query_params ) ) @@ -320,11 +353,13 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params - class _BaseDeleteCloudExadataInfrastructure: + class _BaseCreateExadbVmCluster: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "exadbVmClusterId": "", + } @classmethod def _get_unset_required_fields(cls, message_dict): @@ -338,20 +373,28 @@ def _get_unset_required_fields(cls, message_dict): def _get_http_options(): http_options: List[Dict[str, str]] = [ { - "method": "delete", - "uri": "/v1/{name=projects/*/locations/*/cloudExadataInfrastructures/*}", + "method": "post", + "uri": "/v1/{parent=projects/*/locations/*}/exadbVmClusters", + "body": "exadb_vm_cluster", }, ] return http_options @staticmethod def _get_transcoded_request(http_options, request): - pb_request = oracledatabase.DeleteCloudExadataInfrastructureRequest.pb( - request - ) + pb_request = oracledatabase.CreateExadbVmClusterRequest.pb(request) transcoded_request = path_template.transcode(http_options, pb_request) return transcoded_request + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + @staticmethod def _get_query_params_json(transcoded_request): query_params = json.loads( @@ -361,7 +404,7 @@ def _get_query_params_json(transcoded_request): ) ) query_params.update( - _BaseOracleDatabaseRestTransport._BaseDeleteCloudExadataInfrastructure._get_unset_required_fields( + _BaseOracleDatabaseRestTransport._BaseCreateExadbVmCluster._get_unset_required_fields( 
query_params ) ) @@ -369,11 +412,13 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params - class _BaseDeleteCloudVmCluster: + class _BaseCreateExascaleDbStorageVault: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "exascaleDbStorageVaultId": "", + } @classmethod def _get_unset_required_fields(cls, message_dict): @@ -387,18 +432,32 @@ def _get_unset_required_fields(cls, message_dict): def _get_http_options(): http_options: List[Dict[str, str]] = [ { - "method": "delete", - "uri": "/v1/{name=projects/*/locations/*/cloudVmClusters/*}", + "method": "post", + "uri": "/v1/{parent=projects/*/locations/*}/exascaleDbStorageVaults", + "body": "exascale_db_storage_vault", }, ] return http_options @staticmethod def _get_transcoded_request(http_options, request): - pb_request = oracledatabase.DeleteCloudVmClusterRequest.pb(request) + pb_request = ( + gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest.pb( + request + ) + ) transcoded_request = path_template.transcode(http_options, pb_request) return transcoded_request + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + @staticmethod def _get_query_params_json(transcoded_request): query_params = json.loads( @@ -408,7 +467,7 @@ def _get_query_params_json(transcoded_request): ) ) query_params.update( - _BaseOracleDatabaseRestTransport._BaseDeleteCloudVmCluster._get_unset_required_fields( + _BaseOracleDatabaseRestTransport._BaseCreateExascaleDbStorageVault._get_unset_required_fields( query_params ) ) @@ -416,11 +475,13 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params 
- class _BaseGenerateAutonomousDatabaseWallet: + class _BaseCreateOdbNetwork: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "odbNetworkId": "", + } @classmethod def _get_unset_required_fields(cls, message_dict): @@ -435,17 +496,15 @@ def _get_http_options(): http_options: List[Dict[str, str]] = [ { "method": "post", - "uri": "/v1/{name=projects/*/locations/*/autonomousDatabases/*}:generateWallet", - "body": "*", + "uri": "/v1/{parent=projects/*/locations/*}/odbNetworks", + "body": "odb_network", }, ] return http_options @staticmethod def _get_transcoded_request(http_options, request): - pb_request = oracledatabase.GenerateAutonomousDatabaseWalletRequest.pb( - request - ) + pb_request = gco_odb_network.CreateOdbNetworkRequest.pb(request) transcoded_request = path_template.transcode(http_options, pb_request) return transcoded_request @@ -467,7 +526,7 @@ def _get_query_params_json(transcoded_request): ) ) query_params.update( - _BaseOracleDatabaseRestTransport._BaseGenerateAutonomousDatabaseWallet._get_unset_required_fields( + _BaseOracleDatabaseRestTransport._BaseCreateOdbNetwork._get_unset_required_fields( query_params ) ) @@ -475,11 +534,13 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params - class _BaseGetAutonomousDatabase: + class _BaseCreateOdbSubnet: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "odbSubnetId": "", + } @classmethod def _get_unset_required_fields(cls, message_dict): @@ -493,18 +554,28 @@ def _get_unset_required_fields(cls, message_dict): def _get_http_options(): http_options: List[Dict[str, str]] = [ { - "method": "get", - "uri": 
"/v1/{name=projects/*/locations/*/autonomousDatabases/*}", + "method": "post", + "uri": "/v1/{parent=projects/*/locations/*/odbNetworks/*}/odbSubnets", + "body": "odb_subnet", }, ] return http_options @staticmethod def _get_transcoded_request(http_options, request): - pb_request = oracledatabase.GetAutonomousDatabaseRequest.pb(request) + pb_request = gco_odb_subnet.CreateOdbSubnetRequest.pb(request) transcoded_request = path_template.transcode(http_options, pb_request) return transcoded_request + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + @staticmethod def _get_query_params_json(transcoded_request): query_params = json.loads( @@ -514,7 +585,7 @@ def _get_query_params_json(transcoded_request): ) ) query_params.update( - _BaseOracleDatabaseRestTransport._BaseGetAutonomousDatabase._get_unset_required_fields( + _BaseOracleDatabaseRestTransport._BaseCreateOdbSubnet._get_unset_required_fields( query_params ) ) @@ -522,7 +593,7 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params - class _BaseGetCloudExadataInfrastructure: + class _BaseDeleteAutonomousDatabase: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -540,15 +611,15 @@ def _get_unset_required_fields(cls, message_dict): def _get_http_options(): http_options: List[Dict[str, str]] = [ { - "method": "get", - "uri": "/v1/{name=projects/*/locations/*/cloudExadataInfrastructures/*}", + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/autonomousDatabases/*}", }, ] return http_options @staticmethod def _get_transcoded_request(http_options, request): - pb_request = oracledatabase.GetCloudExadataInfrastructureRequest.pb(request) + pb_request = oracledatabase.DeleteAutonomousDatabaseRequest.pb(request) transcoded_request = 
path_template.transcode(http_options, pb_request) return transcoded_request @@ -561,7 +632,7 @@ def _get_query_params_json(transcoded_request): ) ) query_params.update( - _BaseOracleDatabaseRestTransport._BaseGetCloudExadataInfrastructure._get_unset_required_fields( + _BaseOracleDatabaseRestTransport._BaseDeleteAutonomousDatabase._get_unset_required_fields( query_params ) ) @@ -569,7 +640,7 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params - class _BaseGetCloudVmCluster: + class _BaseDeleteCloudExadataInfrastructure: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -587,15 +658,17 @@ def _get_unset_required_fields(cls, message_dict): def _get_http_options(): http_options: List[Dict[str, str]] = [ { - "method": "get", - "uri": "/v1/{name=projects/*/locations/*/cloudVmClusters/*}", + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/cloudExadataInfrastructures/*}", }, ] return http_options @staticmethod def _get_transcoded_request(http_options, request): - pb_request = oracledatabase.GetCloudVmClusterRequest.pb(request) + pb_request = oracledatabase.DeleteCloudExadataInfrastructureRequest.pb( + request + ) transcoded_request = path_template.transcode(http_options, pb_request) return transcoded_request @@ -608,7 +681,7 @@ def _get_query_params_json(transcoded_request): ) ) query_params.update( - _BaseOracleDatabaseRestTransport._BaseGetCloudVmCluster._get_unset_required_fields( + _BaseOracleDatabaseRestTransport._BaseDeleteCloudExadataInfrastructure._get_unset_required_fields( query_params ) ) @@ -616,7 +689,7 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params - class _BaseListAutonomousDatabaseBackups: + class _BaseDeleteCloudVmCluster: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -634,15 +707,15 @@ def 
_get_unset_required_fields(cls, message_dict): def _get_http_options(): http_options: List[Dict[str, str]] = [ { - "method": "get", - "uri": "/v1/{parent=projects/*/locations/*}/autonomousDatabaseBackups", + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/cloudVmClusters/*}", }, ] return http_options @staticmethod def _get_transcoded_request(http_options, request): - pb_request = oracledatabase.ListAutonomousDatabaseBackupsRequest.pb(request) + pb_request = oracledatabase.DeleteCloudVmClusterRequest.pb(request) transcoded_request = path_template.transcode(http_options, pb_request) return transcoded_request @@ -655,7 +728,7 @@ def _get_query_params_json(transcoded_request): ) ) query_params.update( - _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabaseBackups._get_unset_required_fields( + _BaseOracleDatabaseRestTransport._BaseDeleteCloudVmCluster._get_unset_required_fields( query_params ) ) @@ -663,7 +736,7 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params - class _BaseListAutonomousDatabaseCharacterSets: + class _BaseDeleteDbSystem: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -681,17 +754,15 @@ def _get_unset_required_fields(cls, message_dict): def _get_http_options(): http_options: List[Dict[str, str]] = [ { - "method": "get", - "uri": "/v1/{parent=projects/*/locations/*}/autonomousDatabaseCharacterSets", + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/dbSystems/*}", }, ] return http_options @staticmethod def _get_transcoded_request(http_options, request): - pb_request = oracledatabase.ListAutonomousDatabaseCharacterSetsRequest.pb( - request - ) + pb_request = db_system.DeleteDbSystemRequest.pb(request) transcoded_request = path_template.transcode(http_options, pb_request) return transcoded_request @@ -704,7 +775,7 @@ def _get_query_params_json(transcoded_request): ) ) query_params.update( - 
_BaseOracleDatabaseRestTransport._BaseListAutonomousDatabaseCharacterSets._get_unset_required_fields( + _BaseOracleDatabaseRestTransport._BaseDeleteDbSystem._get_unset_required_fields( query_params ) ) @@ -712,7 +783,7 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params - class _BaseListAutonomousDatabases: + class _BaseDeleteExadbVmCluster: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -730,15 +801,15 @@ def _get_unset_required_fields(cls, message_dict): def _get_http_options(): http_options: List[Dict[str, str]] = [ { - "method": "get", - "uri": "/v1/{parent=projects/*/locations/*}/autonomousDatabases", + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/exadbVmClusters/*}", }, ] return http_options @staticmethod def _get_transcoded_request(http_options, request): - pb_request = oracledatabase.ListAutonomousDatabasesRequest.pb(request) + pb_request = oracledatabase.DeleteExadbVmClusterRequest.pb(request) transcoded_request = path_template.transcode(http_options, pb_request) return transcoded_request @@ -751,7 +822,7 @@ def _get_query_params_json(transcoded_request): ) ) query_params.update( - _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabases._get_unset_required_fields( + _BaseOracleDatabaseRestTransport._BaseDeleteExadbVmCluster._get_unset_required_fields( query_params ) ) @@ -759,7 +830,7 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params - class _BaseListAutonomousDbVersions: + class _BaseDeleteExascaleDbStorageVault: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -777,15 +848,19 @@ def _get_unset_required_fields(cls, message_dict): def _get_http_options(): http_options: List[Dict[str, str]] = [ { - "method": "get", - "uri": "/v1/{parent=projects/*/locations/*}/autonomousDbVersions", + "method": 
"delete", + "uri": "/v1/{name=projects/*/locations/*/exascaleDbStorageVaults/*}", }, ] return http_options @staticmethod def _get_transcoded_request(http_options, request): - pb_request = oracledatabase.ListAutonomousDbVersionsRequest.pb(request) + pb_request = ( + exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest.pb( + request + ) + ) transcoded_request = path_template.transcode(http_options, pb_request) return transcoded_request @@ -798,7 +873,7 @@ def _get_query_params_json(transcoded_request): ) ) query_params.update( - _BaseOracleDatabaseRestTransport._BaseListAutonomousDbVersions._get_unset_required_fields( + _BaseOracleDatabaseRestTransport._BaseDeleteExascaleDbStorageVault._get_unset_required_fields( query_params ) ) @@ -806,7 +881,7 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params - class _BaseListCloudExadataInfrastructures: + class _BaseDeleteOdbNetwork: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -824,17 +899,15 @@ def _get_unset_required_fields(cls, message_dict): def _get_http_options(): http_options: List[Dict[str, str]] = [ { - "method": "get", - "uri": "/v1/{parent=projects/*/locations/*}/cloudExadataInfrastructures", + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/odbNetworks/*}", }, ] return http_options @staticmethod def _get_transcoded_request(http_options, request): - pb_request = oracledatabase.ListCloudExadataInfrastructuresRequest.pb( - request - ) + pb_request = odb_network.DeleteOdbNetworkRequest.pb(request) transcoded_request = path_template.transcode(http_options, pb_request) return transcoded_request @@ -847,7 +920,7 @@ def _get_query_params_json(transcoded_request): ) ) query_params.update( - _BaseOracleDatabaseRestTransport._BaseListCloudExadataInfrastructures._get_unset_required_fields( + _BaseOracleDatabaseRestTransport._BaseDeleteOdbNetwork._get_unset_required_fields( 
query_params ) ) @@ -855,7 +928,7 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params - class _BaseListCloudVmClusters: + class _BaseDeleteOdbSubnet: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -873,15 +946,15 @@ def _get_unset_required_fields(cls, message_dict): def _get_http_options(): http_options: List[Dict[str, str]] = [ { - "method": "get", - "uri": "/v1/{parent=projects/*/locations/*}/cloudVmClusters", + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/odbNetworks/*/odbSubnets/*}", }, ] return http_options @staticmethod def _get_transcoded_request(http_options, request): - pb_request = oracledatabase.ListCloudVmClustersRequest.pb(request) + pb_request = odb_subnet.DeleteOdbSubnetRequest.pb(request) transcoded_request = path_template.transcode(http_options, pb_request) return transcoded_request @@ -894,7 +967,7 @@ def _get_query_params_json(transcoded_request): ) ) query_params.update( - _BaseOracleDatabaseRestTransport._BaseListCloudVmClusters._get_unset_required_fields( + _BaseOracleDatabaseRestTransport._BaseDeleteOdbSubnet._get_unset_required_fields( query_params ) ) @@ -902,7 +975,7 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params - class _BaseListDbNodes: + class _BaseFailoverAutonomousDatabase: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -920,18 +993,28 @@ def _get_unset_required_fields(cls, message_dict): def _get_http_options(): http_options: List[Dict[str, str]] = [ { - "method": "get", - "uri": "/v1/{parent=projects/*/locations/*/cloudVmClusters/*}/dbNodes", + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/autonomousDatabases/*}:failover", + "body": "*", }, ] return http_options @staticmethod def _get_transcoded_request(http_options, request): - pb_request = 
oracledatabase.ListDbNodesRequest.pb(request) + pb_request = oracledatabase.FailoverAutonomousDatabaseRequest.pb(request) transcoded_request = path_template.transcode(http_options, pb_request) return transcoded_request + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + @staticmethod def _get_query_params_json(transcoded_request): query_params = json.loads( @@ -941,7 +1024,7 @@ def _get_query_params_json(transcoded_request): ) ) query_params.update( - _BaseOracleDatabaseRestTransport._BaseListDbNodes._get_unset_required_fields( + _BaseOracleDatabaseRestTransport._BaseFailoverAutonomousDatabase._get_unset_required_fields( query_params ) ) @@ -949,7 +1032,7 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params - class _BaseListDbServers: + class _BaseGenerateAutonomousDatabaseWallet: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -967,18 +1050,30 @@ def _get_unset_required_fields(cls, message_dict): def _get_http_options(): http_options: List[Dict[str, str]] = [ { - "method": "get", - "uri": "/v1/{parent=projects/*/locations/*/cloudExadataInfrastructures/*}/dbServers", + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/autonomousDatabases/*}:generateWallet", + "body": "*", }, ] return http_options @staticmethod def _get_transcoded_request(http_options, request): - pb_request = oracledatabase.ListDbServersRequest.pb(request) + pb_request = oracledatabase.GenerateAutonomousDatabaseWalletRequest.pb( + request + ) transcoded_request = path_template.transcode(http_options, pb_request) return transcoded_request + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], 
use_integers_for_enums=True + ) + return body + @staticmethod def _get_query_params_json(transcoded_request): query_params = json.loads( @@ -988,7 +1083,7 @@ def _get_query_params_json(transcoded_request): ) ) query_params.update( - _BaseOracleDatabaseRestTransport._BaseListDbServers._get_unset_required_fields( + _BaseOracleDatabaseRestTransport._BaseGenerateAutonomousDatabaseWallet._get_unset_required_fields( query_params ) ) @@ -996,7 +1091,7 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params - class _BaseListDbSystemShapes: + class _BaseGetAutonomousDatabase: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -1015,14 +1110,14 @@ def _get_http_options(): http_options: List[Dict[str, str]] = [ { "method": "get", - "uri": "/v1/{parent=projects/*/locations/*}/dbSystemShapes", + "uri": "/v1/{name=projects/*/locations/*/autonomousDatabases/*}", }, ] return http_options @staticmethod def _get_transcoded_request(http_options, request): - pb_request = oracledatabase.ListDbSystemShapesRequest.pb(request) + pb_request = oracledatabase.GetAutonomousDatabaseRequest.pb(request) transcoded_request = path_template.transcode(http_options, pb_request) return transcoded_request @@ -1035,7 +1130,7 @@ def _get_query_params_json(transcoded_request): ) ) query_params.update( - _BaseOracleDatabaseRestTransport._BaseListDbSystemShapes._get_unset_required_fields( + _BaseOracleDatabaseRestTransport._BaseGetAutonomousDatabase._get_unset_required_fields( query_params ) ) @@ -1043,7 +1138,7 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params - class _BaseListEntitlements: + class _BaseGetCloudExadataInfrastructure: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -1062,14 +1157,14 @@ def _get_http_options(): http_options: List[Dict[str, str]] = [ { 
"method": "get", - "uri": "/v1/{parent=projects/*/locations/*}/entitlements", + "uri": "/v1/{name=projects/*/locations/*/cloudExadataInfrastructures/*}", }, ] return http_options @staticmethod def _get_transcoded_request(http_options, request): - pb_request = oracledatabase.ListEntitlementsRequest.pb(request) + pb_request = oracledatabase.GetCloudExadataInfrastructureRequest.pb(request) transcoded_request = path_template.transcode(http_options, pb_request) return transcoded_request @@ -1082,7 +1177,7 @@ def _get_query_params_json(transcoded_request): ) ) query_params.update( - _BaseOracleDatabaseRestTransport._BaseListEntitlements._get_unset_required_fields( + _BaseOracleDatabaseRestTransport._BaseGetCloudExadataInfrastructure._get_unset_required_fields( query_params ) ) @@ -1090,7 +1185,7 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params - class _BaseListGiVersions: + class _BaseGetCloudVmCluster: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -1109,14 +1204,14 @@ def _get_http_options(): http_options: List[Dict[str, str]] = [ { "method": "get", - "uri": "/v1/{parent=projects/*/locations/*}/giVersions", + "uri": "/v1/{name=projects/*/locations/*/cloudVmClusters/*}", }, ] return http_options @staticmethod def _get_transcoded_request(http_options, request): - pb_request = oracledatabase.ListGiVersionsRequest.pb(request) + pb_request = oracledatabase.GetCloudVmClusterRequest.pb(request) transcoded_request = path_template.transcode(http_options, pb_request) return transcoded_request @@ -1129,7 +1224,7 @@ def _get_query_params_json(transcoded_request): ) ) query_params.update( - _BaseOracleDatabaseRestTransport._BaseListGiVersions._get_unset_required_fields( + _BaseOracleDatabaseRestTransport._BaseGetCloudVmCluster._get_unset_required_fields( query_params ) ) @@ -1137,7 +1232,7 @@ def _get_query_params_json(transcoded_request): 
query_params["$alt"] = "json;enum-encoding=int" return query_params - class _BaseRestartAutonomousDatabase: + class _BaseGetDatabase: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -1155,28 +1250,18 @@ def _get_unset_required_fields(cls, message_dict): def _get_http_options(): http_options: List[Dict[str, str]] = [ { - "method": "post", - "uri": "/v1/{name=projects/*/locations/*/autonomousDatabases/*}:restart", - "body": "*", + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/databases/*}", }, ] return http_options @staticmethod def _get_transcoded_request(http_options, request): - pb_request = oracledatabase.RestartAutonomousDatabaseRequest.pb(request) + pb_request = database.GetDatabaseRequest.pb(request) transcoded_request = path_template.transcode(http_options, pb_request) return transcoded_request - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - @staticmethod def _get_query_params_json(transcoded_request): query_params = json.loads( @@ -1186,7 +1271,7 @@ def _get_query_params_json(transcoded_request): ) ) query_params.update( - _BaseOracleDatabaseRestTransport._BaseRestartAutonomousDatabase._get_unset_required_fields( + _BaseOracleDatabaseRestTransport._BaseGetDatabase._get_unset_required_fields( query_params ) ) @@ -1194,7 +1279,7 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params - class _BaseRestoreAutonomousDatabase: + class _BaseGetDbSystem: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -1212,28 +1297,18 @@ def _get_unset_required_fields(cls, message_dict): def _get_http_options(): http_options: List[Dict[str, str]] = [ { - "method": "post", - "uri": 
"/v1/{name=projects/*/locations/*/autonomousDatabases/*}:restore", - "body": "*", + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/dbSystems/*}", }, ] return http_options @staticmethod def _get_transcoded_request(http_options, request): - pb_request = oracledatabase.RestoreAutonomousDatabaseRequest.pb(request) + pb_request = db_system.GetDbSystemRequest.pb(request) transcoded_request = path_template.transcode(http_options, pb_request) return transcoded_request - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - @staticmethod def _get_query_params_json(transcoded_request): query_params = json.loads( @@ -1243,7 +1318,1465 @@ def _get_query_params_json(transcoded_request): ) ) query_params.update( - _BaseOracleDatabaseRestTransport._BaseRestoreAutonomousDatabase._get_unset_required_fields( + _BaseOracleDatabaseRestTransport._BaseGetDbSystem._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetExadbVmCluster: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/exadbVmClusters/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = oracledatabase.GetExadbVmClusterRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def 
_get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseGetExadbVmCluster._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetExascaleDbStorageVault: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/exascaleDbStorageVaults/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = exascale_db_storage_vault.GetExascaleDbStorageVaultRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseGetExascaleDbStorageVault._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetOdbNetwork: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + 
if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/odbNetworks/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = odb_network.GetOdbNetworkRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseGetOdbNetwork._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetOdbSubnet: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/odbNetworks/*/odbSubnets/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = odb_subnet.GetOdbSubnetRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseGetOdbSubnet._get_unset_required_fields( + 
query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetPluggableDatabase: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/pluggableDatabases/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = pluggable_database.GetPluggableDatabaseRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseGetPluggableDatabase._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListAutonomousDatabaseBackups: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/autonomousDatabaseBackups", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, 
request): + pb_request = oracledatabase.ListAutonomousDatabaseBackupsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabaseBackups._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListAutonomousDatabaseCharacterSets: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/autonomousDatabaseCharacterSets", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = oracledatabase.ListAutonomousDatabaseCharacterSetsRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabaseCharacterSets._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListAutonomousDatabases: + def __hash__(self): # pragma: NO COVER + 
return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/autonomousDatabases", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = oracledatabase.ListAutonomousDatabasesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseListAutonomousDatabases._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListAutonomousDbVersions: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/autonomousDbVersions", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = oracledatabase.ListAutonomousDbVersionsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return 
transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseListAutonomousDbVersions._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListCloudExadataInfrastructures: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/cloudExadataInfrastructures", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = oracledatabase.ListCloudExadataInfrastructuresRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseListCloudExadataInfrastructures._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListCloudVmClusters: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { 
+ k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/cloudVmClusters", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = oracledatabase.ListCloudVmClustersRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseListCloudVmClusters._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListDatabaseCharacterSets: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/databaseCharacterSets", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = database_character_set.ListDatabaseCharacterSetsRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + 
use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseListDatabaseCharacterSets._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListDatabases: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/databases", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = database.ListDatabasesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseListDatabases._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListDbNodes: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": 
"/v1/{parent=projects/*/locations/*/cloudVmClusters/*}/dbNodes", + }, + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*/exadbVmClusters/*}/dbNodes", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = oracledatabase.ListDbNodesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseListDbNodes._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListDbServers: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*/cloudExadataInfrastructures/*}/dbServers", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = oracledatabase.ListDbServersRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseListDbServers._get_unset_required_fields( + query_params + ) + ) 
+ + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListDbSystemInitialStorageSizes: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/dbSystemInitialStorageSizes", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseListDbSystemInitialStorageSizes._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListDbSystems: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/dbSystems", + }, + ] + return http_options + + @staticmethod + def 
_get_transcoded_request(http_options, request): + pb_request = db_system.ListDbSystemsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseListDbSystems._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListDbSystemShapes: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/dbSystemShapes", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = oracledatabase.ListDbSystemShapesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseListDbSystemShapes._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListDbVersions: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + 
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/dbVersions", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = db_version.ListDbVersionsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseListDbVersions._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListEntitlements: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/entitlements", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = oracledatabase.ListEntitlementsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + 
json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseListEntitlements._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListExadbVmClusters: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/exadbVmClusters", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = oracledatabase.ListExadbVmClustersRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseListExadbVmClusters._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListExascaleDbStorageVaults: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, 
str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/exascaleDbStorageVaults", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = ( + exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest.pb(request) + ) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseListExascaleDbStorageVaults._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListGiVersions: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/giVersions", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = oracledatabase.ListGiVersionsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseListGiVersions._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = 
"json;enum-encoding=int" + return query_params + + class _BaseListMinorVersions: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*/giVersions/*}/minorVersions", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = minor_version.ListMinorVersionsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseListMinorVersions._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListOdbNetworks: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/odbNetworks", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = odb_network.ListOdbNetworksRequest.pb(request) + 
transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseListOdbNetworks._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListOdbSubnets: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*/odbNetworks/*}/odbSubnets", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = odb_subnet.ListOdbSubnetsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseListOdbSubnets._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListPluggableDatabases: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + 
return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/pluggableDatabases", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = pluggable_database.ListPluggableDatabasesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseListPluggableDatabases._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseRemoveVirtualMachineExadbVmCluster: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/exadbVmClusters/*}:removeVirtualMachine", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = oracledatabase.RemoveVirtualMachineExadbVmClusterRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = 
json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseRemoveVirtualMachineExadbVmCluster._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseRestartAutonomousDatabase: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/autonomousDatabases/*}:restart", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = oracledatabase.RestartAutonomousDatabaseRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseRestartAutonomousDatabase._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + 
return query_params + + class _BaseRestoreAutonomousDatabase: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/autonomousDatabases/*}:restore", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = oracledatabase.RestoreAutonomousDatabaseRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseRestoreAutonomousDatabase._get_unset_required_fields( query_params ) ) @@ -1365,6 +2898,177 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params + class _BaseSwitchoverAutonomousDatabase: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + 
http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/autonomousDatabases/*}:switchover", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = oracledatabase.SwitchoverAutonomousDatabaseRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseSwitchoverAutonomousDatabase._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseUpdateAutonomousDatabase: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v1/{autonomous_database.name=projects/*/locations/*/autonomousDatabases/*}", + "body": "autonomous_database", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = oracledatabase.UpdateAutonomousDatabaseRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def 
_get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseOracleDatabaseRestTransport._BaseUpdateAutonomousDatabase._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseUpdateExadbVmCluster: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v1/{exadb_vm_cluster.name=projects/*/locations/*/exadbVmClusters/*}", + "body": "exadb_vm_cluster", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = oracledatabase.UpdateExadbVmClusterRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + 
_BaseOracleDatabaseRestTransport._BaseUpdateExadbVmCluster._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + class _BaseGetLocation: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/__init__.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/__init__.py index 9047e139725f..00d55ee5dc0d 100644 --- a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/__init__.py +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/__init__.py @@ -23,9 +23,12 @@ AutonomousDatabaseStandbySummary, DatabaseConnectionStringProfile, DBWorkload, + EncryptionKey, + EncryptionKeyHistoryEntry, GenerateType, OperationsInsightsState, ScheduledOperationDetails, + SourceConfig, State, ) from .autonomous_database_character_set import AutonomousDatabaseCharacterSet @@ -34,30 +37,113 @@ AutonomousDatabaseBackupProperties, ) from .autonomous_db_version import AutonomousDbVersion -from .common import CustomerContact +from .common import ( + ComputeModel, + CustomerContact, + DataCollectionOptionsCommon, + IdentityConnector, +) +from .database import ( + Database, + DatabaseProperties, + DbBackupConfig, + GetDatabaseRequest, + ListDatabasesRequest, + ListDatabasesResponse, +) +from .database_character_set import ( + DatabaseCharacterSet, + ListDatabaseCharacterSetsRequest, + ListDatabaseCharacterSetsResponse, +) from .db_node import DbNode, DbNodeProperties from .db_server import DbServer, DbServerProperties +from .db_system import ( + CreateDbSystemRequest, + DataCollectionOptionsDbSystem, + DbHome, + DbSystem, + DbSystemOptions, + DbSystemProperties, + DeleteDbSystemRequest, + GetDbSystemRequest, + ListDbSystemsRequest, + ListDbSystemsResponse, +) +from .db_system_initial_storage_size import ( + 
DbSystemInitialStorageSize, + DbSystemInitialStorageSizeProperties, + ListDbSystemInitialStorageSizesRequest, + ListDbSystemInitialStorageSizesResponse, + StorageSizeDetails, +) from .db_system_shape import DbSystemShape +from .db_version import ( + DbVersion, + DbVersionProperties, + ListDbVersionsRequest, + ListDbVersionsResponse, +) from .entitlement import CloudAccountDetails, Entitlement from .exadata_infra import ( CloudExadataInfrastructure, CloudExadataInfrastructureProperties, MaintenanceWindow, ) +from .exadb_vm_cluster import ( + ExadbVmCluster, + ExadbVmClusterProperties, + ExadbVmClusterStorageDetails, +) +from .exascale_db_storage_vault import ( + CreateExascaleDbStorageVaultRequest, + DeleteExascaleDbStorageVaultRequest, + ExascaleDbStorageDetails, + ExascaleDbStorageVault, + ExascaleDbStorageVaultProperties, + GetExascaleDbStorageVaultRequest, + ListExascaleDbStorageVaultsRequest, + ListExascaleDbStorageVaultsResponse, +) from .gi_version import GiVersion from .location_metadata import LocationMetadata +from .minor_version import ( + ListMinorVersionsRequest, + ListMinorVersionsResponse, + MinorVersion, +) +from .odb_network import ( + CreateOdbNetworkRequest, + DeleteOdbNetworkRequest, + GetOdbNetworkRequest, + ListOdbNetworksRequest, + ListOdbNetworksResponse, + OdbNetwork, +) +from .odb_subnet import ( + CreateOdbSubnetRequest, + DeleteOdbSubnetRequest, + GetOdbSubnetRequest, + ListOdbSubnetsRequest, + ListOdbSubnetsResponse, + OdbSubnet, +) from .oracledatabase import ( CreateAutonomousDatabaseRequest, CreateCloudExadataInfrastructureRequest, CreateCloudVmClusterRequest, + CreateExadbVmClusterRequest, DeleteAutonomousDatabaseRequest, DeleteCloudExadataInfrastructureRequest, DeleteCloudVmClusterRequest, + DeleteExadbVmClusterRequest, + FailoverAutonomousDatabaseRequest, GenerateAutonomousDatabaseWalletRequest, GenerateAutonomousDatabaseWalletResponse, GetAutonomousDatabaseRequest, GetCloudExadataInfrastructureRequest, GetCloudVmClusterRequest, + 
GetExadbVmClusterRequest, ListAutonomousDatabaseBackupsRequest, ListAutonomousDatabaseBackupsResponse, ListAutonomousDatabaseCharacterSetsRequest, @@ -78,13 +164,29 @@ ListDbSystemShapesResponse, ListEntitlementsRequest, ListEntitlementsResponse, + ListExadbVmClustersRequest, + ListExadbVmClustersResponse, ListGiVersionsRequest, ListGiVersionsResponse, OperationMetadata, + RemoveVirtualMachineExadbVmClusterRequest, RestartAutonomousDatabaseRequest, RestoreAutonomousDatabaseRequest, StartAutonomousDatabaseRequest, StopAutonomousDatabaseRequest, + SwitchoverAutonomousDatabaseRequest, + UpdateAutonomousDatabaseRequest, + UpdateExadbVmClusterRequest, +) +from .pluggable_database import ( + DatabaseManagementConfig, + GetPluggableDatabaseRequest, + ListPluggableDatabasesRequest, + ListPluggableDatabasesResponse, + PluggableDatabase, + PluggableDatabaseConnectionStrings, + PluggableDatabaseNodeLevelDetails, + PluggableDatabaseProperties, ) from .vm_cluster import CloudVmCluster, CloudVmClusterProperties, DataCollectionOptions @@ -97,7 +199,10 @@ "AutonomousDatabaseProperties", "AutonomousDatabaseStandbySummary", "DatabaseConnectionStringProfile", + "EncryptionKey", + "EncryptionKeyHistoryEntry", "ScheduledOperationDetails", + "SourceConfig", "DBWorkload", "GenerateType", "OperationsInsightsState", @@ -107,29 +212,90 @@ "AutonomousDatabaseBackupProperties", "AutonomousDbVersion", "CustomerContact", + "DataCollectionOptionsCommon", + "IdentityConnector", + "ComputeModel", + "Database", + "DatabaseProperties", + "DbBackupConfig", + "GetDatabaseRequest", + "ListDatabasesRequest", + "ListDatabasesResponse", + "DatabaseCharacterSet", + "ListDatabaseCharacterSetsRequest", + "ListDatabaseCharacterSetsResponse", "DbNode", "DbNodeProperties", "DbServer", "DbServerProperties", + "CreateDbSystemRequest", + "DataCollectionOptionsDbSystem", + "DbHome", + "DbSystem", + "DbSystemOptions", + "DbSystemProperties", + "DeleteDbSystemRequest", + "GetDbSystemRequest", + 
"ListDbSystemsRequest", + "ListDbSystemsResponse", + "DbSystemInitialStorageSize", + "DbSystemInitialStorageSizeProperties", + "ListDbSystemInitialStorageSizesRequest", + "ListDbSystemInitialStorageSizesResponse", + "StorageSizeDetails", "DbSystemShape", + "DbVersion", + "DbVersionProperties", + "ListDbVersionsRequest", + "ListDbVersionsResponse", "CloudAccountDetails", "Entitlement", "CloudExadataInfrastructure", "CloudExadataInfrastructureProperties", "MaintenanceWindow", + "ExadbVmCluster", + "ExadbVmClusterProperties", + "ExadbVmClusterStorageDetails", + "CreateExascaleDbStorageVaultRequest", + "DeleteExascaleDbStorageVaultRequest", + "ExascaleDbStorageDetails", + "ExascaleDbStorageVault", + "ExascaleDbStorageVaultProperties", + "GetExascaleDbStorageVaultRequest", + "ListExascaleDbStorageVaultsRequest", + "ListExascaleDbStorageVaultsResponse", "GiVersion", "LocationMetadata", + "ListMinorVersionsRequest", + "ListMinorVersionsResponse", + "MinorVersion", + "CreateOdbNetworkRequest", + "DeleteOdbNetworkRequest", + "GetOdbNetworkRequest", + "ListOdbNetworksRequest", + "ListOdbNetworksResponse", + "OdbNetwork", + "CreateOdbSubnetRequest", + "DeleteOdbSubnetRequest", + "GetOdbSubnetRequest", + "ListOdbSubnetsRequest", + "ListOdbSubnetsResponse", + "OdbSubnet", "CreateAutonomousDatabaseRequest", "CreateCloudExadataInfrastructureRequest", "CreateCloudVmClusterRequest", + "CreateExadbVmClusterRequest", "DeleteAutonomousDatabaseRequest", "DeleteCloudExadataInfrastructureRequest", "DeleteCloudVmClusterRequest", + "DeleteExadbVmClusterRequest", + "FailoverAutonomousDatabaseRequest", "GenerateAutonomousDatabaseWalletRequest", "GenerateAutonomousDatabaseWalletResponse", "GetAutonomousDatabaseRequest", "GetCloudExadataInfrastructureRequest", "GetCloudVmClusterRequest", + "GetExadbVmClusterRequest", "ListAutonomousDatabaseBackupsRequest", "ListAutonomousDatabaseBackupsResponse", "ListAutonomousDatabaseCharacterSetsRequest", @@ -150,13 +316,27 @@ "ListDbSystemShapesResponse", 
"ListEntitlementsRequest", "ListEntitlementsResponse", + "ListExadbVmClustersRequest", + "ListExadbVmClustersResponse", "ListGiVersionsRequest", "ListGiVersionsResponse", "OperationMetadata", + "RemoveVirtualMachineExadbVmClusterRequest", "RestartAutonomousDatabaseRequest", "RestoreAutonomousDatabaseRequest", "StartAutonomousDatabaseRequest", "StopAutonomousDatabaseRequest", + "SwitchoverAutonomousDatabaseRequest", + "UpdateAutonomousDatabaseRequest", + "UpdateExadbVmClusterRequest", + "DatabaseManagementConfig", + "GetPluggableDatabaseRequest", + "ListPluggableDatabasesRequest", + "ListPluggableDatabasesResponse", + "PluggableDatabase", + "PluggableDatabaseConnectionStrings", + "PluggableDatabaseNodeLevelDetails", + "PluggableDatabaseProperties", "CloudVmCluster", "CloudVmClusterProperties", "DataCollectionOptions", diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/autonomous_database.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/autonomous_database.py index 5b9c0dda9158..75e1a65b8099 100644 --- a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/autonomous_database.py +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/autonomous_database.py @@ -33,7 +33,10 @@ "OperationsInsightsState", "DBWorkload", "AutonomousDatabase", + "SourceConfig", "AutonomousDatabaseProperties", + "EncryptionKeyHistoryEntry", + "EncryptionKey", "AutonomousDatabaseApex", "AutonomousDatabaseConnectionStrings", "DatabaseConnectionStringProfile", @@ -250,10 +253,33 @@ class AutonomousDatabase(proto.Message): projects/{project}/global/networks/{network} cidr (str): Optional. The subnet CIDR range for the - Autonmous Database. + Autonomous Database. + odb_network (str): + Optional. The name of the OdbNetwork associated with the + Autonomous Database. 
Format: + projects/{project}/locations/{location}/odbNetworks/{odb_network} + It is optional but if specified, this should match the + parent ODBNetwork of the OdbSubnet. + odb_subnet (str): + Optional. The name of the OdbSubnet associated with the + Autonomous Database. Format: + projects/{project}/locations/{location}/odbNetworks/{odb_network}/odbSubnets/{odb_subnet} + source_config (google.cloud.oracledatabase_v1.types.SourceConfig): + Optional. The source Autonomous Database + configuration for the standby Autonomous + Database. The source Autonomous Database is + configured while creating the Peer Autonomous + Database and can't be updated after creation. + peer_autonomous_databases (MutableSequence[str]): + Output only. The peer Autonomous Database + names of the given Autonomous Database. create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. The date and time that the Autonomous Database was created. + disaster_recovery_supported_locations (MutableSequence[str]): + Output only. List of supported GCP region to clone the + Autonomous Database for disaster recovery. Format: + ``project/{project}/locations/{location}``. """ name: str = proto.Field( @@ -294,11 +320,56 @@ class AutonomousDatabase(proto.Message): proto.STRING, number=10, ) + odb_network: str = proto.Field( + proto.STRING, + number=16, + ) + odb_subnet: str = proto.Field( + proto.STRING, + number=17, + ) + source_config: "SourceConfig" = proto.Field( + proto.MESSAGE, + number=11, + message="SourceConfig", + ) + peer_autonomous_databases: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=12, + ) create_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=13, message=timestamp_pb2.Timestamp, ) + disaster_recovery_supported_locations: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=15, + ) + + +class SourceConfig(proto.Message): + r"""The source configuration for the standby Autonomous Database. 
+ + Attributes: + autonomous_database (str): + Optional. The name of the primary Autonomous + Database that is used to create a Peer + Autonomous Database from a source. + automatic_backups_replication_enabled (bool): + Optional. This field specifies if the + replication of automatic backups is enabled when + creating a Data Guard. + """ + + autonomous_database: str = proto.Field( + proto.STRING, + number=1, + ) + automatic_backups_replication_enabled: bool = proto.Field( + proto.BOOL, + number=2, + ) class AutonomousDatabaseProperties(proto.Message): @@ -498,12 +569,36 @@ class AutonomousDatabaseProperties(proto.Message): next_long_term_backup_time (google.protobuf.timestamp_pb2.Timestamp): Output only. The long term backup schedule of the Autonomous Database. + data_guard_role_changed_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The date and time the Autonomous + Data Guard role was changed for the standby + Autonomous Database. + disaster_recovery_role_changed_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The date and time the Disaster + Recovery role was changed for the standby + Autonomous Database. maintenance_begin_time (google.protobuf.timestamp_pb2.Timestamp): Output only. The date and time when maintenance will begin. maintenance_end_time (google.protobuf.timestamp_pb2.Timestamp): Output only. The date and time when maintenance will end. + allowlisted_ips (MutableSequence[str]): + Optional. The list of allowlisted IP + addresses for the Autonomous Database. + encryption_key (google.cloud.oracledatabase_v1.types.EncryptionKey): + Optional. The encryption key used to encrypt the Autonomous + Database. Updating this field will add a new entry in the + ``encryption_key_history_entries`` field with the former + version. + encryption_key_history_entries (MutableSequence[google.cloud.oracledatabase_v1.types.EncryptionKeyHistoryEntry]): + Output only. The history of the encryption + keys used to encrypt the Autonomous Database. 
+ service_agent_email (str): + Output only. An Oracle-managed Google Cloud + service account on which customers can grant + roles to access resources in the customer + project. """ class DatabaseEdition(proto.Enum): @@ -958,6 +1053,16 @@ class Role(proto.Enum): number=60, message=timestamp_pb2.Timestamp, ) + data_guard_role_changed_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=61, + message=timestamp_pb2.Timestamp, + ) + disaster_recovery_role_changed_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=62, + message=timestamp_pb2.Timestamp, + ) maintenance_begin_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=65, @@ -968,6 +1073,93 @@ class Role(proto.Enum): number=66, message=timestamp_pb2.Timestamp, ) + allowlisted_ips: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=67, + ) + encryption_key: "EncryptionKey" = proto.Field( + proto.MESSAGE, + number=68, + message="EncryptionKey", + ) + encryption_key_history_entries: MutableSequence[ + "EncryptionKeyHistoryEntry" + ] = proto.RepeatedField( + proto.MESSAGE, + number=69, + message="EncryptionKeyHistoryEntry", + ) + service_agent_email: str = proto.Field( + proto.STRING, + number=70, + ) + + +class EncryptionKeyHistoryEntry(proto.Message): + r"""The history of the encryption keys used to encrypt the + Autonomous Database. + + Attributes: + encryption_key (google.cloud.oracledatabase_v1.types.EncryptionKey): + Output only. The encryption key used to + encrypt the Autonomous Database. + activation_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The date and time when the + encryption key was activated on the Autonomous + Database.. 
+ """ + + encryption_key: "EncryptionKey" = proto.Field( + proto.MESSAGE, + number=1, + message="EncryptionKey", + ) + activation_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + + +class EncryptionKey(proto.Message): + r"""The encryption key used to encrypt the Autonomous Database. + + Attributes: + provider (google.cloud.oracledatabase_v1.types.EncryptionKey.Provider): + Optional. The provider of the encryption key. + kms_key (str): + Optional. The KMS key used to encrypt the Autonomous + Database. This field is required if the provider is + GOOGLE_MANAGED. The name of the KMS key resource in the + following format: + ``projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}``. + """ + + class Provider(proto.Enum): + r"""The provider of the encryption key. + + Values: + PROVIDER_UNSPECIFIED (0): + Default unspecified value. + GOOGLE_MANAGED (1): + Google Managed KMS key, if selected, please + provide the KMS key name. + ORACLE_MANAGED (2): + Oracle Managed. 
+ """ + PROVIDER_UNSPECIFIED = 0 + GOOGLE_MANAGED = 1 + ORACLE_MANAGED = 2 + + provider: Provider = proto.Field( + proto.ENUM, + number=1, + enum=Provider, + ) + kms_key: str = proto.Field( + proto.STRING, + number=2, + ) class AutonomousDatabaseApex(proto.Message): diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/common.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/common.py index a914794bef95..02d089fa4f18 100644 --- a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/common.py +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/common.py @@ -22,11 +22,35 @@ __protobuf__ = proto.module( package="google.cloud.oracledatabase.v1", manifest={ + "ComputeModel", "CustomerContact", + "IdentityConnector", + "DataCollectionOptionsCommon", }, ) +class ComputeModel(proto.Enum): + r"""The compute model of the Exadata Infrastructure, VM Cluster + and Autonomous Database. + + Values: + COMPUTE_MODEL_UNSPECIFIED (0): + Unspecified compute model. + COMPUTE_MODEL_ECPU (1): + Abstract measure of compute resources. ECPUs + are based on the number of cores elastically + allocated from a pool of compute and storage + servers. + COMPUTE_MODEL_OCPU (2): + Physical measure of compute resources. OCPUs + are based on the physical core of a processor. + """ + COMPUTE_MODEL_UNSPECIFIED = 0 + COMPUTE_MODEL_ECPU = 1 + COMPUTE_MODEL_OCPU = 2 + + class CustomerContact(proto.Message): r"""The CustomerContact reference as defined by Oracle. https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/CustomerContact @@ -44,4 +68,84 @@ class CustomerContact(proto.Message): ) +class IdentityConnector(proto.Message): + r"""The identity connector details which will allow OCI to + securely access the resources in the customer project. + + Attributes: + service_agent_email (str): + Output only. 
A google managed service account on which + customers can grant roles to access resources in the + customer project. Example: + ``p176944527254-55-75119d87fd8f@gcp-sa-oci.iam.gserviceaccount.com`` + connection_state (google.cloud.oracledatabase_v1.types.IdentityConnector.ConnectionState): + Output only. The connection state of the + identity connector. + """ + + class ConnectionState(proto.Enum): + r"""The various connection states of the + WorkloadIdentityPoolConnection. + + Values: + CONNECTION_STATE_UNSPECIFIED (0): + Default unspecified value. + CONNECTED (1): + The identity pool connection is connected. + PARTIALLY_CONNECTED (2): + The identity pool connection is partially + connected. + DISCONNECTED (3): + The identity pool connection is disconnected. + UNKNOWN (4): + The identity pool connection is in an unknown + state. + """ + CONNECTION_STATE_UNSPECIFIED = 0 + CONNECTED = 1 + PARTIALLY_CONNECTED = 2 + DISCONNECTED = 3 + UNKNOWN = 4 + + service_agent_email: str = proto.Field( + proto.STRING, + number=1, + ) + connection_state: ConnectionState = proto.Field( + proto.ENUM, + number=2, + enum=ConnectionState, + ) + + +class DataCollectionOptionsCommon(proto.Message): + r"""Data collection options for diagnostics. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/DataCollectionOptions + + Attributes: + is_diagnostics_events_enabled (bool): + Optional. Indicates whether to enable data + collection for diagnostics. + is_health_monitoring_enabled (bool): + Optional. Indicates whether to enable health + monitoring. + is_incident_logs_enabled (bool): + Optional. Indicates whether to enable + incident logs and trace collection. 
+ """ + + is_diagnostics_events_enabled: bool = proto.Field( + proto.BOOL, + number=1, + ) + is_health_monitoring_enabled: bool = proto.Field( + proto.BOOL, + number=2, + ) + is_incident_logs_enabled: bool = proto.Field( + proto.BOOL, + number=3, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/database.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/database.py new file mode 100644 index 000000000000..a392586587db --- /dev/null +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/database.py @@ -0,0 +1,531 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +from google.protobuf import timestamp_pb2 # type: ignore +from google.type import dayofweek_pb2 # type: ignore +import proto # type: ignore + +from google.cloud.oracledatabase_v1.types import pluggable_database + +__protobuf__ = proto.module( + package="google.cloud.oracledatabase.v1", + manifest={ + "Database", + "DatabaseProperties", + "DbBackupConfig", + "GetDatabaseRequest", + "ListDatabasesRequest", + "ListDatabasesResponse", + }, +) + + +class Database(proto.Message): + r"""Details of the Database resource. 
+ https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/Database/ + + Attributes: + name (str): + Identifier. The name of the Database resource + in the following format: + projects/{project}/locations/{region}/databases/{database} + db_name (str): + Optional. The database name. The name must + begin with an alphabetic character and can + contain a maximum of eight alphanumeric + characters. Special characters are not + permitted. + db_unique_name (str): + Optional. The DB_UNIQUE_NAME of the Oracle Database being + backed up. + admin_password (str): + Required. The password for the default ADMIN + user. + tde_wallet_password (str): + Optional. The TDE wallet password for the + database. + character_set (str): + Optional. The character set for the database. + The default is AL32UTF8. + ncharacter_set (str): + Optional. The national character set for the + database. The default is AL16UTF16. + oci_url (str): + Output only. HTTPS link to OCI resources + exposed to Customer via UI Interface. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The date and time that the + Database was created. + properties (google.cloud.oracledatabase_v1.types.DatabaseProperties): + Optional. The properties of the Database. + database_id (str): + Optional. The database ID of the Database. + db_home_name (str): + Optional. The name of the DbHome resource + associated with the Database. + gcp_oracle_zone (str): + Output only. The GCP Oracle zone where the + Database is created. + ops_insights_status (google.cloud.oracledatabase_v1.types.Database.OperationsInsightsStatus): + Output only. The Status of Operations + Insights for this Database. + """ + + class OperationsInsightsStatus(proto.Enum): + r"""The Status of Operations Insights for this Database. + + Values: + OPERATIONS_INSIGHTS_STATUS_UNSPECIFIED (0): + Default unspecified value. + ENABLING (1): + Indicates that the operations insights are + being enabled. 
+ ENABLED (2): + Indicates that the operations insights are + enabled. + DISABLING (3): + Indicates that the operations insights are + being disabled. + NOT_ENABLED (4): + Indicates that the operations insights are + not enabled. + FAILED_ENABLING (5): + Indicates that the operations insights failed + to enable. + FAILED_DISABLING (6): + Indicates that the operations insights failed + to disable. + """ + OPERATIONS_INSIGHTS_STATUS_UNSPECIFIED = 0 + ENABLING = 1 + ENABLED = 2 + DISABLING = 3 + NOT_ENABLED = 4 + FAILED_ENABLING = 5 + FAILED_DISABLING = 6 + + name: str = proto.Field( + proto.STRING, + number=1, + ) + db_name: str = proto.Field( + proto.STRING, + number=2, + ) + db_unique_name: str = proto.Field( + proto.STRING, + number=3, + ) + admin_password: str = proto.Field( + proto.STRING, + number=4, + ) + tde_wallet_password: str = proto.Field( + proto.STRING, + number=5, + ) + character_set: str = proto.Field( + proto.STRING, + number=6, + ) + ncharacter_set: str = proto.Field( + proto.STRING, + number=7, + ) + oci_url: str = proto.Field( + proto.STRING, + number=8, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=9, + message=timestamp_pb2.Timestamp, + ) + properties: "DatabaseProperties" = proto.Field( + proto.MESSAGE, + number=10, + message="DatabaseProperties", + ) + database_id: str = proto.Field( + proto.STRING, + number=11, + ) + db_home_name: str = proto.Field( + proto.STRING, + number=12, + ) + gcp_oracle_zone: str = proto.Field( + proto.STRING, + number=13, + ) + ops_insights_status: OperationsInsightsStatus = proto.Field( + proto.ENUM, + number=14, + enum=OperationsInsightsStatus, + ) + + +class DatabaseProperties(proto.Message): + r"""The properties of a Database. + + Attributes: + state (google.cloud.oracledatabase_v1.types.DatabaseProperties.DatabaseLifecycleState): + Output only. State of the Database. + db_version (str): + Required. The Oracle Database version. 
+ db_backup_config (google.cloud.oracledatabase_v1.types.DbBackupConfig): + Optional. Backup options for the Database. + database_management_config (google.cloud.oracledatabase_v1.types.DatabaseManagementConfig): + Output only. The Database Management config. + """ + + class DatabaseLifecycleState(proto.Enum): + r"""The various lifecycle states of the Database. + + Values: + DATABASE_LIFECYCLE_STATE_UNSPECIFIED (0): + Default unspecified value. + PROVISIONING (1): + Indicates that the resource is in + provisioning state. + AVAILABLE (2): + Indicates that the resource is in available + state. + UPDATING (3): + Indicates that the resource is in updating + state. + BACKUP_IN_PROGRESS (4): + Indicates that the resource is in backup in + progress state. + UPGRADING (5): + Indicates that the resource is in upgrading + state. + CONVERTING (6): + Indicates that the resource is in converting + state. + TERMINATING (7): + Indicates that the resource is in terminating + state. + TERMINATED (8): + Indicates that the resource is in terminated + state. + RESTORE_FAILED (9): + Indicates that the resource is in restore + failed state. + FAILED (10): + Indicates that the resource is in failed + state. + """ + DATABASE_LIFECYCLE_STATE_UNSPECIFIED = 0 + PROVISIONING = 1 + AVAILABLE = 2 + UPDATING = 3 + BACKUP_IN_PROGRESS = 4 + UPGRADING = 5 + CONVERTING = 6 + TERMINATING = 7 + TERMINATED = 8 + RESTORE_FAILED = 9 + FAILED = 10 + + state: DatabaseLifecycleState = proto.Field( + proto.ENUM, + number=1, + enum=DatabaseLifecycleState, + ) + db_version: str = proto.Field( + proto.STRING, + number=2, + ) + db_backup_config: "DbBackupConfig" = proto.Field( + proto.MESSAGE, + number=3, + message="DbBackupConfig", + ) + database_management_config: pluggable_database.DatabaseManagementConfig = ( + proto.Field( + proto.MESSAGE, + number=4, + message=pluggable_database.DatabaseManagementConfig, + ) + ) + + +class DbBackupConfig(proto.Message): + r"""Backup Options for the Database. 
+ + Attributes: + auto_backup_enabled (bool): + Optional. If set to true, enables automatic + backups on the database. + backup_destination_details (MutableSequence[google.cloud.oracledatabase_v1.types.DbBackupConfig.BackupDestinationDetails]): + Optional. Details of the database backup + destinations. + retention_period_days (int): + Optional. The number of days an automatic + backup is retained before being automatically + deleted. This value determines the earliest + point in time to which a database can be + restored. Min: 1, Max: 60. + backup_deletion_policy (google.cloud.oracledatabase_v1.types.DbBackupConfig.BackupDeletionPolicy): + Optional. This defines when the backups will + be deleted after Database termination. + auto_full_backup_day (google.type.dayofweek_pb2.DayOfWeek): + Optional. The day of the week on which the + full backup should be performed on the database. + If no value is provided, it will default to + Sunday. + auto_full_backup_window (google.cloud.oracledatabase_v1.types.DbBackupConfig.BackupWindow): + Optional. The window in which the full backup + should be performed on the database. If no value + is provided, the default is anytime. + auto_incremental_backup_window (google.cloud.oracledatabase_v1.types.DbBackupConfig.BackupWindow): + Optional. The window in which the incremental + backup should be performed on the database. If + no value is provided, the default is anytime + except the auto full backup day. + """ + + class BackupDestinationType(proto.Enum): + r"""The type of the database backup destination. + + Values: + BACKUP_DESTINATION_TYPE_UNSPECIFIED (0): + Default unspecified value. + NFS (1): + Backup destination type is NFS. + RECOVERY_APPLIANCE (2): + Backup destination type is Recovery + Appliance. + OBJECT_STORE (3): + Backup destination type is Object Store. + LOCAL (4): + Backup destination type is Local. + DBRS (5): + Backup destination type is DBRS. 
+ """ + BACKUP_DESTINATION_TYPE_UNSPECIFIED = 0 + NFS = 1 + RECOVERY_APPLIANCE = 2 + OBJECT_STORE = 3 + LOCAL = 4 + DBRS = 5 + + class BackupWindow(proto.Enum): + r"""The 2 hour window in which the backup should be performed on + the database. + + Values: + BACKUP_WINDOW_UNSPECIFIED (0): + Default unspecified value. + SLOT_ONE (1): + 12:00 AM - 2:00 AM + SLOT_TWO (2): + 2:00 AM - 4:00 AM + SLOT_THREE (3): + 4:00 AM - 6:00 AM + SLOT_FOUR (4): + 6:00 AM - 8:00 AM + SLOT_FIVE (5): + 8:00 AM - 10:00 AM + SLOT_SIX (6): + 10:00 AM - 12:00 PM + SLOT_SEVEN (7): + 12:00 PM - 2:00 PM + SLOT_EIGHT (8): + 2:00 PM - 4:00 PM + SLOT_NINE (9): + 4:00 PM - 6:00 PM + SLOT_TEN (10): + 6:00 PM - 8:00 PM + SLOT_ELEVEN (11): + 8:00 PM - 10:00 PM + SLOT_TWELVE (12): + 10:00 PM - 12:00 AM + """ + BACKUP_WINDOW_UNSPECIFIED = 0 + SLOT_ONE = 1 + SLOT_TWO = 2 + SLOT_THREE = 3 + SLOT_FOUR = 4 + SLOT_FIVE = 5 + SLOT_SIX = 6 + SLOT_SEVEN = 7 + SLOT_EIGHT = 8 + SLOT_NINE = 9 + SLOT_TEN = 10 + SLOT_ELEVEN = 11 + SLOT_TWELVE = 12 + + class BackupDeletionPolicy(proto.Enum): + r"""This defines when the backups will be deleted after Database + termination. + + Values: + BACKUP_DELETION_POLICY_UNSPECIFIED (0): + Default unspecified value. + DELETE_IMMEDIATELY (1): + Keeps the backup for predefined time + i.e. 72 hours and then delete permanently. + DELETE_AFTER_RETENTION_PERIOD (2): + Keeps the backups as per the policy defined + for database backups. + """ + BACKUP_DELETION_POLICY_UNSPECIFIED = 0 + DELETE_IMMEDIATELY = 1 + DELETE_AFTER_RETENTION_PERIOD = 2 + + class BackupDestinationDetails(proto.Message): + r"""The details of the database backup destination. + + Attributes: + type_ (google.cloud.oracledatabase_v1.types.DbBackupConfig.BackupDestinationType): + Optional. The type of the database backup + destination. 
+ """ + + type_: "DbBackupConfig.BackupDestinationType" = proto.Field( + proto.ENUM, + number=1, + enum="DbBackupConfig.BackupDestinationType", + ) + + auto_backup_enabled: bool = proto.Field( + proto.BOOL, + number=1, + ) + backup_destination_details: MutableSequence[ + BackupDestinationDetails + ] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=BackupDestinationDetails, + ) + retention_period_days: int = proto.Field( + proto.INT32, + number=3, + ) + backup_deletion_policy: BackupDeletionPolicy = proto.Field( + proto.ENUM, + number=4, + enum=BackupDeletionPolicy, + ) + auto_full_backup_day: dayofweek_pb2.DayOfWeek = proto.Field( + proto.ENUM, + number=5, + enum=dayofweek_pb2.DayOfWeek, + ) + auto_full_backup_window: BackupWindow = proto.Field( + proto.ENUM, + number=6, + enum=BackupWindow, + ) + auto_incremental_backup_window: BackupWindow = proto.Field( + proto.ENUM, + number=7, + enum=BackupWindow, + ) + + +class GetDatabaseRequest(proto.Message): + r"""The request for ``Database.Get``. + + Attributes: + name (str): + Required. The name of the Database resource + in the following format: + projects/{project}/locations/{region}/databases/{database} + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListDatabasesRequest(proto.Message): + r"""The request for ``Database.List``. + + Attributes: + parent (str): + Required. The parent resource name in the + following format: + projects/{project}/locations/{region} + page_size (int): + Optional. The maximum number of items to + return. If unspecified, a maximum of 50 + Databases will be returned. The maximum value is + 1000; values above 1000 will be reset to 1000. + page_token (str): + Optional. A token identifying the requested + page of results to return. All fields except the + filter should remain the same as in the request + that provided this page token. + filter (str): + Optional. An expression for filtering the results of the + request. 
list for container databases is supported only with + a valid dbSystem (full resource name) filter in this format: + ``dbSystem="projects/{project}/locations/{location}/dbSystems/{dbSystemId}"`` + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + filter: str = proto.Field( + proto.STRING, + number=4, + ) + + +class ListDatabasesResponse(proto.Message): + r"""The response for ``Database.List``. + + Attributes: + databases (MutableSequence[google.cloud.oracledatabase_v1.types.Database]): + The list of Databases. + next_page_token (str): + A token identifying a page of results the + server should return. + """ + + @property + def raw_page(self): + return self + + databases: MutableSequence["Database"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="Database", + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/database_character_set.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/database_character_set.py new file mode 100644 index 000000000000..a56e414b718b --- /dev/null +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/database_character_set.py @@ -0,0 +1,155 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +__protobuf__ = proto.module( + package="google.cloud.oracledatabase.v1", + manifest={ + "DatabaseCharacterSet", + "ListDatabaseCharacterSetsRequest", + "ListDatabaseCharacterSetsResponse", + }, +) + + +class DatabaseCharacterSet(proto.Message): + r"""Details of the Database character set resource. + + Attributes: + name (str): + Identifier. The name of the Database Character Set resource + in the following format: + projects/{project}/locations/{region}/databaseCharacterSets/{database_character_set} + character_set_type (google.cloud.oracledatabase_v1.types.DatabaseCharacterSet.CharacterSetType): + Output only. The character set type for the + Database. + character_set (str): + Output only. The character set name for the + Database which is the ID in the resource name. + """ + + class CharacterSetType(proto.Enum): + r"""The type of character set a Database can have. + + Values: + CHARACTER_SET_TYPE_UNSPECIFIED (0): + Character set type is not specified. + DATABASE (1): + Character set type is set to database. + NATIONAL (2): + Character set type is set to national. + """ + CHARACTER_SET_TYPE_UNSPECIFIED = 0 + DATABASE = 1 + NATIONAL = 2 + + name: str = proto.Field( + proto.STRING, + number=1, + ) + character_set_type: CharacterSetType = proto.Field( + proto.ENUM, + number=2, + enum=CharacterSetType, + ) + character_set: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListDatabaseCharacterSetsRequest(proto.Message): + r"""The request for ``DatabaseCharacterSet.List``. + + Attributes: + parent (str): + Required. The parent value for + DatabaseCharacterSets in the following format: + projects/{project}/locations/{location}. + page_size (int): + Optional. The maximum number of + DatabaseCharacterSets to return. 
The service may + return fewer than this value. If unspecified, at + most 50 DatabaseCharacterSets will be returned. + The maximum value is 1000; values above 1000 + will be coerced to 1000. + page_token (str): + Optional. A page token, received from a previous + ``ListDatabaseCharacterSets`` call. Provide this to retrieve + the subsequent page. + + When paginating, all other parameters provided to + ``ListDatabaseCharacterSets`` must match the call that + provided the page token. + filter (str): + Optional. An expression for filtering the results of the + request. Only the **character_set_type** field is supported + in the following format: + ``character_set_type="{characterSetType}"``. Accepted values + include ``DATABASE`` and ``NATIONAL``. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + filter: str = proto.Field( + proto.STRING, + number=4, + ) + + +class ListDatabaseCharacterSetsResponse(proto.Message): + r"""The response for ``DatabaseCharacterSet.List``. + + Attributes: + database_character_sets (MutableSequence[google.cloud.oracledatabase_v1.types.DatabaseCharacterSet]): + The list of DatabaseCharacterSets. + next_page_token (str): + A token identifying a page of results the + server should return. 
+ """ + + @property + def raw_page(self): + return self + + database_character_sets: MutableSequence[ + "DatabaseCharacterSet" + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="DatabaseCharacterSet", + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/db_node.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/db_node.py index 113bad978a5c..e8a0dbffb594 100644 --- a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/db_node.py +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/db_node.py @@ -17,6 +17,7 @@ from typing import MutableMapping, MutableSequence +from google.protobuf import timestamp_pb2 # type: ignore import proto # type: ignore __protobuf__ = proto.module( @@ -73,6 +74,9 @@ class DbNodeProperties(proto.Message): Output only. State of the database node. total_cpu_core_count (int): Total CPU core count of the database node. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The date and time that the + database node was created. 
""" class State(proto.Enum): @@ -153,6 +157,11 @@ class State(proto.Enum): proto.INT32, number=10, ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/db_system.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/db_system.py new file mode 100644 index 000000000000..110d3842bb27 --- /dev/null +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/db_system.py @@ -0,0 +1,645 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +from google.protobuf import timestamp_pb2 # type: ignore +from google.type import datetime_pb2 # type: ignore +import proto # type: ignore + +from google.cloud.oracledatabase_v1.types import database as gco_database + +__protobuf__ = proto.module( + package="google.cloud.oracledatabase.v1", + manifest={ + "DbSystem", + "DbSystemProperties", + "DataCollectionOptionsDbSystem", + "DbSystemOptions", + "DbHome", + "CreateDbSystemRequest", + "DeleteDbSystemRequest", + "GetDbSystemRequest", + "ListDbSystemsRequest", + "ListDbSystemsResponse", + }, +) + + +class DbSystem(proto.Message): + r"""Details of the DbSystem (BaseDB) resource. 
+ https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/DbSystem/ + + Attributes: + name (str): + Identifier. The name of the DbSystem resource in the + following format: + projects/{project}/locations/{region}/dbSystems/{db_system} + properties (google.cloud.oracledatabase_v1.types.DbSystemProperties): + Optional. The properties of the DbSystem. + gcp_oracle_zone (str): + Optional. The GCP Oracle zone where Oracle + DbSystem is hosted. Example: us-east4-b-r2. + If not specified, the system will pick a zone + based on availability. + labels (MutableMapping[str, str]): + Optional. The labels or tags associated with + the DbSystem. + odb_network (str): + Optional. The name of the OdbNetwork associated with the + DbSystem. Format: + projects/{project}/locations/{location}/odbNetworks/{odb_network} + It is optional but if specified, this should match the + parent ODBNetwork of the OdbSubnet. + odb_subnet (str): + Required. The name of the OdbSubnet associated with the + DbSystem for IP allocation. Format: + projects/{project}/locations/{location}/odbNetworks/{odb_network}/odbSubnets/{odb_subnet} + entitlement_id (str): + Output only. The ID of the subscription + entitlement associated with the DbSystem + display_name (str): + Required. The display name for the System db. + The name does not have to be unique within your + project. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The date and time that the + DbSystem was created. + oci_url (str): + Output only. HTTPS link to OCI resources + exposed to Customer via UI Interface. 
+ """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + properties: "DbSystemProperties" = proto.Field( + proto.MESSAGE, + number=2, + message="DbSystemProperties", + ) + gcp_oracle_zone: str = proto.Field( + proto.STRING, + number=3, + ) + labels: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=4, + ) + odb_network: str = proto.Field( + proto.STRING, + number=5, + ) + odb_subnet: str = proto.Field( + proto.STRING, + number=6, + ) + entitlement_id: str = proto.Field( + proto.STRING, + number=7, + ) + display_name: str = proto.Field( + proto.STRING, + number=8, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=9, + message=timestamp_pb2.Timestamp, + ) + oci_url: str = proto.Field( + proto.STRING, + number=10, + ) + + +class DbSystemProperties(proto.Message): + r"""The properties of a DbSystem. + + Attributes: + shape (str): + Required. Shape of DB System. + compute_count (int): + Required. The number of CPU cores to enable + for the DbSystem. + initial_data_storage_size_gb (int): + Required. The initial data storage size in + GB. + database_edition (google.cloud.oracledatabase_v1.types.DbSystemProperties.DbSystemDatabaseEdition): + Required. The database edition of the + DbSystem. + license_model (google.cloud.oracledatabase_v1.types.DbSystemProperties.LicenseModel): + Required. The license model of the DbSystem. + ssh_public_keys (MutableSequence[str]): + Required. SSH public keys to be stored with + the DbSystem. + hostname_prefix (str): + Optional. Prefix for DB System host names. + hostname (str): + Output only. The hostname of the DbSystem. + private_ip (str): + Optional. The private IP address of the + DbSystem. + data_collection_options (google.cloud.oracledatabase_v1.types.DataCollectionOptionsDbSystem): + Optional. Data collection options for + diagnostics. + time_zone (google.type.datetime_pb2.TimeZone): + Optional. Time zone of the DbSystem. 
+ lifecycle_state (google.cloud.oracledatabase_v1.types.DbSystemProperties.DbSystemLifecycleState): + Output only. State of the DbSystem. + db_home (google.cloud.oracledatabase_v1.types.DbHome): + Optional. Details for creating a Database + Home. + ocid (str): + Output only. OCID of the DbSystem. + memory_size_gb (int): + Optional. The memory size in GB. + compute_model (google.cloud.oracledatabase_v1.types.DbSystemProperties.ComputeModel): + Optional. The compute model of the DbSystem. + data_storage_size_gb (int): + Optional. The data storage size in GB that is + currently available to DbSystems. + reco_storage_size_gb (int): + Optional. The reco/redo storage size in GB. + domain (str): + Optional. The host domain name of the + DbSystem. + node_count (int): + Optional. The number of nodes in the + DbSystem. + db_system_options (google.cloud.oracledatabase_v1.types.DbSystemOptions): + Optional. The options for the DbSystem. + """ + + class DbSystemDatabaseEdition(proto.Enum): + r"""The editions available for DbSystem. + + Values: + DB_SYSTEM_DATABASE_EDITION_UNSPECIFIED (0): + The database edition is unspecified. + STANDARD_EDITION (1): + The database edition is Standard. + ENTERPRISE_EDITION (2): + The database edition is Enterprise. + ENTERPRISE_EDITION_HIGH_PERFORMANCE (3): + The database edition is Enterprise Edition. + """ + DB_SYSTEM_DATABASE_EDITION_UNSPECIFIED = 0 + STANDARD_EDITION = 1 + ENTERPRISE_EDITION = 2 + ENTERPRISE_EDITION_HIGH_PERFORMANCE = 3 + + class LicenseModel(proto.Enum): + r"""The license model of the DbSystem. + + Values: + LICENSE_MODEL_UNSPECIFIED (0): + The license model is unspecified. + LICENSE_INCLUDED (1): + The license model is included. + BRING_YOUR_OWN_LICENSE (2): + The license model is bring your own license. + """ + LICENSE_MODEL_UNSPECIFIED = 0 + LICENSE_INCLUDED = 1 + BRING_YOUR_OWN_LICENSE = 2 + + class DbSystemLifecycleState(proto.Enum): + r"""The various lifecycle states of the DbSystem. 
+ + Values: + DB_SYSTEM_LIFECYCLE_STATE_UNSPECIFIED (0): + Default unspecified value. + PROVISIONING (1): + Indicates that the resource is in + provisioning state. + AVAILABLE (2): + Indicates that the resource is in available + state. + UPDATING (3): + Indicates that the resource is in updating + state. + TERMINATING (4): + Indicates that the resource is in terminating + state. + TERMINATED (5): + Indicates that the resource is in terminated + state. + FAILED (6): + Indicates that the resource is in failed + state. + MIGRATED (7): + Indicates that the resource has been + migrated. + MAINTENANCE_IN_PROGRESS (8): + Indicates that the resource is in maintenance + in progress state. + NEEDS_ATTENTION (9): + Indicates that the resource needs attention. + UPGRADING (10): + Indicates that the resource is upgrading. + """ + DB_SYSTEM_LIFECYCLE_STATE_UNSPECIFIED = 0 + PROVISIONING = 1 + AVAILABLE = 2 + UPDATING = 3 + TERMINATING = 4 + TERMINATED = 5 + FAILED = 6 + MIGRATED = 7 + MAINTENANCE_IN_PROGRESS = 8 + NEEDS_ATTENTION = 9 + UPGRADING = 10 + + class ComputeModel(proto.Enum): + r"""The compute model of the DbSystem. + + Values: + COMPUTE_MODEL_UNSPECIFIED (0): + The compute model is unspecified. + ECPU (1): + The compute model is virtual. + OCPU (2): + The compute model is physical. 
+ """ + COMPUTE_MODEL_UNSPECIFIED = 0 + ECPU = 1 + OCPU = 2 + + shape: str = proto.Field( + proto.STRING, + number=1, + ) + compute_count: int = proto.Field( + proto.INT32, + number=2, + ) + initial_data_storage_size_gb: int = proto.Field( + proto.INT32, + number=3, + ) + database_edition: DbSystemDatabaseEdition = proto.Field( + proto.ENUM, + number=4, + enum=DbSystemDatabaseEdition, + ) + license_model: LicenseModel = proto.Field( + proto.ENUM, + number=5, + enum=LicenseModel, + ) + ssh_public_keys: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=6, + ) + hostname_prefix: str = proto.Field( + proto.STRING, + number=7, + ) + hostname: str = proto.Field( + proto.STRING, + number=8, + ) + private_ip: str = proto.Field( + proto.STRING, + number=9, + ) + data_collection_options: "DataCollectionOptionsDbSystem" = proto.Field( + proto.MESSAGE, + number=10, + message="DataCollectionOptionsDbSystem", + ) + time_zone: datetime_pb2.TimeZone = proto.Field( + proto.MESSAGE, + number=11, + message=datetime_pb2.TimeZone, + ) + lifecycle_state: DbSystemLifecycleState = proto.Field( + proto.ENUM, + number=12, + enum=DbSystemLifecycleState, + ) + db_home: "DbHome" = proto.Field( + proto.MESSAGE, + number=13, + message="DbHome", + ) + ocid: str = proto.Field( + proto.STRING, + number=14, + ) + memory_size_gb: int = proto.Field( + proto.INT32, + number=15, + ) + compute_model: ComputeModel = proto.Field( + proto.ENUM, + number=16, + enum=ComputeModel, + ) + data_storage_size_gb: int = proto.Field( + proto.INT32, + number=17, + ) + reco_storage_size_gb: int = proto.Field( + proto.INT32, + number=18, + ) + domain: str = proto.Field( + proto.STRING, + number=19, + ) + node_count: int = proto.Field( + proto.INT32, + number=20, + ) + db_system_options: "DbSystemOptions" = proto.Field( + proto.MESSAGE, + number=21, + message="DbSystemOptions", + ) + + +class DataCollectionOptionsDbSystem(proto.Message): + r"""Data collection options for DbSystem. 
+ + Attributes: + is_diagnostics_events_enabled (bool): + Optional. Indicates whether to enable data + collection for diagnostics. + is_incident_logs_enabled (bool): + Optional. Indicates whether to enable + incident logs and trace collection. + """ + + is_diagnostics_events_enabled: bool = proto.Field( + proto.BOOL, + number=1, + ) + is_incident_logs_enabled: bool = proto.Field( + proto.BOOL, + number=2, + ) + + +class DbSystemOptions(proto.Message): + r"""Details of the DbSystem Options. + + Attributes: + storage_management (google.cloud.oracledatabase_v1.types.DbSystemOptions.StorageManagement): + Optional. The storage option used in DB + system. + """ + + class StorageManagement(proto.Enum): + r"""The storage option used in DB system. + + Values: + STORAGE_MANAGEMENT_UNSPECIFIED (0): + The storage management is unspecified. + ASM (1): + Automatic storage management. + LVM (2): + Logical Volume management. + """ + STORAGE_MANAGEMENT_UNSPECIFIED = 0 + ASM = 1 + LVM = 2 + + storage_management: StorageManagement = proto.Field( + proto.ENUM, + number=1, + enum=StorageManagement, + ) + + +class DbHome(proto.Message): + r"""Details of the Database Home resource. + + Attributes: + display_name (str): + Optional. The display name for the Database + Home. The name does not have to be unique within + your project. + db_version (str): + Required. A valid Oracle Database version. + For a list of supported versions, use the + ListDbVersions operation. + database (google.cloud.oracledatabase_v1.types.Database): + Required. The Database resource. + is_unified_auditing_enabled (bool): + Optional. Whether unified auditing is enabled + for the Database Home. 
+ """ + + display_name: str = proto.Field( + proto.STRING, + number=1, + ) + db_version: str = proto.Field( + proto.STRING, + number=2, + ) + database: gco_database.Database = proto.Field( + proto.MESSAGE, + number=3, + message=gco_database.Database, + ) + is_unified_auditing_enabled: bool = proto.Field( + proto.BOOL, + number=4, + ) + + +class CreateDbSystemRequest(proto.Message): + r"""The request for ``DbSystem.Create``. + + Attributes: + parent (str): + Required. The value for parent of the + DbSystem in the following format: + projects/{project}/locations/{location}. + db_system_id (str): + Required. The ID of the DbSystem to create. This value is + restricted to (^\ `a-z <[a-z0-9-]{0,61}[a-z0-9]>`__?$) and + must be a maximum of 63 characters in length. The value must + start with a letter and end with a letter or a number. + db_system (google.cloud.oracledatabase_v1.types.DbSystem): + Required. The resource being created. + request_id (str): + Optional. An optional request ID to identify + requests. Specify a unique request ID so that if + you must retry your request, the server will + know to ignore the request if it has already + been completed. The server will guarantee that + for at least 60 minutes since the first request. + + For example, consider a situation where you make + an initial request and the request times out. If + you make the request again with the same request + ID, the server can check if original operation + with the same request ID was received, and if + so, will ignore the second request. This + prevents clients from accidentally creating + duplicate commitments. + + The request ID must be a valid UUID with the + exception that zero UUID is not supported + (00000000-0000-0000-0000-000000000000). 
+ """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + db_system_id: str = proto.Field( + proto.STRING, + number=2, + ) + db_system: "DbSystem" = proto.Field( + proto.MESSAGE, + number=3, + message="DbSystem", + ) + request_id: str = proto.Field( + proto.STRING, + number=4, + ) + + +class DeleteDbSystemRequest(proto.Message): + r"""The request for ``DbSystem.Delete``. + + Attributes: + name (str): + Required. The name of the DbSystem in the following format: + projects/{project}/locations/{location}/dbSystems/{db_system}. + request_id (str): + Optional. An optional ID to identify the + request. This value is used to identify + duplicate requests. If you make a request with + the same request ID and the original request is + still in progress or completed, the server + ignores the second request. This prevents + clients from accidentally creating duplicate + commitments. + + The request ID must be a valid UUID with the + exception that zero UUID is not supported + (00000000-0000-0000-0000-000000000000). + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + request_id: str = proto.Field( + proto.STRING, + number=2, + ) + + +class GetDbSystemRequest(proto.Message): + r"""The request for ``DbSystem.Get``. + + Attributes: + name (str): + Required. The name of the DbSystem in the following format: + projects/{project}/locations/{location}/dbSystems/{db_system}. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListDbSystemsRequest(proto.Message): + r"""The request for ``DbSystem.List``. + + Attributes: + parent (str): + Required. The parent value for DbSystems in + the following format: + projects/{project}/locations/{location}. + page_size (int): + Optional. The maximum number of items to + return. If unspecified, at most 50 DbSystems + will be returned. The maximum value is 1000; + values above 1000 will be coerced to 1000. + page_token (str): + Optional. 
A token identifying a page of + results the server should return. + filter (str): + Optional. An expression for filtering the + results of the request. + order_by (str): + Optional. An expression for ordering the + results of the request. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + filter: str = proto.Field( + proto.STRING, + number=4, + ) + order_by: str = proto.Field( + proto.STRING, + number=5, + ) + + +class ListDbSystemsResponse(proto.Message): + r"""The response for ``DbSystem.List``. + + Attributes: + db_systems (MutableSequence[google.cloud.oracledatabase_v1.types.DbSystem]): + The list of DbSystems. + next_page_token (str): + A token identifying a page of results the + server should return. + """ + + @property + def raw_page(self): + return self + + db_systems: MutableSequence["DbSystem"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="DbSystem", + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/db_system_initial_storage_size.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/db_system_initial_storage_size.py new file mode 100644 index 000000000000..4af1a31a0dab --- /dev/null +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/db_system_initial_storage_size.py @@ -0,0 +1,211 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +__protobuf__ = proto.module( + package="google.cloud.oracledatabase.v1", + manifest={ + "DbSystemInitialStorageSize", + "DbSystemInitialStorageSizeProperties", + "StorageSizeDetails", + "ListDbSystemInitialStorageSizesRequest", + "ListDbSystemInitialStorageSizesResponse", + }, +) + + +class DbSystemInitialStorageSize(proto.Message): + r"""Summary of the DbSystem initial storage size. + + Attributes: + name (str): + Output only. The name of the resource. + properties (google.cloud.oracledatabase_v1.types.DbSystemInitialStorageSizeProperties): + Output only. The properties of the DbSystem + initial storage size summary. + """ + + name: str = proto.Field( + proto.STRING, + number=2, + ) + properties: "DbSystemInitialStorageSizeProperties" = proto.Field( + proto.MESSAGE, + number=3, + message="DbSystemInitialStorageSizeProperties", + ) + + +class DbSystemInitialStorageSizeProperties(proto.Message): + r"""The properties of a DbSystem initial storage size summary. + + Attributes: + storage_management (google.cloud.oracledatabase_v1.types.DbSystemInitialStorageSizeProperties.StorageManagement): + Output only. The storage option used in DB + system. + shape_type (google.cloud.oracledatabase_v1.types.DbSystemInitialStorageSizeProperties.ShapeType): + Output only. VM shape platform type + storage_size_details (MutableSequence[google.cloud.oracledatabase_v1.types.StorageSizeDetails]): + Output only. List of storage disk details. 
+ launch_from_backup_storage_size_details (MutableSequence[google.cloud.oracledatabase_v1.types.StorageSizeDetails]): + Output only. List of storage disk details + available for launches from backup. + """ + + class StorageManagement(proto.Enum): + r"""The storage option used in the DB system. + + Values: + STORAGE_MANAGEMENT_UNSPECIFIED (0): + Unspecified storage management. + ASM (1): + Automatic Storage Management. + LVM (2): + Logical Volume Management. + """ + STORAGE_MANAGEMENT_UNSPECIFIED = 0 + ASM = 1 + LVM = 2 + + class ShapeType(proto.Enum): + r"""The shape type of the DB system. + + Values: + SHAPE_TYPE_UNSPECIFIED (0): + Unspecified shape type. + STANDARD_X86 (1): + Standard X86. + """ + SHAPE_TYPE_UNSPECIFIED = 0 + STANDARD_X86 = 1 + + storage_management: StorageManagement = proto.Field( + proto.ENUM, + number=1, + enum=StorageManagement, + ) + shape_type: ShapeType = proto.Field( + proto.ENUM, + number=2, + enum=ShapeType, + ) + storage_size_details: MutableSequence["StorageSizeDetails"] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message="StorageSizeDetails", + ) + launch_from_backup_storage_size_details: MutableSequence[ + "StorageSizeDetails" + ] = proto.RepeatedField( + proto.MESSAGE, + number=4, + message="StorageSizeDetails", + ) + + +class StorageSizeDetails(proto.Message): + r"""The initial storage size, in gigabytes, that is applicable + for virtual machine DBSystem. + + Attributes: + data_storage_size_in_gbs (int): + Output only. The data storage size, in + gigabytes, that is applicable for virtual + machine DBSystem. + reco_storage_size_in_gbs (int): + Output only. The RECO/REDO storage size, in + gigabytes, that is applicable for virtual + machine DBSystem. 
+ """ + + data_storage_size_in_gbs: int = proto.Field( + proto.INT32, + number=1, + ) + reco_storage_size_in_gbs: int = proto.Field( + proto.INT32, + number=2, + ) + + +class ListDbSystemInitialStorageSizesRequest(proto.Message): + r"""The request for ``DbSystemInitialStorageSizes.List``. + + Attributes: + parent (str): + Required. The parent value for the + DbSystemInitialStorageSize resource with the + format: projects/{project}/locations/{location} + page_size (int): + Optional. The maximum number of items to + return. If unspecified, a maximum of 50 + DbSystemInitialStorageSizes will be returned. + The maximum value is 1000; values above 1000 + will be reset to 1000. + page_token (str): + Optional. A token identifying the requested + page of results to return. All fields except the + filter should remain the same as in the request + that provided this page token. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListDbSystemInitialStorageSizesResponse(proto.Message): + r"""The response for ``DbSystemInitialStorageSizes.List``. + + Attributes: + db_system_initial_storage_sizes (MutableSequence[google.cloud.oracledatabase_v1.types.DbSystemInitialStorageSize]): + The list of DbSystemInitialStorageSizes. + next_page_token (str): + A token identifying a page of results the + server should return. 
+ """ + + @property + def raw_page(self): + return self + + db_system_initial_storage_sizes: MutableSequence[ + "DbSystemInitialStorageSize" + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="DbSystemInitialStorageSize", + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/db_version.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/db_version.py new file mode 100644 index 000000000000..49eafea76899 --- /dev/null +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/db_version.py @@ -0,0 +1,172 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +__protobuf__ = proto.module( + package="google.cloud.oracledatabase.v1", + manifest={ + "DbVersion", + "DbVersionProperties", + "ListDbVersionsRequest", + "ListDbVersionsResponse", + }, +) + + +class DbVersion(proto.Message): + r"""A valid Oracle Database version. + + Attributes: + name (str): + Output only. The name of the DbVersion resource in the + following format: + projects/{project}/locations/{region}/dbVersions/{db_version} + properties (google.cloud.oracledatabase_v1.types.DbVersionProperties): + Output only. 
The properties of the DbVersion. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + properties: "DbVersionProperties" = proto.Field( + proto.MESSAGE, + number=2, + message="DbVersionProperties", + ) + + +class DbVersionProperties(proto.Message): + r"""The properties of a DbVersion. + + Attributes: + version (str): + Output only. A valid Oracle Database version. + is_latest_for_major_version (bool): + Output only. True if this version of the + Oracle Database software is the latest version + for a release. + supports_pdb (bool): + Output only. True if this version of the + Oracle Database software supports pluggable + databases. + is_preview_db_version (bool): + Output only. True if this version of the + Oracle Database software is the preview version. + is_upgrade_supported (bool): + Output only. True if this version of the + Oracle Database software is supported for + Upgrade. + """ + + version: str = proto.Field( + proto.STRING, + number=1, + ) + is_latest_for_major_version: bool = proto.Field( + proto.BOOL, + number=2, + ) + supports_pdb: bool = proto.Field( + proto.BOOL, + number=3, + ) + is_preview_db_version: bool = proto.Field( + proto.BOOL, + number=4, + ) + is_upgrade_supported: bool = proto.Field( + proto.BOOL, + number=5, + ) + + +class ListDbVersionsRequest(proto.Message): + r"""The request for ``DbVersions.List``. + + Attributes: + parent (str): + Required. The parent value for the DbVersion + resource with the format: + projects/{project}/locations/{location} + page_size (int): + Optional. The maximum number of items to + return. If unspecified, a maximum of 50 + DbVersions will be returned. The maximum value + is 1000; values above 1000 will be reset to + 1000. + page_token (str): + Optional. A token identifying the requested + page of results to return. All fields except the + filter should remain the same as in the request + that provided this page token. + filter (str): + Optional. 
Filter expression that matches a subset of the + DbVersions to show. The supported filter for dbSystem + creation is + ``db_system_shape = {db_system_shape} AND storage_management = {storage_management}``. + If no filter is provided, all DbVersions will be returned. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + filter: str = proto.Field( + proto.STRING, + number=4, + ) + + +class ListDbVersionsResponse(proto.Message): + r"""The response for ``DbVersions.List``. + + Attributes: + db_versions (MutableSequence[google.cloud.oracledatabase_v1.types.DbVersion]): + The list of DbVersions. + next_page_token (str): + A token identifying a page of results the + server should return. + """ + + @property + def raw_page(self): + return self + + db_versions: MutableSequence["DbVersion"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="DbVersion", + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/exadata_infra.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/exadata_infra.py index bcf195de74b2..1c7e1d1dff01 100644 --- a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/exadata_infra.py +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/exadata_infra.py @@ -47,8 +47,10 @@ class CloudExadataInfrastructure(proto.Message): Optional. User friendly name for this resource. gcp_oracle_zone (str): - Optional. Google Cloud Platform location - where Oracle Exadata is hosted. + Optional. The GCP Oracle zone where Oracle + Exadata Infrastructure is hosted. Example: + us-east4-b-r2. If not specified, the system will + pick a zone based on availability. entitlement_id (str): Output only. 
Entitlement ID of the private offer against which this infrastructure resource @@ -187,6 +189,15 @@ class CloudExadataInfrastructureProperties(proto.Message): Output only. The monthly software version of the database servers (dom0) in the Exadata Infrastructure. Example: 20.1.15 + compute_model (google.cloud.oracledatabase_v1.types.ComputeModel): + Output only. The compute model of the Exadata + Infrastructure. + database_server_type (str): + Output only. The database server type of the + Exadata Infrastructure. + storage_server_type (str): + Output only. The storage server type of the + Exadata Infrastructure. """ class State(proto.Enum): @@ -336,6 +347,19 @@ class State(proto.Enum): proto.STRING, number=27, ) + compute_model: common.ComputeModel = proto.Field( + proto.ENUM, + number=31, + enum=common.ComputeModel, + ) + database_server_type: str = proto.Field( + proto.STRING, + number=29, + ) + storage_server_type: str = proto.Field( + proto.STRING, + number=30, + ) class MaintenanceWindow(proto.Message): diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/exadb_vm_cluster.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/exadb_vm_cluster.py new file mode 100644 index 000000000000..b76f7238ffce --- /dev/null +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/exadb_vm_cluster.py @@ -0,0 +1,379 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +from google.protobuf import timestamp_pb2 # type: ignore +from google.type import datetime_pb2 # type: ignore +import proto # type: ignore + +from google.cloud.oracledatabase_v1.types import common + +__protobuf__ = proto.module( + package="google.cloud.oracledatabase.v1", + manifest={ + "ExadbVmCluster", + "ExadbVmClusterStorageDetails", + "ExadbVmClusterProperties", + }, +) + + +class ExadbVmCluster(proto.Message): + r"""ExadbVmCluster represents a cluster of VMs that are used to + run Exadata workloads. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/ExadbVmCluster/ + + Attributes: + name (str): + Identifier. The name of the ExadbVmCluster resource in the + following format: + projects/{project}/locations/{region}/exadbVmClusters/{exadb_vm_cluster} + properties (google.cloud.oracledatabase_v1.types.ExadbVmClusterProperties): + Required. The properties of the + ExadbVmCluster. + gcp_oracle_zone (str): + Output only. Immutable. The GCP Oracle zone + where Oracle ExadbVmCluster is hosted. Example: + us-east4-b-r2. During creation, the system will + pick the zone assigned to the + ExascaleDbStorageVault. + labels (MutableMapping[str, str]): + Optional. The labels or tags associated with + the ExadbVmCluster. + odb_network (str): + Optional. Immutable. The name of the OdbNetwork associated + with the ExadbVmCluster. Format: + projects/{project}/locations/{location}/odbNetworks/{odb_network} + It is optional but if specified, this should match the + parent ODBNetwork of the OdbSubnet. + odb_subnet (str): + Required. Immutable. The name of the OdbSubnet associated + with the ExadbVmCluster for IP allocation. Format: + projects/{project}/locations/{location}/odbNetworks/{odb_network}/odbSubnets/{odb_subnet} + backup_odb_subnet (str): + Required. Immutable. 
The name of the backup OdbSubnet + associated with the ExadbVmCluster. Format: + projects/{project}/locations/{location}/odbNetworks/{odb_network}/odbSubnets/{odb_subnet} + display_name (str): + Required. Immutable. The display name for the + ExadbVmCluster. The name does not have to be + unique within your project. The name must be + 1-255 characters long and can only contain + alphanumeric characters. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The date and time that the + ExadbVmCluster was created. + entitlement_id (str): + Output only. The ID of the subscription + entitlement associated with the ExadbVmCluster. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + properties: "ExadbVmClusterProperties" = proto.Field( + proto.MESSAGE, + number=2, + message="ExadbVmClusterProperties", + ) + gcp_oracle_zone: str = proto.Field( + proto.STRING, + number=3, + ) + labels: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=5, + ) + odb_network: str = proto.Field( + proto.STRING, + number=6, + ) + odb_subnet: str = proto.Field( + proto.STRING, + number=7, + ) + backup_odb_subnet: str = proto.Field( + proto.STRING, + number=8, + ) + display_name: str = proto.Field( + proto.STRING, + number=9, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=10, + message=timestamp_pb2.Timestamp, + ) + entitlement_id: str = proto.Field( + proto.STRING, + number=11, + ) + + +class ExadbVmClusterStorageDetails(proto.Message): + r"""The storage allocation for the exadbvmcluster, in gigabytes + (GB). + + Attributes: + size_in_gbs_per_node (int): + Required. The storage allocation for the + exadbvmcluster per node, in gigabytes (GB). This + field is used to calculate the total storage + allocation for the exadbvmcluster. 
+ """ + + size_in_gbs_per_node: int = proto.Field( + proto.INT32, + number=2, + ) + + +class ExadbVmClusterProperties(proto.Message): + r"""The properties of an ExadbVmCluster. + + Attributes: + cluster_name (str): + Optional. Immutable. The cluster name for Exascale vm + cluster. The cluster name must begin with an alphabetic + character and may contain hyphens(-) but can not contain + underscores(\_). It should be not more than 11 characters + and is not case sensitive. OCI Cluster name. + grid_image_id (str): + Required. Immutable. Grid Infrastructure + Version. + node_count (int): + Required. The number of nodes/VMs in the + ExadbVmCluster. + enabled_ecpu_count_per_node (int): + Required. Immutable. The number of ECPUs + enabled per node for an exadata vm cluster on + exascale infrastructure. + additional_ecpu_count_per_node (int): + Optional. Immutable. The number of additional + ECPUs per node for an Exadata VM cluster on + exascale infrastructure. + vm_file_system_storage (google.cloud.oracledatabase_v1.types.ExadbVmClusterStorageDetails): + Required. Immutable. Total storage details + for the ExadbVmCluster. + license_model (google.cloud.oracledatabase_v1.types.ExadbVmClusterProperties.LicenseModel): + Optional. Immutable. The license type of the + ExadbVmCluster. + exascale_db_storage_vault (str): + Required. Immutable. The name of ExascaleDbStorageVault + associated with the ExadbVmCluster. It can refer to an + existing ExascaleDbStorageVault. Or a new one can be created + during the ExadbVmCluster creation (requires + storage_vault_properties to be set). Format: + projects/{project}/locations/{location}/exascaleDbStorageVaults/{exascale_db_storage_vault} + hostname_prefix (str): + Required. Immutable. Prefix for VM cluster + host names. + hostname (str): + Output only. The hostname of the + ExadbVmCluster. + ssh_public_keys (MutableSequence[str]): + Required. Immutable. The SSH public keys for + the ExadbVmCluster. 
+ data_collection_options (google.cloud.oracledatabase_v1.types.DataCollectionOptionsCommon): + Optional. Immutable. Indicates user + preference for data collection options. + time_zone (google.type.datetime_pb2.TimeZone): + Optional. Immutable. The time zone of the + ExadbVmCluster. + lifecycle_state (google.cloud.oracledatabase_v1.types.ExadbVmClusterProperties.ExadbVmClusterLifecycleState): + Output only. State of the cluster. + shape_attribute (google.cloud.oracledatabase_v1.types.ExadbVmClusterProperties.ShapeAttribute): + Required. Immutable. The shape attribute of the VM cluster. + The type of Exascale storage used for Exadata VM cluster. + The default is SMART_STORAGE which supports Oracle Database + 23ai and later + memory_size_gb (int): + Output only. Memory per VM (GB) (Read-only): + Shows the amount of memory allocated to each VM. + Memory is calculated based on 2.75 GB per Total + ECPUs. + scan_listener_port_tcp (int): + Optional. Immutable. SCAN listener port - TCP + oci_uri (str): + Output only. Deep link to the OCI console to + view this resource. + gi_version (str): + Output only. The Oracle Grid Infrastructure + (GI) software version. + """ + + class LicenseModel(proto.Enum): + r"""The Oracle license model that applies to the ExaScale VM + cluster + + Values: + LICENSE_MODEL_UNSPECIFIED (0): + Unspecified. + LICENSE_INCLUDED (1): + Default is license included. + BRING_YOUR_OWN_LICENSE (2): + Bring your own license. + """ + LICENSE_MODEL_UNSPECIFIED = 0 + LICENSE_INCLUDED = 1 + BRING_YOUR_OWN_LICENSE = 2 + + class ExadbVmClusterLifecycleState(proto.Enum): + r"""The various lifecycle states of the VM cluster. + + Values: + EXADB_VM_CLUSTER_LIFECYCLE_STATE_UNSPECIFIED (0): + Default unspecified value. + PROVISIONING (1): + Indicates that the resource is in + provisioning state. + AVAILABLE (2): + Indicates that the resource is in available + state. + UPDATING (3): + Indicates that the resource is in updating + state. 
+ TERMINATING (4): + Indicates that the resource is in terminating + state. + TERMINATED (5): + Indicates that the resource is in terminated + state. + FAILED (6): + Indicates that the resource is in failed + state. + MAINTENANCE_IN_PROGRESS (7): + Indicates that the resource is in maintenance + in progress state. + """ + EXADB_VM_CLUSTER_LIFECYCLE_STATE_UNSPECIFIED = 0 + PROVISIONING = 1 + AVAILABLE = 2 + UPDATING = 3 + TERMINATING = 4 + TERMINATED = 5 + FAILED = 6 + MAINTENANCE_IN_PROGRESS = 7 + + class ShapeAttribute(proto.Enum): + r"""The shape attribute of the VM cluster. The type of Exascale storage + used for Exadata VM cluster. The default is SMART_STORAGE which + supports Oracle Database 23ai and later + + Values: + SHAPE_ATTRIBUTE_UNSPECIFIED (0): + Default unspecified value. + SMART_STORAGE (1): + Indicates that the resource is in smart + storage. + BLOCK_STORAGE (2): + Indicates that the resource is in block + storage. + """ + SHAPE_ATTRIBUTE_UNSPECIFIED = 0 + SMART_STORAGE = 1 + BLOCK_STORAGE = 2 + + cluster_name: str = proto.Field( + proto.STRING, + number=1, + ) + grid_image_id: str = proto.Field( + proto.STRING, + number=2, + ) + node_count: int = proto.Field( + proto.INT32, + number=3, + ) + enabled_ecpu_count_per_node: int = proto.Field( + proto.INT32, + number=20, + ) + additional_ecpu_count_per_node: int = proto.Field( + proto.INT32, + number=21, + ) + vm_file_system_storage: "ExadbVmClusterStorageDetails" = proto.Field( + proto.MESSAGE, + number=6, + message="ExadbVmClusterStorageDetails", + ) + license_model: LicenseModel = proto.Field( + proto.ENUM, + number=7, + enum=LicenseModel, + ) + exascale_db_storage_vault: str = proto.Field( + proto.STRING, + number=8, + ) + hostname_prefix: str = proto.Field( + proto.STRING, + number=9, + ) + hostname: str = proto.Field( + proto.STRING, + number=10, + ) + ssh_public_keys: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=11, + ) + data_collection_options: 
common.DataCollectionOptionsCommon = proto.Field( + proto.MESSAGE, + number=12, + message=common.DataCollectionOptionsCommon, + ) + time_zone: datetime_pb2.TimeZone = proto.Field( + proto.MESSAGE, + number=13, + message=datetime_pb2.TimeZone, + ) + lifecycle_state: ExadbVmClusterLifecycleState = proto.Field( + proto.ENUM, + number=14, + enum=ExadbVmClusterLifecycleState, + ) + shape_attribute: ShapeAttribute = proto.Field( + proto.ENUM, + number=15, + enum=ShapeAttribute, + ) + memory_size_gb: int = proto.Field( + proto.INT32, + number=16, + ) + scan_listener_port_tcp: int = proto.Field( + proto.INT32, + number=17, + ) + oci_uri: str = proto.Field( + proto.STRING, + number=18, + ) + gi_version: str = proto.Field( + proto.STRING, + number=19, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/exascale_db_storage_vault.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/exascale_db_storage_vault.py new file mode 100644 index 000000000000..c7efff98917f --- /dev/null +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/exascale_db_storage_vault.py @@ -0,0 +1,456 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +from google.protobuf import timestamp_pb2 # type: ignore +from google.type import datetime_pb2 # type: ignore +import proto # type: ignore + +__protobuf__ = proto.module( + package="google.cloud.oracledatabase.v1", + manifest={ + "ExascaleDbStorageVault", + "ExascaleDbStorageVaultProperties", + "ExascaleDbStorageDetails", + "GetExascaleDbStorageVaultRequest", + "ListExascaleDbStorageVaultsRequest", + "ListExascaleDbStorageVaultsResponse", + "CreateExascaleDbStorageVaultRequest", + "DeleteExascaleDbStorageVaultRequest", + }, +) + + +class ExascaleDbStorageVault(proto.Message): + r"""ExascaleDbStorageVault represents a storage vault exadb vm + cluster resource. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/ExascaleDbStorageVault/ + + Attributes: + name (str): + Identifier. The resource name of the ExascaleDbStorageVault. + Format: + projects/{project}/locations/{location}/exascaleDbStorageVaults/{exascale_db_storage_vault} + display_name (str): + Required. The display name for the + ExascaleDbStorageVault. The name does not have + to be unique within your project. The name must + be 1-255 characters long and can only contain + alphanumeric characters. + gcp_oracle_zone (str): + Optional. The GCP Oracle zone where Oracle + ExascaleDbStorageVault is hosted. Example: + us-east4-b-r2. If not specified, the system will + pick a zone based on availability. + properties (google.cloud.oracledatabase_v1.types.ExascaleDbStorageVaultProperties): + Required. The properties of the + ExascaleDbStorageVault. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The date and time when the + ExascaleDbStorageVault was created. + entitlement_id (str): + Output only. The ID of the subscription + entitlement associated with the + ExascaleDbStorageVault. + labels (MutableMapping[str, str]): + Optional. 
The labels or tags associated with + the ExascaleDbStorageVault. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + display_name: str = proto.Field( + proto.STRING, + number=2, + ) + gcp_oracle_zone: str = proto.Field( + proto.STRING, + number=3, + ) + properties: "ExascaleDbStorageVaultProperties" = proto.Field( + proto.MESSAGE, + number=4, + message="ExascaleDbStorageVaultProperties", + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + entitlement_id: str = proto.Field( + proto.STRING, + number=6, + ) + labels: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=7, + ) + + +class ExascaleDbStorageVaultProperties(proto.Message): + r"""The properties of the ExascaleDbStorageVault. + next ID: 12 + + Attributes: + ocid (str): + Output only. The OCID for the + ExascaleDbStorageVault. + time_zone (google.type.datetime_pb2.TimeZone): + Output only. The time zone of the + ExascaleDbStorageVault. + exascale_db_storage_details (google.cloud.oracledatabase_v1.types.ExascaleDbStorageDetails): + Required. The storage details of the + ExascaleDbStorageVault. + state (google.cloud.oracledatabase_v1.types.ExascaleDbStorageVaultProperties.State): + Output only. The state of the + ExascaleDbStorageVault. + description (str): + Optional. The description of the + ExascaleDbStorageVault. + vm_cluster_ids (MutableSequence[str]): + Output only. The list of VM cluster OCIDs + associated with the ExascaleDbStorageVault. + vm_cluster_count (int): + Output only. The number of VM clusters + associated with the ExascaleDbStorageVault. + additional_flash_cache_percent (int): + Optional. The size of additional flash cache + in percentage of high capacity database storage. + oci_uri (str): + Output only. Deep link to the OCI console to + view this resource. 
+ attached_shape_attributes (MutableSequence[google.cloud.oracledatabase_v1.types.ExascaleDbStorageVaultProperties.ShapeAttribute]): + Output only. The shape attributes of the VM + clusters attached to the ExascaleDbStorageVault. + available_shape_attributes (MutableSequence[google.cloud.oracledatabase_v1.types.ExascaleDbStorageVaultProperties.ShapeAttribute]): + Output only. The shape attributes available + for the VM clusters to be attached to the + ExascaleDbStorageVault. + """ + + class State(proto.Enum): + r"""The state of the ExascaleDbStorageVault. + + Values: + STATE_UNSPECIFIED (0): + The state of the ExascaleDbStorageVault is + unspecified. + PROVISIONING (1): + The ExascaleDbStorageVault is being + provisioned. + AVAILABLE (2): + The ExascaleDbStorageVault is available. + UPDATING (3): + The ExascaleDbStorageVault is being updated. + TERMINATING (4): + The ExascaleDbStorageVault is being deleted. + TERMINATED (5): + The ExascaleDbStorageVault has been deleted. + FAILED (6): + The ExascaleDbStorageVault has failed. + """ + STATE_UNSPECIFIED = 0 + PROVISIONING = 1 + AVAILABLE = 2 + UPDATING = 3 + TERMINATING = 4 + TERMINATED = 5 + FAILED = 6 + + class ShapeAttribute(proto.Enum): + r"""The shape attribute of the VM clusters attached to the + ExascaleDbStorageVault. + + Values: + SHAPE_ATTRIBUTE_UNSPECIFIED (0): + Default unspecified value. + SMART_STORAGE (1): + Indicates that the resource is in smart + storage. + BLOCK_STORAGE (2): + Indicates that the resource is in block + storage. 
+ """ + SHAPE_ATTRIBUTE_UNSPECIFIED = 0 + SMART_STORAGE = 1 + BLOCK_STORAGE = 2 + + ocid: str = proto.Field( + proto.STRING, + number=1, + ) + time_zone: datetime_pb2.TimeZone = proto.Field( + proto.MESSAGE, + number=2, + message=datetime_pb2.TimeZone, + ) + exascale_db_storage_details: "ExascaleDbStorageDetails" = proto.Field( + proto.MESSAGE, + number=3, + message="ExascaleDbStorageDetails", + ) + state: State = proto.Field( + proto.ENUM, + number=4, + enum=State, + ) + description: str = proto.Field( + proto.STRING, + number=5, + ) + vm_cluster_ids: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=6, + ) + vm_cluster_count: int = proto.Field( + proto.INT32, + number=9, + ) + additional_flash_cache_percent: int = proto.Field( + proto.INT32, + number=7, + ) + oci_uri: str = proto.Field( + proto.STRING, + number=8, + ) + attached_shape_attributes: MutableSequence[ShapeAttribute] = proto.RepeatedField( + proto.ENUM, + number=10, + enum=ShapeAttribute, + ) + available_shape_attributes: MutableSequence[ShapeAttribute] = proto.RepeatedField( + proto.ENUM, + number=11, + enum=ShapeAttribute, + ) + + +class ExascaleDbStorageDetails(proto.Message): + r"""The storage details of the ExascaleDbStorageVault. + + Attributes: + available_size_gbs (int): + Output only. The available storage capacity + for the ExascaleDbStorageVault, in gigabytes + (GB). + total_size_gbs (int): + Required. The total storage allocation for + the ExascaleDbStorageVault, in gigabytes (GB). + """ + + available_size_gbs: int = proto.Field( + proto.INT32, + number=1, + ) + total_size_gbs: int = proto.Field( + proto.INT32, + number=2, + ) + + +class GetExascaleDbStorageVaultRequest(proto.Message): + r"""The request for ``ExascaleDbStorageVault.Get``. + + Attributes: + name (str): + Required. The name of the ExascaleDbStorageVault in the + following format: + projects/{project}/locations/{location}/exascaleDbStorageVaults/{exascale_db_storage_vault}. 
+ """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListExascaleDbStorageVaultsRequest(proto.Message): + r"""The request for ``ExascaleDbStorageVault.List``. + + Attributes: + parent (str): + Required. The parent value for + ExascaleDbStorageVault in the following format: + projects/{project}/locations/{location}. + page_size (int): + Optional. The maximum number of items to + return. If unspecified, at most 50 + ExascaleDbStorageVaults will be returned. The + maximum value is 1000; values above 1000 will be + coerced to 1000. + page_token (str): + Optional. A token identifying a page of + results the server should return. + filter (str): + Optional. An expression for filtering the + results of the request. Filter the list as + specified in https://google.aip.dev/160. + order_by (str): + Optional. An expression for ordering the + results of the request. Order results as + specified in https://google.aip.dev/132. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + filter: str = proto.Field( + proto.STRING, + number=4, + ) + order_by: str = proto.Field( + proto.STRING, + number=5, + ) + + +class ListExascaleDbStorageVaultsResponse(proto.Message): + r"""The response for ``ExascaleDbStorageVault.List``. + + Attributes: + exascale_db_storage_vaults (MutableSequence[google.cloud.oracledatabase_v1.types.ExascaleDbStorageVault]): + The ExascaleDbStorageVaults. + next_page_token (str): + A token identifying a page of results the + server should return. If present, the next page + token can be provided to a subsequent + ListExascaleDbStorageVaults call to list the + next page. If empty, there are no more pages. 
+ """ + + @property + def raw_page(self): + return self + + exascale_db_storage_vaults: MutableSequence[ + "ExascaleDbStorageVault" + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="ExascaleDbStorageVault", + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class CreateExascaleDbStorageVaultRequest(proto.Message): + r"""The request for ``ExascaleDbStorageVault.Create``. + + Attributes: + parent (str): + Required. The value for parent of the + ExascaleDbStorageVault in the following format: + projects/{project}/locations/{location}. + exascale_db_storage_vault_id (str): + Required. The ID of the ExascaleDbStorageVault to create. + This value is restricted to + (^\ `a-z <[a-z0-9-]{0,61}[a-z0-9]>`__?$) and must be a + maximum of 63 characters in length. The value must start + with a letter and end with a letter or a number. + exascale_db_storage_vault (google.cloud.oracledatabase_v1.types.ExascaleDbStorageVault): + Required. The resource being created. + request_id (str): + Optional. An optional request ID to identify + requests. Specify a unique request ID so that if + you must retry your request, the server will + know to ignore the request if it has already + been completed. The server will guarantee that + for at least 60 minutes since the first request. + + For example, consider a situation where you make + an initial request and the request times out. If + you make the request again with the same request + ID, the server can check if original operation + with the same request ID was received, and if + so, will ignore the second request. This + prevents clients from accidentally creating + duplicate commitments. + + The request ID must be a valid UUID with the + exception that zero UUID is not supported + (00000000-0000-0000-0000-000000000000). 
+ """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + exascale_db_storage_vault_id: str = proto.Field( + proto.STRING, + number=2, + ) + exascale_db_storage_vault: "ExascaleDbStorageVault" = proto.Field( + proto.MESSAGE, + number=3, + message="ExascaleDbStorageVault", + ) + request_id: str = proto.Field( + proto.STRING, + number=4, + ) + + +class DeleteExascaleDbStorageVaultRequest(proto.Message): + r"""The request message for ``ExascaleDbStorageVault.Delete``. + + Attributes: + name (str): + Required. The name of the ExascaleDbStorageVault in the + following format: + projects/{project}/locations/{location}/exascaleDbStorageVaults/{exascale_db_storage_vault}. + request_id (str): + Optional. An optional ID to identify the + request. This value is used to identify + duplicate requests. If you make a request with + the same request ID and the original request is + still in progress or completed, the server + ignores the second request. This prevents + clients from accidentally creating duplicate + commitments. + + The request ID must be a valid UUID with the + exception that zero UUID is not supported + (00000000-0000-0000-0000-000000000000). + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + request_id: str = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/minor_version.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/minor_version.py new file mode 100644 index 000000000000..573a0edba72a --- /dev/null +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/minor_version.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +__protobuf__ = proto.module( + package="google.cloud.oracledatabase.v1", + manifest={ + "MinorVersion", + "ListMinorVersionsRequest", + "ListMinorVersionsResponse", + }, +) + + +class MinorVersion(proto.Message): + r"""MinorVersion represents a minor version of a GI. + https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/GiMinorVersionSummary/ + + Attributes: + name (str): + Identifier. The name of the MinorVersion resource with the + format: + projects/{project}/locations/{region}/giVersions/{gi_version}/minorVersions/{minor_version} + grid_image_id (str): + Optional. The ID of the Grid Image. + version (str): + Optional. The valid Oracle grid + infrastructure software version. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + grid_image_id: str = proto.Field( + proto.STRING, + number=2, + ) + version: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListMinorVersionsRequest(proto.Message): + r"""The request for ``MinorVersion.List``. + + Attributes: + parent (str): + Required. The parent value for the MinorVersion resource + with the format: + projects/{project}/locations/{location}/giVersions/{gi_version} + page_size (int): + Optional. The maximum number of items to + return. If unspecified, a maximum of 50 System + Versions will be returned. The maximum value is + 1000; values above 1000 will be reset to 1000. + page_token (str): + Optional. 
A token identifying the requested + page of results to return. All fields except the + filter should remain the same as in the request + that provided this page token. + filter (str): + Optional. An expression for filtering the results of the + request. Only shapeFamily and gcp_oracle_zone_id are + supported in this format: + ``shape_family="{shapeFamily}" AND gcp_oracle_zone_id="{gcp_oracle_zone_id}"``. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + filter: str = proto.Field( + proto.STRING, + number=4, + ) + + +class ListMinorVersionsResponse(proto.Message): + r"""The response for ``MinorVersion.List``. + + Attributes: + minor_versions (MutableSequence[google.cloud.oracledatabase_v1.types.MinorVersion]): + The list of MinorVersions. + next_page_token (str): + A token identifying a page of results the + server should return. + """ + + @property + def raw_page(self): + return self + + minor_versions: MutableSequence["MinorVersion"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="MinorVersion", + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/odb_network.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/odb_network.py new file mode 100644 index 000000000000..cd1a9ec8866c --- /dev/null +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/odb_network.py @@ -0,0 +1,302 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +from google.protobuf import timestamp_pb2 # type: ignore +import proto # type: ignore + +__protobuf__ = proto.module( + package="google.cloud.oracledatabase.v1", + manifest={ + "OdbNetwork", + "CreateOdbNetworkRequest", + "DeleteOdbNetworkRequest", + "ListOdbNetworksRequest", + "ListOdbNetworksResponse", + "GetOdbNetworkRequest", + }, +) + + +class OdbNetwork(proto.Message): + r"""Represents OdbNetwork resource. + + Attributes: + name (str): + Identifier. The name of the OdbNetwork resource in the + following format: + projects/{project}/locations/{region}/odbNetworks/{odb_network} + network (str): + Required. The name of the VPC network in the + following format: + projects/{project}/global/networks/{network} + labels (MutableMapping[str, str]): + Optional. Labels or tags associated with the + resource. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The date and time that the + OdbNetwork was created. + state (google.cloud.oracledatabase_v1.types.OdbNetwork.State): + Output only. State of the ODB Network. + entitlement_id (str): + Output only. The ID of the subscription + entitlement associated with the OdbNetwork. + gcp_oracle_zone (str): + Optional. The GCP Oracle zone where + OdbNetwork is hosted. Example: us-east4-b-r2. If + not specified, the system will pick a zone based + on availability. + """ + + class State(proto.Enum): + r"""The various lifecycle states of the ODB Network. 
+ + Values: + STATE_UNSPECIFIED (0): + Default unspecified value. + PROVISIONING (1): + Indicates that the resource is in + provisioning state. + AVAILABLE (2): + Indicates that the resource is in available + state. + TERMINATING (3): + Indicates that the resource is in terminating + state. + FAILED (4): + Indicates that the resource is in failed + state. + """ + STATE_UNSPECIFIED = 0 + PROVISIONING = 1 + AVAILABLE = 2 + TERMINATING = 3 + FAILED = 4 + + name: str = proto.Field( + proto.STRING, + number=1, + ) + network: str = proto.Field( + proto.STRING, + number=2, + ) + labels: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=3, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + state: State = proto.Field( + proto.ENUM, + number=5, + enum=State, + ) + entitlement_id: str = proto.Field( + proto.STRING, + number=6, + ) + gcp_oracle_zone: str = proto.Field( + proto.STRING, + number=7, + ) + + +class CreateOdbNetworkRequest(proto.Message): + r"""The request for ``OdbNetwork.Create``. + + Attributes: + parent (str): + Required. The parent value for the OdbNetwork + in the following format: + projects/{project}/locations/{location}. + odb_network_id (str): + Required. The ID of the OdbNetwork to create. This value is + restricted to (^\ `a-z <[a-z0-9-]{0,61}[a-z0-9]>`__?$) and + must be a maximum of 63 characters in length. The value must + start with a letter and end with a letter or a number. + odb_network (google.cloud.oracledatabase_v1.types.OdbNetwork): + Required. Details of the OdbNetwork instance + to create. + request_id (str): + Optional. An optional ID to identify the + request. This value is used to identify + duplicate requests. If you make a request with + the same request ID and the original request is + still in progress or completed, the server + ignores the second request. 
This prevents + clients from accidentally creating duplicate + commitments. + + The request ID must be a valid UUID with the + exception that zero UUID is not supported + (00000000-0000-0000-0000-000000000000). + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + odb_network_id: str = proto.Field( + proto.STRING, + number=2, + ) + odb_network: "OdbNetwork" = proto.Field( + proto.MESSAGE, + number=3, + message="OdbNetwork", + ) + request_id: str = proto.Field( + proto.STRING, + number=4, + ) + + +class DeleteOdbNetworkRequest(proto.Message): + r"""The request for ``OdbNetwork.Delete``. + + Attributes: + name (str): + Required. The name of the resource in the following format: + projects/{project}/locations/{location}/odbNetworks/{odb_network}. + request_id (str): + Optional. An optional ID to identify the + request. This value is used to identify + duplicate requests. If you make a request with + the same request ID and the original request is + still in progress or completed, the server + ignores the second request. This prevents + clients from accidentally creating duplicate + commitments. + + The request ID must be a valid UUID with the + exception that zero UUID is not supported + (00000000-0000-0000-0000-000000000000). + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + request_id: str = proto.Field( + proto.STRING, + number=2, + ) + + +class ListOdbNetworksRequest(proto.Message): + r"""The request for ``OdbNetwork.List``. + + Attributes: + parent (str): + Required. The parent value for the ODB + Network in the following format: + projects/{project}/locations/{location}. + page_size (int): + Optional. The maximum number of items to + return. If unspecified, at most 50 ODB Networks + will be returned. The maximum value is 1000; + values above 1000 will be coerced to 1000. + page_token (str): + Optional. A token identifying a page of + results the server should return. + filter (str): + Optional. 
An expression for filtering the + results of the request. + order_by (str): + Optional. An expression for ordering the + results of the request. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + filter: str = proto.Field( + proto.STRING, + number=4, + ) + order_by: str = proto.Field( + proto.STRING, + number=5, + ) + + +class ListOdbNetworksResponse(proto.Message): + r"""The response for ``OdbNetwork.List``. + + Attributes: + odb_networks (MutableSequence[google.cloud.oracledatabase_v1.types.OdbNetwork]): + The list of ODB Networks. + next_page_token (str): + A token identifying a page of results the + server should return. + unreachable (MutableSequence[str]): + Unreachable locations when listing resources + across all locations using wildcard location + '-'. + """ + + @property + def raw_page(self): + return self + + odb_networks: MutableSequence["OdbNetwork"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="OdbNetwork", + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + unreachable: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=3, + ) + + +class GetOdbNetworkRequest(proto.Message): + r"""The request for ``OdbNetwork.Get``. + + Attributes: + name (str): + Required. The name of the OdbNetwork in the following + format: + projects/{project}/locations/{location}/odbNetworks/{odb_network}. 
+ """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/odb_subnet.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/odb_subnet.py new file mode 100644 index 000000000000..2295638104b9 --- /dev/null +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/odb_subnet.py @@ -0,0 +1,305 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +from google.protobuf import timestamp_pb2 # type: ignore +import proto # type: ignore + +__protobuf__ = proto.module( + package="google.cloud.oracledatabase.v1", + manifest={ + "OdbSubnet", + "CreateOdbSubnetRequest", + "DeleteOdbSubnetRequest", + "ListOdbSubnetsRequest", + "ListOdbSubnetsResponse", + "GetOdbSubnetRequest", + }, +) + + +class OdbSubnet(proto.Message): + r"""Represents OdbSubnet resource. + + Attributes: + name (str): + Identifier. The name of the OdbSubnet resource in the + following format: + projects/{project}/locations/{location}/odbNetworks/{odb_network}/odbSubnets/{odb_subnet} + cidr_range (str): + Required. The CIDR range of the subnet. + purpose (google.cloud.oracledatabase_v1.types.OdbSubnet.Purpose): + Required. Purpose of the subnet. 
+ labels (MutableMapping[str, str]): + Optional. Labels or tags associated with the + resource. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The date and time that the + OdbNetwork was created. + state (google.cloud.oracledatabase_v1.types.OdbSubnet.State): + Output only. State of the ODB Subnet. + """ + + class Purpose(proto.Enum): + r"""Purpose available for the subnet. + + Values: + PURPOSE_UNSPECIFIED (0): + Default unspecified value. + CLIENT_SUBNET (1): + Subnet to be used for client connections. + BACKUP_SUBNET (2): + Subnet to be used for backup. + """ + PURPOSE_UNSPECIFIED = 0 + CLIENT_SUBNET = 1 + BACKUP_SUBNET = 2 + + class State(proto.Enum): + r"""The various lifecycle states of the ODB Subnet. + + Values: + STATE_UNSPECIFIED (0): + Default unspecified value. + PROVISIONING (1): + Indicates that the resource is in + provisioning state. + AVAILABLE (2): + Indicates that the resource is in available + state. + TERMINATING (3): + Indicates that the resource is in terminating + state. + FAILED (4): + Indicates that the resource is in failed + state. + """ + STATE_UNSPECIFIED = 0 + PROVISIONING = 1 + AVAILABLE = 2 + TERMINATING = 3 + FAILED = 4 + + name: str = proto.Field( + proto.STRING, + number=1, + ) + cidr_range: str = proto.Field( + proto.STRING, + number=2, + ) + purpose: Purpose = proto.Field( + proto.ENUM, + number=3, + enum=Purpose, + ) + labels: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=4, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + state: State = proto.Field( + proto.ENUM, + number=6, + enum=State, + ) + + +class CreateOdbSubnetRequest(proto.Message): + r"""The request for ``OdbSubnet.Create``. + + Attributes: + parent (str): + Required. The parent value for the OdbSubnet in the + following format: + projects/{project}/locations/{location}/odbNetworks/{odb_network}. 
+ odb_subnet_id (str): + Required. The ID of the OdbSubnet to create. This value is + restricted to (^\ `a-z <[a-z0-9-]{0,61}[a-z0-9]>`__?$) and + must be a maximum of 63 characters in length. The value must + start with a letter and end with a letter or a number. + odb_subnet (google.cloud.oracledatabase_v1.types.OdbSubnet): + Required. Details of the OdbSubnet instance + to create. + request_id (str): + Optional. An optional ID to identify the + request. This value is used to identify + duplicate requests. If you make a request with + the same request ID and the original request is + still in progress or completed, the server + ignores the second request. This prevents + clients from accidentally creating duplicate + commitments. + + The request ID must be a valid UUID with the + exception that zero UUID is not supported + (00000000-0000-0000-0000-000000000000). + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + odb_subnet_id: str = proto.Field( + proto.STRING, + number=2, + ) + odb_subnet: "OdbSubnet" = proto.Field( + proto.MESSAGE, + number=3, + message="OdbSubnet", + ) + request_id: str = proto.Field( + proto.STRING, + number=4, + ) + + +class DeleteOdbSubnetRequest(proto.Message): + r"""The request for ``OdbSubnet.Delete``. + + Attributes: + name (str): + Required. The name of the resource in the following format: + projects/{project}/locations/{region}/odbNetworks/{odb_network}/odbSubnets/{odb_subnet}. + request_id (str): + Optional. An optional ID to identify the + request. This value is used to identify + duplicate requests. If you make a request with + the same request ID and the original request is + still in progress or completed, the server + ignores the second request. This prevents + clients from accidentally creating duplicate + commitments. + + The request ID must be a valid UUID with the + exception that zero UUID is not supported + (00000000-0000-0000-0000-000000000000). 
+ """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + request_id: str = proto.Field( + proto.STRING, + number=2, + ) + + +class ListOdbSubnetsRequest(proto.Message): + r"""The request for ``OdbSubnet.List``. + + Attributes: + parent (str): + Required. The parent value for the OdbSubnet in the + following format: + projects/{project}/locations/{location}/odbNetworks/{odb_network}. + page_size (int): + Optional. The maximum number of items to + return. If unspecified, at most 50 ODB Networks + will be returned. The maximum value is 1000; + values above 1000 will be coerced to 1000. + page_token (str): + Optional. A token identifying a page of + results the server should return. + filter (str): + Optional. An expression for filtering the + results of the request. + order_by (str): + Optional. An expression for ordering the + results of the request. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + filter: str = proto.Field( + proto.STRING, + number=4, + ) + order_by: str = proto.Field( + proto.STRING, + number=5, + ) + + +class ListOdbSubnetsResponse(proto.Message): + r"""The response for ``OdbSubnet.List``. + + Attributes: + odb_subnets (MutableSequence[google.cloud.oracledatabase_v1.types.OdbSubnet]): + The list of ODB Subnets. + next_page_token (str): + A token identifying a page of results the + server should return. + unreachable (MutableSequence[str]): + Unreachable locations when listing resources + across all locations using wildcard location + '-'. 
+ """ + + @property + def raw_page(self): + return self + + odb_subnets: MutableSequence["OdbSubnet"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="OdbSubnet", + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + unreachable: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=3, + ) + + +class GetOdbSubnetRequest(proto.Message): + r"""The request for ``OdbSubnet.Get``. + + Attributes: + name (str): + Required. The name of the OdbSubnet in the following format: + projects/{project}/locations/{location}/odbNetworks/{odb_network}/odbSubnets/{odb_subnet}. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/oracledatabase.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/oracledatabase.py index 02391acfd393..1f6eaed4819b 100644 --- a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/oracledatabase.py +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/oracledatabase.py @@ -17,6 +17,7 @@ from typing import MutableMapping, MutableSequence +from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore import proto # type: ignore @@ -29,12 +30,14 @@ db_system_shape, entitlement, exadata_infra, - gi_version, - vm_cluster, ) from google.cloud.oracledatabase_v1.types import ( autonomous_database as gco_autonomous_database, ) +from google.cloud.oracledatabase_v1.types import ( + exadb_vm_cluster as gco_exadb_vm_cluster, +) +from google.cloud.oracledatabase_v1.types import gi_version, vm_cluster __protobuf__ = proto.module( package="google.cloud.oracledatabase.v1", @@ -64,11 +67,14 @@ "ListAutonomousDatabasesResponse", "GetAutonomousDatabaseRequest", "CreateAutonomousDatabaseRequest", + "UpdateAutonomousDatabaseRequest", 
"DeleteAutonomousDatabaseRequest", "RestoreAutonomousDatabaseRequest", "StopAutonomousDatabaseRequest", "StartAutonomousDatabaseRequest", "RestartAutonomousDatabaseRequest", + "SwitchoverAutonomousDatabaseRequest", + "FailoverAutonomousDatabaseRequest", "GenerateAutonomousDatabaseWalletRequest", "GenerateAutonomousDatabaseWalletResponse", "ListAutonomousDbVersionsRequest", @@ -77,6 +83,13 @@ "ListAutonomousDatabaseCharacterSetsResponse", "ListAutonomousDatabaseBackupsRequest", "ListAutonomousDatabaseBackupsResponse", + "CreateExadbVmClusterRequest", + "DeleteExadbVmClusterRequest", + "GetExadbVmClusterRequest", + "ListExadbVmClustersRequest", + "ListExadbVmClustersResponse", + "UpdateExadbVmClusterRequest", + "RemoveVirtualMachineExadbVmClusterRequest", }, ) @@ -98,6 +111,12 @@ class ListCloudExadataInfrastructuresRequest(proto.Message): page_token (str): Optional. A token identifying a page of results the server should return. + filter (str): + Optional. An expression for filtering the + results of the request. + order_by (str): + Optional. An expression for ordering the + results of the request. """ parent: str = proto.Field( @@ -112,6 +131,14 @@ class ListCloudExadataInfrastructuresRequest(proto.Message): proto.STRING, number=3, ) + filter: str = proto.Field( + proto.STRING, + number=4, + ) + order_by: str = proto.Field( + proto.STRING, + number=5, + ) class ListCloudExadataInfrastructuresResponse(proto.Message): @@ -546,6 +573,7 @@ class ListDbNodesRequest(proto.Message): Required. The parent value for database node in the following format: projects/{project}/locations/{location}/cloudVmClusters/{cloudVmCluster}. + . page_size (int): Optional. The maximum number of items to return. If unspecified, at most 50 db nodes will @@ -613,6 +641,10 @@ class ListGiVersionsRequest(proto.Message): page_token (str): Optional. A token identifying a page of results the server should return. + filter (str): + Optional. An expression for filtering the results of the + request. 
Only the shape, gcp_oracle_zone and gi_version + fields are supported in this format: ``shape="{shape}"``. """ parent: str = proto.Field( @@ -627,6 +659,10 @@ class ListGiVersionsRequest(proto.Message): proto.STRING, number=3, ) + filter: str = proto.Field( + proto.STRING, + number=4, + ) class ListGiVersionsResponse(proto.Message): @@ -673,6 +709,10 @@ class ListDbSystemShapesRequest(proto.Message): page_token (str): Optional. A token identifying a page of results the server should return. + filter (str): + Optional. An expression for filtering the results of the + request. Only the gcp_oracle_zone_id field is supported in + this format: ``gcp_oracle_zone_id="{gcp_oracle_zone_id}"``. """ parent: str = proto.Field( @@ -687,6 +727,10 @@ class ListDbSystemShapesRequest(proto.Message): proto.STRING, number=3, ) + filter: str = proto.Field( + proto.STRING, + number=4, + ) class ListDbSystemShapesResponse(proto.Message): @@ -927,6 +971,50 @@ class CreateAutonomousDatabaseRequest(proto.Message): ) +class UpdateAutonomousDatabaseRequest(proto.Message): + r"""The request for ``AutonomousDatabase.Update``. + + Attributes: + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. Field mask is used to specify the fields to be + overwritten in the Exadata resource by the update. The + fields specified in the update_mask are relative to the + resource, not the full request. A field will be overwritten + if it is in the mask. If the user does not provide a mask + then all fields will be overwritten. + autonomous_database (google.cloud.oracledatabase_v1.types.AutonomousDatabase): + Required. The resource being updated + request_id (str): + Optional. An optional ID to identify the + request. This value is used to identify + duplicate requests. If you make a request with + the same request ID and the original request is + still in progress or completed, the server + ignores the second request. This prevents + clients from accidentally creating duplicate + commitments. 
+ + The request ID must be a valid UUID with the + exception that zero UUID is not supported + (00000000-0000-0000-0000-000000000000). + """ + + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=1, + message=field_mask_pb2.FieldMask, + ) + autonomous_database: gco_autonomous_database.AutonomousDatabase = proto.Field( + proto.MESSAGE, + number=2, + message=gco_autonomous_database.AutonomousDatabase, + ) + request_id: str = proto.Field( + proto.STRING, + number=3, + ) + + class DeleteAutonomousDatabaseRequest(proto.Message): r"""The request for ``AutonomousDatabase.Delete``. @@ -1031,6 +1119,52 @@ class RestartAutonomousDatabaseRequest(proto.Message): ) +class SwitchoverAutonomousDatabaseRequest(proto.Message): + r"""The request for ``OracleDatabase.SwitchoverAutonomousDatabase``. + + Attributes: + name (str): + Required. The name of the Autonomous Database in the + following format: + projects/{project}/locations/{location}/autonomousDatabases/{autonomous_database}. + peer_autonomous_database (str): + Required. The peer database name to switch + over to. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + peer_autonomous_database: str = proto.Field( + proto.STRING, + number=2, + ) + + +class FailoverAutonomousDatabaseRequest(proto.Message): + r"""The request for ``OracleDatabase.FailoverAutonomousDatabase``. + + Attributes: + name (str): + Required. The name of the Autonomous Database in the + following format: + projects/{project}/locations/{location}/autonomousDatabases/{autonomous_database}. + peer_autonomous_database (str): + Required. The peer database name to fail over + to. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + peer_autonomous_database: str = proto.Field( + proto.STRING, + number=2, + ) + + class GenerateAutonomousDatabaseWalletRequest(proto.Message): r"""The request for ``AutonomousDatabase.GenerateWallet``. 
@@ -1292,4 +1426,269 @@ def raw_page(self): ) +class CreateExadbVmClusterRequest(proto.Message): + r"""The request for ``ExadbVmCluster.Create``. + + Attributes: + parent (str): + Required. The value for parent of the + ExadbVmCluster in the following format: + projects/{project}/locations/{location}. + exadb_vm_cluster_id (str): + Required. The ID of the ExadbVmCluster to create. This value + is restricted to (^\ `a-z <[a-z0-9-]{0,61}[a-z0-9]>`__?$) + and must be a maximum of 63 characters in length. The value + must start with a letter and end with a letter or a number. + exadb_vm_cluster (google.cloud.oracledatabase_v1.types.ExadbVmCluster): + Required. The resource being created. + request_id (str): + Optional. An optional request ID to identify + requests. Specify a unique request ID so that if + you must retry your request, the server will + know to ignore the request if it has already + been completed. The server will guarantee that + for at least 60 minutes since the first request. + + For example, consider a situation where you make + an initial request and the request times out. If + you make the request again with the same request + ID, the server can check if original operation + with the same request ID was received, and if + so, will ignore the second request. This + prevents clients from accidentally creating + duplicate commitments. + + The request ID must be a valid UUID with the + exception that zero UUID is not supported + (00000000-0000-0000-0000-000000000000). + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + exadb_vm_cluster_id: str = proto.Field( + proto.STRING, + number=2, + ) + exadb_vm_cluster: gco_exadb_vm_cluster.ExadbVmCluster = proto.Field( + proto.MESSAGE, + number=3, + message=gco_exadb_vm_cluster.ExadbVmCluster, + ) + request_id: str = proto.Field( + proto.STRING, + number=4, + ) + + +class DeleteExadbVmClusterRequest(proto.Message): + r"""The request for ``ExadbVmCluster.Delete``. 
+ + Attributes: + name (str): + Required. The name of the ExadbVmCluster in the following + format: + projects/{project}/locations/{location}/exadbVmClusters/{exadb_vm_cluster}. + request_id (str): + Optional. An optional ID to identify the + request. This value is used to identify + duplicate requests. If you make a request with + the same request ID and the original request is + still in progress or completed, the server + ignores the second request. This prevents + clients from accidentally creating duplicate + commitments. + + The request ID must be a valid UUID with the + exception that zero UUID is not supported + (00000000-0000-0000-0000-000000000000). + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + request_id: str = proto.Field( + proto.STRING, + number=2, + ) + + +class GetExadbVmClusterRequest(proto.Message): + r"""The request for ``ExadbVmCluster.Get``. + + Attributes: + name (str): + Required. The name of the ExadbVmCluster in the following + format: + projects/{project}/locations/{location}/exadbVmClusters/{exadb_vm_cluster}. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListExadbVmClustersRequest(proto.Message): + r"""The request for ``ExadbVmCluster.List``. + + Attributes: + parent (str): + Required. The parent value for + ExadbVmClusters in the following format: + projects/{project}/locations/{location}. + page_size (int): + Optional. The maximum number of items to + return. If unspecified, at most 50 + ExadbVmClusters will be returned. The maximum + value is 1000; values above 1000 will be coerced + to 1000. + page_token (str): + Optional. A token identifying a page of + results the server should return. + filter (str): + Optional. An expression for filtering the + results of the request. + order_by (str): + Optional. An expression for ordering the + results of the request. 
+ """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + filter: str = proto.Field( + proto.STRING, + number=4, + ) + order_by: str = proto.Field( + proto.STRING, + number=5, + ) + + +class ListExadbVmClustersResponse(proto.Message): + r"""The response for ``ExadbVmCluster.List``. + + Attributes: + exadb_vm_clusters (MutableSequence[google.cloud.oracledatabase_v1.types.ExadbVmCluster]): + The list of ExadbVmClusters. + next_page_token (str): + A token identifying a page of results the + server should return. + """ + + @property + def raw_page(self): + return self + + exadb_vm_clusters: MutableSequence[ + gco_exadb_vm_cluster.ExadbVmCluster + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gco_exadb_vm_cluster.ExadbVmCluster, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateExadbVmClusterRequest(proto.Message): + r"""The request for ``ExadbVmCluster.Update``. We only support adding + the Virtual Machine to the ExadbVmCluster. Rest of the fields in + ExadbVmCluster are immutable. + + Attributes: + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. A mask specifying which fields in + th VM Cluster should be updated. A field + specified in the mask is overwritten. If a mask + isn't provided then all the fields in the VM + Cluster are overwritten. + exadb_vm_cluster (google.cloud.oracledatabase_v1.types.ExadbVmCluster): + Required. The resource being updated. + request_id (str): + Optional. An optional ID to identify the + request. This value is used to identify + duplicate requests. If you make a request with + the same request ID and the original request is + still in progress or completed, the server + ignores the second request. This prevents + clients from accidentally creating duplicate + commitments. 
+ + The request ID must be a valid UUID with the + exception that zero UUID is not supported + (00000000-0000-0000-0000-000000000000). + """ + + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=1, + message=field_mask_pb2.FieldMask, + ) + exadb_vm_cluster: gco_exadb_vm_cluster.ExadbVmCluster = proto.Field( + proto.MESSAGE, + number=2, + message=gco_exadb_vm_cluster.ExadbVmCluster, + ) + request_id: str = proto.Field( + proto.STRING, + number=3, + ) + + +class RemoveVirtualMachineExadbVmClusterRequest(proto.Message): + r"""The request for ``ExadbVmCluster.RemoveVirtualMachine``. + + Attributes: + name (str): + Required. The name of the ExadbVmCluster in the following + format: + projects/{project}/locations/{location}/exadbVmClusters/{exadb_vm_cluster}. + request_id (str): + Optional. An optional ID to identify the + request. This value is used to identify + duplicate requests. If you make a request with + the same request ID and the original request is + still in progress or completed, the server + ignores the second request. This prevents + clients from accidentally creating duplicate + commitments. + + The request ID must be a valid UUID with the + exception that zero UUID is not supported + (00000000-0000-0000-0000-000000000000). + hostnames (MutableSequence[str]): + Required. The list of host names of db nodes + to be removed from the ExadbVmCluster. 
+ """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + request_id: str = proto.Field( + proto.STRING, + number=3, + ) + hostnames: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=4, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/pluggable_database.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/pluggable_database.py new file mode 100644 index 000000000000..e149f4160d00 --- /dev/null +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/pluggable_database.py @@ -0,0 +1,523 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +from google.protobuf import timestamp_pb2 # type: ignore +import proto # type: ignore + +__protobuf__ = proto.module( + package="google.cloud.oracledatabase.v1", + manifest={ + "PluggableDatabase", + "PluggableDatabaseProperties", + "PluggableDatabaseConnectionStrings", + "PluggableDatabaseNodeLevelDetails", + "DatabaseManagementConfig", + "GetPluggableDatabaseRequest", + "ListPluggableDatabasesRequest", + "ListPluggableDatabasesResponse", + }, +) + + +class PluggableDatabase(proto.Message): + r"""The PluggableDatabase resource. 
+ https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/PluggableDatabase/ + + Attributes: + name (str): + Identifier. The name of the PluggableDatabase resource in + the following format: + projects/{project}/locations/{region}/pluggableDatabases/{pluggable_database} + properties (google.cloud.oracledatabase_v1.types.PluggableDatabaseProperties): + Optional. The properties of the + PluggableDatabase. + oci_url (str): + Output only. HTTPS link to OCI resources + exposed to Customer via UI Interface. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The date and time that the + PluggableDatabase was created. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + properties: "PluggableDatabaseProperties" = proto.Field( + proto.MESSAGE, + number=2, + message="PluggableDatabaseProperties", + ) + oci_url: str = proto.Field( + proto.STRING, + number=3, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + + +class PluggableDatabaseProperties(proto.Message): + r"""The properties of a PluggableDatabase. + + Attributes: + compartment_id (str): + Required. The OCID of the compartment. + connection_strings (google.cloud.oracledatabase_v1.types.PluggableDatabaseConnectionStrings): + Optional. The Connection strings used to + connect to the Oracle Database. + container_database_ocid (str): + Required. The OCID of the CDB. + defined_tags (MutableMapping[str, google.cloud.oracledatabase_v1.types.PluggableDatabaseProperties.DefinedTagValue]): + Optional. Defined tags for this resource. + Each key is predefined and scoped to a + namespace. + freeform_tags (MutableMapping[str, str]): + Optional. Free-form tags for this resource. + Each tag is a simple key-value pair with no + predefined name, type, or namespace. + ocid (str): + Output only. The OCID of the pluggable + database. + is_restricted (bool): + Optional. The restricted mode of the + pluggable database. 
If a pluggable database is + opened in restricted mode, the user needs both + create a session and have restricted session + privileges to connect to it. + lifecycle_details (str): + Output only. Additional information about the + current lifecycle state. + lifecycle_state (google.cloud.oracledatabase_v1.types.PluggableDatabaseProperties.PluggableDatabaseLifecycleState): + Output only. The current state of the + pluggable database. + pdb_name (str): + Required. The database name. + pdb_node_level_details (MutableSequence[google.cloud.oracledatabase_v1.types.PluggableDatabaseNodeLevelDetails]): + Optional. Pluggable Database Node Level + Details + database_management_config (google.cloud.oracledatabase_v1.types.DatabaseManagementConfig): + Output only. The configuration of the + Database Management service. + operations_insights_state (google.cloud.oracledatabase_v1.types.PluggableDatabaseProperties.OperationsInsightsState): + Output only. The status of Operations + Insights for this Database. + """ + + class PluggableDatabaseLifecycleState(proto.Enum): + r"""The various lifecycle states of the PluggableDatabase. + + Values: + PLUGGABLE_DATABASE_LIFECYCLE_STATE_UNSPECIFIED (0): + The lifecycle state is unspecified. + PROVISIONING (1): + The pluggable database is provisioning. + AVAILABLE (2): + The pluggable database is available. + TERMINATING (3): + The pluggable database is terminating. + TERMINATED (4): + The pluggable database is terminated. + UPDATING (5): + The pluggable database is updating. + FAILED (6): + The pluggable database is in a failed state. + RELOCATING (7): + The pluggable database is relocating. + RELOCATED (8): + The pluggable database is relocated. + REFRESHING (9): + The pluggable database is refreshing. + RESTORE_IN_PROGRESS (10): + The pluggable database is restoring. + RESTORE_FAILED (11): + The pluggable database restore failed. + BACKUP_IN_PROGRESS (12): + The pluggable database is backing up. 
+ DISABLED (13): + The pluggable database is disabled. + """ + PLUGGABLE_DATABASE_LIFECYCLE_STATE_UNSPECIFIED = 0 + PROVISIONING = 1 + AVAILABLE = 2 + TERMINATING = 3 + TERMINATED = 4 + UPDATING = 5 + FAILED = 6 + RELOCATING = 7 + RELOCATED = 8 + REFRESHING = 9 + RESTORE_IN_PROGRESS = 10 + RESTORE_FAILED = 11 + BACKUP_IN_PROGRESS = 12 + DISABLED = 13 + + class OperationsInsightsState(proto.Enum): + r"""The status of Operations Insights for this Database. + + Values: + OPERATIONS_INSIGHTS_STATE_UNSPECIFIED (0): + The status is not specified. + ENABLING (1): + Operations Insights is enabling. + ENABLED (2): + Operations Insights is enabled. + DISABLING (3): + Operations Insights is disabling. + NOT_ENABLED (4): + Operations Insights is not enabled. + FAILED_ENABLING (5): + Operations Insights failed to enable. + FAILED_DISABLING (6): + Operations Insights failed to disable. + """ + OPERATIONS_INSIGHTS_STATE_UNSPECIFIED = 0 + ENABLING = 1 + ENABLED = 2 + DISABLING = 3 + NOT_ENABLED = 4 + FAILED_ENABLING = 5 + FAILED_DISABLING = 6 + + class DefinedTagValue(proto.Message): + r"""Wrapper message for the value of a defined tag. + + Attributes: + tags (MutableMapping[str, str]): + The tags within the namespace. 
+ """ + + tags: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=1, + ) + + compartment_id: str = proto.Field( + proto.STRING, + number=1, + ) + connection_strings: "PluggableDatabaseConnectionStrings" = proto.Field( + proto.MESSAGE, + number=2, + message="PluggableDatabaseConnectionStrings", + ) + container_database_ocid: str = proto.Field( + proto.STRING, + number=3, + ) + defined_tags: MutableMapping[str, DefinedTagValue] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=4, + message=DefinedTagValue, + ) + freeform_tags: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=5, + ) + ocid: str = proto.Field( + proto.STRING, + number=6, + ) + is_restricted: bool = proto.Field( + proto.BOOL, + number=7, + ) + lifecycle_details: str = proto.Field( + proto.STRING, + number=8, + ) + lifecycle_state: PluggableDatabaseLifecycleState = proto.Field( + proto.ENUM, + number=9, + enum=PluggableDatabaseLifecycleState, + ) + pdb_name: str = proto.Field( + proto.STRING, + number=10, + ) + pdb_node_level_details: MutableSequence[ + "PluggableDatabaseNodeLevelDetails" + ] = proto.RepeatedField( + proto.MESSAGE, + number=11, + message="PluggableDatabaseNodeLevelDetails", + ) + database_management_config: "DatabaseManagementConfig" = proto.Field( + proto.MESSAGE, + number=13, + message="DatabaseManagementConfig", + ) + operations_insights_state: OperationsInsightsState = proto.Field( + proto.ENUM, + number=14, + enum=OperationsInsightsState, + ) + + +class PluggableDatabaseConnectionStrings(proto.Message): + r"""The connection strings used to connect to the Oracle + Database. + + Attributes: + all_connection_strings (MutableMapping[str, str]): + Optional. All connection strings to use to + connect to the pluggable database. + pdb_default (str): + Optional. The default connection string to + use to connect to the pluggable database. + pdb_ip_default (str): + Optional. 
The default connection string to + use to connect to the pluggable database using + IP. + """ + + all_connection_strings: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=1, + ) + pdb_default: str = proto.Field( + proto.STRING, + number=2, + ) + pdb_ip_default: str = proto.Field( + proto.STRING, + number=3, + ) + + +class PluggableDatabaseNodeLevelDetails(proto.Message): + r"""The Pluggable Database Node Level Details. + + Attributes: + node_name (str): + Required. The Node name of the Database home. + open_mode (google.cloud.oracledatabase_v1.types.PluggableDatabaseNodeLevelDetails.PluggableDatabaseOpenMode): + Required. The mode that the pluggable + database is in to open it. + pluggable_database_id (str): + Required. The OCID of the Pluggable Database. + """ + + class PluggableDatabaseOpenMode(proto.Enum): + r"""The mode that the pluggable database is in to open it. + + Values: + PLUGGABLE_DATABASE_OPEN_MODE_UNSPECIFIED (0): + The open mode is unspecified. + READ_ONLY (1): + The pluggable database is opened in read-only + mode. + READ_WRITE (2): + The pluggable database is opened in + read-write mode. + MOUNTED (3): + The pluggable database is mounted. + MIGRATE (4): + The pluggable database is migrated. + """ + PLUGGABLE_DATABASE_OPEN_MODE_UNSPECIFIED = 0 + READ_ONLY = 1 + READ_WRITE = 2 + MOUNTED = 3 + MIGRATE = 4 + + node_name: str = proto.Field( + proto.STRING, + number=1, + ) + open_mode: PluggableDatabaseOpenMode = proto.Field( + proto.ENUM, + number=2, + enum=PluggableDatabaseOpenMode, + ) + pluggable_database_id: str = proto.Field( + proto.STRING, + number=3, + ) + + +class DatabaseManagementConfig(proto.Message): + r"""The configuration of the Database Management service. + + Attributes: + management_state (google.cloud.oracledatabase_v1.types.DatabaseManagementConfig.ManagementState): + Output only. The status of the Database + Management service. 
+ management_type (google.cloud.oracledatabase_v1.types.DatabaseManagementConfig.ManagementType): + Output only. The Database Management type. + """ + + class ManagementState(proto.Enum): + r"""The status of the Database Management service. + + Values: + MANAGEMENT_STATE_UNSPECIFIED (0): + The status is not specified. + ENABLING (1): + The Database Management service is enabling. + ENABLED (2): + The Database Management service is enabled. + DISABLING (3): + The Database Management service is disabling. + DISABLED (4): + The Database Management service is disabled. + UPDATING (5): + The Database Management service is updating. + FAILED_ENABLING (6): + The Database Management service failed to + enable. + FAILED_DISABLING (7): + The Database Management service failed to + disable. + FAILED_UPDATING (8): + The Database Management service failed to + update. + """ + MANAGEMENT_STATE_UNSPECIFIED = 0 + ENABLING = 1 + ENABLED = 2 + DISABLING = 3 + DISABLED = 4 + UPDATING = 5 + FAILED_ENABLING = 6 + FAILED_DISABLING = 7 + FAILED_UPDATING = 8 + + class ManagementType(proto.Enum): + r"""The Database Management type. + + Values: + MANAGEMENT_TYPE_UNSPECIFIED (0): + The type is not specified. + BASIC (1): + Basic Database Management. + ADVANCED (2): + Advanced Database Management. + """ + MANAGEMENT_TYPE_UNSPECIFIED = 0 + BASIC = 1 + ADVANCED = 2 + + management_state: ManagementState = proto.Field( + proto.ENUM, + number=1, + enum=ManagementState, + ) + management_type: ManagementType = proto.Field( + proto.ENUM, + number=2, + enum=ManagementType, + ) + + +class GetPluggableDatabaseRequest(proto.Message): + r"""The request for ``PluggableDatabase.Get``. + + Attributes: + name (str): + Required. 
The name of the PluggableDatabase resource in the + following format: + projects/{project}/locations/{region}/pluggableDatabases/{pluggable_database} + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListPluggableDatabasesRequest(proto.Message): + r"""The request for ``PluggableDatabase.List``. + + Attributes: + parent (str): + Required. The parent, which owns this + collection of PluggableDatabases. Format: + projects/{project}/locations/{location} + page_size (int): + Optional. The maximum number of + PluggableDatabases to return. The service may + return fewer than this value. + page_token (str): + Optional. A page token, received from a previous + ``ListPluggableDatabases`` call. Provide this to retrieve + the subsequent page. + + When paginating, all other parameters provided to + ``ListPluggableDatabases`` must match the call that provided + the page token. + filter (str): + Optional. An expression for filtering the results of the + request. List for pluggable databases is supported only with + a valid container database (full resource name) filter in + this format: + ``database="projects/{project}/locations/{location}/databases/{database}"`` + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + filter: str = proto.Field( + proto.STRING, + number=4, + ) + + +class ListPluggableDatabasesResponse(proto.Message): + r"""The response for ``PluggableDatabase.List``. + + Attributes: + pluggable_databases (MutableSequence[google.cloud.oracledatabase_v1.types.PluggableDatabase]): + The list of PluggableDatabases. + next_page_token (str): + A token identifying a page of results the + server should return. 
+ """ + + @property + def raw_page(self): + return self + + pluggable_databases: MutableSequence["PluggableDatabase"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="PluggableDatabase", + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/vm_cluster.py b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/vm_cluster.py index 0a0d7d0fbdcb..6a7766bebde3 100644 --- a/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/vm_cluster.py +++ b/packages/google-cloud-oracledatabase/google/cloud/oracledatabase_v1/types/vm_cluster.py @@ -21,6 +21,8 @@ from google.type import datetime_pb2 # type: ignore import proto # type: ignore +from google.cloud.oracledatabase_v1.types import common + __protobuf__ = proto.module( package="google.cloud.oracledatabase.v1", manifest={ @@ -48,11 +50,6 @@ class CloudVmCluster(proto.Message): display_name (str): Optional. User friendly name for this resource. - gcp_oracle_zone (str): - Output only. Google Cloud Platform location - where Oracle Exadata is hosted. It is same as - Google Cloud Platform Oracle zone of Exadata - infrastructure. properties (google.cloud.oracledatabase_v1.types.CloudVmClusterProperties): Optional. Various properties of the VM Cluster. @@ -63,14 +60,36 @@ class CloudVmCluster(proto.Message): Output only. The date and time that the VM cluster was created. cidr (str): - Required. Network settings. CIDR to use for + Optional. Network settings. CIDR to use for cluster IP allocation. backup_subnet_cidr (str): - Required. CIDR range of the backup subnet. + Optional. CIDR range of the backup subnet. network (str): - Required. The name of the VPC network. + Optional. The name of the VPC network. Format: projects/{project}/global/networks/{network} + gcp_oracle_zone (str): + Output only. 
The GCP Oracle zone where Oracle CloudVmCluster + is hosted. This will be the same as the gcp_oracle_zone of + the CloudExadataInfrastructure. Example: us-east4-b-r2. + odb_network (str): + Optional. The name of the OdbNetwork associated with the VM + Cluster. Format: + projects/{project}/locations/{location}/odbNetworks/{odb_network} + It is optional but if specified, this should match the + parent ODBNetwork of the odb_subnet and backup_odb_subnet. + odb_subnet (str): + Optional. The name of the OdbSubnet associated with the VM + Cluster for IP allocation. Format: + projects/{project}/locations/{location}/odbNetworks/{odb_network}/odbSubnets/{odb_subnet} + backup_odb_subnet (str): + Optional. The name of the backup OdbSubnet associated with + the VM Cluster. Format: + projects/{project}/locations/{location}/odbNetworks/{odb_network}/odbSubnets/{odb_subnet} + identity_connector (google.cloud.oracledatabase_v1.types.IdentityConnector): + Output only. The identity connector details + which will allow OCI to securely access the + resources in the customer project. """ name: str = proto.Field( @@ -85,10 +104,6 @@ class CloudVmCluster(proto.Message): proto.STRING, number=3, ) - gcp_oracle_zone: str = proto.Field( - proto.STRING, - number=12, - ) properties: "CloudVmClusterProperties" = proto.Field( proto.MESSAGE, number=6, @@ -116,6 +131,27 @@ class CloudVmCluster(proto.Message): proto.STRING, number=11, ) + gcp_oracle_zone: str = proto.Field( + proto.STRING, + number=12, + ) + odb_network: str = proto.Field( + proto.STRING, + number=13, + ) + odb_subnet: str = proto.Field( + proto.STRING, + number=14, + ) + backup_odb_subnet: str = proto.Field( + proto.STRING, + number=15, + ) + identity_connector: common.IdentityConnector = proto.Field( + proto.MESSAGE, + number=16, + message=common.IdentityConnector, + ) class CloudVmClusterProperties(proto.Message): @@ -201,6 +237,9 @@ class CloudVmClusterProperties(proto.Message): Output only. DNS listener IP. 
cluster_name (str): Optional. OCI Cluster name. + compute_model (google.cloud.oracledatabase_v1.types.ComputeModel): + Output only. The compute model of the VM + Cluster. """ class LicenseType(proto.Enum): @@ -403,6 +442,11 @@ class State(proto.Enum): proto.STRING, number=36, ) + compute_model: common.ComputeModel = proto.Field( + proto.ENUM, + number=37, + enum=common.ComputeModel, + ) class DataCollectionOptions(proto.Message): diff --git a/packages/google-cloud-oracledatabase/noxfile.py b/packages/google-cloud-oracledatabase/noxfile.py index f667da638b78..ef694a433e3d 100644 --- a/packages/google-cloud-oracledatabase/noxfile.py +++ b/packages/google-cloud-oracledatabase/noxfile.py @@ -27,6 +27,10 @@ LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] +# Add samples to the list of directories to format if the directory exists. +if os.path.isdir("samples"): + LINT_PATHS.append("samples") + ALL_PYTHON = [ "3.7", "3.8", diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_autonomous_database_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_autonomous_database_async.py index e390738ddefd..dbfbb576b03a 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_autonomous_database_async.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_autonomous_database_async.py @@ -54,4 +54,5 @@ async def sample_create_autonomous_database(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_CreateAutonomousDatabase_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_autonomous_database_sync.py 
b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_autonomous_database_sync.py index ff7d7850b0cc..c409feafe561 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_autonomous_database_sync.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_autonomous_database_sync.py @@ -54,4 +54,5 @@ def sample_create_autonomous_database(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_CreateAutonomousDatabase_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_cloud_exadata_infrastructure_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_cloud_exadata_infrastructure_async.py index f5a88face411..773c9fbad988 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_cloud_exadata_infrastructure_async.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_cloud_exadata_infrastructure_async.py @@ -54,4 +54,5 @@ async def sample_create_cloud_exadata_infrastructure(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_CreateCloudExadataInfrastructure_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_cloud_exadata_infrastructure_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_cloud_exadata_infrastructure_sync.py index 64fa27e87d6d..6ceb5632d7ef 100644 --- 
a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_cloud_exadata_infrastructure_sync.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_cloud_exadata_infrastructure_sync.py @@ -54,4 +54,5 @@ def sample_create_cloud_exadata_infrastructure(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_CreateCloudExadataInfrastructure_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_cloud_vm_cluster_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_cloud_vm_cluster_async.py index c0c4254ac054..2d5edd7c1bb5 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_cloud_vm_cluster_async.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_cloud_vm_cluster_async.py @@ -41,9 +41,6 @@ async def sample_create_cloud_vm_cluster(): # Initialize request argument(s) cloud_vm_cluster = oracledatabase_v1.CloudVmCluster() cloud_vm_cluster.exadata_infrastructure = "exadata_infrastructure_value" - cloud_vm_cluster.cidr = "cidr_value" - cloud_vm_cluster.backup_subnet_cidr = "backup_subnet_cidr_value" - cloud_vm_cluster.network = "network_value" request = oracledatabase_v1.CreateCloudVmClusterRequest( parent="parent_value", @@ -61,4 +58,5 @@ async def sample_create_cloud_vm_cluster(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_CreateCloudVmCluster_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_cloud_vm_cluster_sync.py 
b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_cloud_vm_cluster_sync.py index 05b7a0610f7a..92e9ceff40d1 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_cloud_vm_cluster_sync.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_cloud_vm_cluster_sync.py @@ -41,9 +41,6 @@ def sample_create_cloud_vm_cluster(): # Initialize request argument(s) cloud_vm_cluster = oracledatabase_v1.CloudVmCluster() cloud_vm_cluster.exadata_infrastructure = "exadata_infrastructure_value" - cloud_vm_cluster.cidr = "cidr_value" - cloud_vm_cluster.backup_subnet_cidr = "backup_subnet_cidr_value" - cloud_vm_cluster.network = "network_value" request = oracledatabase_v1.CreateCloudVmClusterRequest( parent="parent_value", @@ -61,4 +58,5 @@ def sample_create_cloud_vm_cluster(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_CreateCloudVmCluster_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_db_system_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_db_system_async.py new file mode 100644 index 000000000000..82229f6ab278 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_db_system_async.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateDbSystem +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_CreateDbSystem_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_create_db_system(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + db_system = oracledatabase_v1.DbSystem() + db_system.odb_subnet = "odb_subnet_value" + db_system.display_name = "display_name_value" + + request = oracledatabase_v1.CreateDbSystemRequest( + parent="parent_value", + db_system_id="db_system_id_value", + db_system=db_system, + ) + + # Make the request + operation = client.create_db_system(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_CreateDbSystem_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_db_system_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_db_system_sync.py new file mode 100644 index 000000000000..133d96cf3835 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_db_system_sync.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateDbSystem +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_CreateDbSystem_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_create_db_system(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + db_system = oracledatabase_v1.DbSystem() + db_system.odb_subnet = "odb_subnet_value" + db_system.display_name = "display_name_value" + + request = oracledatabase_v1.CreateDbSystemRequest( + parent="parent_value", + db_system_id="db_system_id_value", + db_system=db_system, + ) + + # Make the request + operation = client.create_db_system(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_CreateDbSystem_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_exadb_vm_cluster_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_exadb_vm_cluster_async.py new file mode 100644 index 
000000000000..4675d2ccc997 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_exadb_vm_cluster_async.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateExadbVmCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_CreateExadbVmCluster_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_create_exadb_vm_cluster(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + exadb_vm_cluster = oracledatabase_v1.ExadbVmCluster() + exadb_vm_cluster.properties.grid_image_id = "grid_image_id_value" + exadb_vm_cluster.properties.node_count = 1070 + exadb_vm_cluster.properties.enabled_ecpu_count_per_node = 2826 + exadb_vm_cluster.properties.vm_file_system_storage.size_in_gbs_per_node = 2103 + exadb_vm_cluster.properties.exascale_db_storage_vault = ( + "exascale_db_storage_vault_value" + ) + exadb_vm_cluster.properties.hostname_prefix = "hostname_prefix_value" + exadb_vm_cluster.properties.ssh_public_keys = [ + "ssh_public_keys_value1", + "ssh_public_keys_value2", + ] + exadb_vm_cluster.properties.shape_attribute = "BLOCK_STORAGE" + exadb_vm_cluster.odb_subnet = "odb_subnet_value" + exadb_vm_cluster.backup_odb_subnet = "backup_odb_subnet_value" + exadb_vm_cluster.display_name = "display_name_value" + + request = oracledatabase_v1.CreateExadbVmClusterRequest( + parent="parent_value", + exadb_vm_cluster_id="exadb_vm_cluster_id_value", + exadb_vm_cluster=exadb_vm_cluster, + ) + + # Make the request + operation = client.create_exadb_vm_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_CreateExadbVmCluster_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_exadb_vm_cluster_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_exadb_vm_cluster_sync.py new 
file mode 100644 index 000000000000..2443fb1a8b3d --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_exadb_vm_cluster_sync.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateExadbVmCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_CreateExadbVmCluster_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_create_exadb_vm_cluster(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + exadb_vm_cluster = oracledatabase_v1.ExadbVmCluster() + exadb_vm_cluster.properties.grid_image_id = "grid_image_id_value" + exadb_vm_cluster.properties.node_count = 1070 + exadb_vm_cluster.properties.enabled_ecpu_count_per_node = 2826 + exadb_vm_cluster.properties.vm_file_system_storage.size_in_gbs_per_node = 2103 + exadb_vm_cluster.properties.exascale_db_storage_vault = ( + "exascale_db_storage_vault_value" + ) + exadb_vm_cluster.properties.hostname_prefix = "hostname_prefix_value" + exadb_vm_cluster.properties.ssh_public_keys = [ + "ssh_public_keys_value1", + "ssh_public_keys_value2", + ] + exadb_vm_cluster.properties.shape_attribute = "BLOCK_STORAGE" + exadb_vm_cluster.odb_subnet = "odb_subnet_value" + exadb_vm_cluster.backup_odb_subnet = "backup_odb_subnet_value" + exadb_vm_cluster.display_name = "display_name_value" + + request = oracledatabase_v1.CreateExadbVmClusterRequest( + parent="parent_value", + exadb_vm_cluster_id="exadb_vm_cluster_id_value", + exadb_vm_cluster=exadb_vm_cluster, + ) + + # Make the request + operation = client.create_exadb_vm_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_CreateExadbVmCluster_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_exascale_db_storage_vault_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_exascale_db_storage_vault_async.py new 
file mode 100644 index 000000000000..1f4ed05e960a --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_exascale_db_storage_vault_async.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateExascaleDbStorageVault +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_CreateExascaleDbStorageVault_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_create_exascale_db_storage_vault(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + exascale_db_storage_vault = oracledatabase_v1.ExascaleDbStorageVault() + exascale_db_storage_vault.display_name = "display_name_value" + exascale_db_storage_vault.properties.exascale_db_storage_details.total_size_gbs = ( + 1497 + ) + + request = oracledatabase_v1.CreateExascaleDbStorageVaultRequest( + parent="parent_value", + exascale_db_storage_vault_id="exascale_db_storage_vault_id_value", + exascale_db_storage_vault=exascale_db_storage_vault, + ) + + # Make the request + operation = client.create_exascale_db_storage_vault(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_CreateExascaleDbStorageVault_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_exascale_db_storage_vault_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_exascale_db_storage_vault_sync.py new file mode 100644 index 000000000000..6f40ee1d1245 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_exascale_db_storage_vault_sync.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateExascaleDbStorageVault +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_CreateExascaleDbStorageVault_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_create_exascale_db_storage_vault(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + exascale_db_storage_vault = oracledatabase_v1.ExascaleDbStorageVault() + exascale_db_storage_vault.display_name = "display_name_value" + exascale_db_storage_vault.properties.exascale_db_storage_details.total_size_gbs = ( + 1497 + ) + + request = oracledatabase_v1.CreateExascaleDbStorageVaultRequest( + parent="parent_value", + exascale_db_storage_vault_id="exascale_db_storage_vault_id_value", + exascale_db_storage_vault=exascale_db_storage_vault, + ) + + # Make the request + operation = client.create_exascale_db_storage_vault(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_CreateExascaleDbStorageVault_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_odb_network_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_odb_network_async.py new file mode 100644 index 000000000000..40cc4b7f9df9 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_odb_network_async.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateOdbNetwork +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_CreateOdbNetwork_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_create_odb_network(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + odb_network = oracledatabase_v1.OdbNetwork() + odb_network.network = "network_value" + + request = oracledatabase_v1.CreateOdbNetworkRequest( + parent="parent_value", + odb_network_id="odb_network_id_value", + odb_network=odb_network, + ) + + # Make the request + operation = client.create_odb_network(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_CreateOdbNetwork_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_odb_network_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_odb_network_sync.py new file mode 100644 index 000000000000..d053bc1502ea --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_odb_network_sync.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateOdbNetwork +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_CreateOdbNetwork_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_create_odb_network(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + odb_network = oracledatabase_v1.OdbNetwork() + odb_network.network = "network_value" + + request = oracledatabase_v1.CreateOdbNetworkRequest( + parent="parent_value", + odb_network_id="odb_network_id_value", + odb_network=odb_network, + ) + + # Make the request + operation = client.create_odb_network(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_CreateOdbNetwork_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_odb_subnet_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_odb_subnet_async.py new file mode 100644 index 000000000000..0b9a7de2c1e2 --- /dev/null +++ 
b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_odb_subnet_async.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateOdbSubnet +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_CreateOdbSubnet_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_create_odb_subnet(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + odb_subnet = oracledatabase_v1.OdbSubnet() + odb_subnet.cidr_range = "cidr_range_value" + odb_subnet.purpose = "BACKUP_SUBNET" + + request = oracledatabase_v1.CreateOdbSubnetRequest( + parent="parent_value", + odb_subnet_id="odb_subnet_id_value", + odb_subnet=odb_subnet, + ) + + # Make the request + operation = client.create_odb_subnet(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_CreateOdbSubnet_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_odb_subnet_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_odb_subnet_sync.py new file mode 100644 index 000000000000..cbe818b85623 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_create_odb_subnet_sync.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateOdbSubnet +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_CreateOdbSubnet_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_create_odb_subnet(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + odb_subnet = oracledatabase_v1.OdbSubnet() + odb_subnet.cidr_range = "cidr_range_value" + odb_subnet.purpose = "BACKUP_SUBNET" + + request = oracledatabase_v1.CreateOdbSubnetRequest( + parent="parent_value", + odb_subnet_id="odb_subnet_id_value", + odb_subnet=odb_subnet, + ) + + # Make the request + operation = client.create_odb_subnet(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_CreateOdbSubnet_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_autonomous_database_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_autonomous_database_async.py index 
f636f113915d..2350ab5e74b8 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_autonomous_database_async.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_autonomous_database_async.py @@ -53,4 +53,5 @@ async def sample_delete_autonomous_database(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_DeleteAutonomousDatabase_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_autonomous_database_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_autonomous_database_sync.py index eedb1e6a10f1..212da1b438d3 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_autonomous_database_sync.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_autonomous_database_sync.py @@ -53,4 +53,5 @@ def sample_delete_autonomous_database(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_DeleteAutonomousDatabase_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_cloud_exadata_infrastructure_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_cloud_exadata_infrastructure_async.py index ca91fe0046eb..a2d169a0ec40 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_cloud_exadata_infrastructure_async.py +++ 
b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_cloud_exadata_infrastructure_async.py @@ -53,4 +53,5 @@ async def sample_delete_cloud_exadata_infrastructure(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_DeleteCloudExadataInfrastructure_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_cloud_exadata_infrastructure_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_cloud_exadata_infrastructure_sync.py index daded45420f9..bb6a43005db4 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_cloud_exadata_infrastructure_sync.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_cloud_exadata_infrastructure_sync.py @@ -53,4 +53,5 @@ def sample_delete_cloud_exadata_infrastructure(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_DeleteCloudExadataInfrastructure_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_cloud_vm_cluster_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_cloud_vm_cluster_async.py index 0ec58edc5fb7..137d43852760 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_cloud_vm_cluster_async.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_cloud_vm_cluster_async.py @@ -53,4 +53,5 @@ async def sample_delete_cloud_vm_cluster(): # Handle the response print(response) + # [END 
oracledatabase_v1_generated_OracleDatabase_DeleteCloudVmCluster_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_cloud_vm_cluster_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_cloud_vm_cluster_sync.py index 47a7d51c8fec..73b8f47de203 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_cloud_vm_cluster_sync.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_cloud_vm_cluster_sync.py @@ -53,4 +53,5 @@ def sample_delete_cloud_vm_cluster(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_DeleteCloudVmCluster_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_db_system_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_db_system_async.py new file mode 100644 index 000000000000..9e2fbadb2ed1 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_db_system_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteDbSystem +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_DeleteDbSystem_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_delete_db_system(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.DeleteDbSystemRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_db_system(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_DeleteDbSystem_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_db_system_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_db_system_sync.py new file mode 100644 index 000000000000..76d7b3cad880 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_db_system_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 
(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteDbSystem +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_DeleteDbSystem_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_delete_db_system(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.DeleteDbSystemRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_db_system(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_DeleteDbSystem_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_exadb_vm_cluster_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_exadb_vm_cluster_async.py new file mode 100644 index 000000000000..dc0f485e4ab6 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_exadb_vm_cluster_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for DeleteExadbVmCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_DeleteExadbVmCluster_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_delete_exadb_vm_cluster(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.DeleteExadbVmClusterRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_exadb_vm_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_DeleteExadbVmCluster_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_exadb_vm_cluster_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_exadb_vm_cluster_sync.py new file mode 100644 index 000000000000..678a9bba577a --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_exadb_vm_cluster_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteExadbVmCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_DeleteExadbVmCluster_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_delete_exadb_vm_cluster(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.DeleteExadbVmClusterRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_exadb_vm_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_DeleteExadbVmCluster_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_exascale_db_storage_vault_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_exascale_db_storage_vault_async.py new file mode 100644 index 000000000000..67e140b31490 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_exascale_db_storage_vault_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for DeleteExascaleDbStorageVault +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_DeleteExascaleDbStorageVault_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_delete_exascale_db_storage_vault(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.DeleteExascaleDbStorageVaultRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_exascale_db_storage_vault(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_DeleteExascaleDbStorageVault_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_exascale_db_storage_vault_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_exascale_db_storage_vault_sync.py new file mode 100644 index 000000000000..278f38055f3d --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_exascale_db_storage_vault_sync.py @@ -0,0 +1,57 @@ +# -*- 
coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteExascaleDbStorageVault +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_DeleteExascaleDbStorageVault_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_delete_exascale_db_storage_vault(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.DeleteExascaleDbStorageVaultRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_exascale_db_storage_vault(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_DeleteExascaleDbStorageVault_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_odb_network_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_odb_network_async.py new file mode 100644 index 000000000000..52ef6b2bd971 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_odb_network_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for DeleteOdbNetwork +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_DeleteOdbNetwork_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_delete_odb_network(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.DeleteOdbNetworkRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_odb_network(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_DeleteOdbNetwork_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_odb_network_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_odb_network_sync.py new file mode 100644 index 000000000000..e57118025361 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_odb_network_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# 
you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteOdbNetwork +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_DeleteOdbNetwork_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_delete_odb_network(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.DeleteOdbNetworkRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_odb_network(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_DeleteOdbNetwork_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_odb_subnet_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_odb_subnet_async.py new file mode 100644 index 000000000000..9050199af8ed --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_odb_subnet_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for DeleteOdbSubnet +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_DeleteOdbSubnet_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_delete_odb_subnet(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.DeleteOdbSubnetRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_odb_subnet(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_DeleteOdbSubnet_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_odb_subnet_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_odb_subnet_sync.py new file mode 100644 index 000000000000..d70da2f31be9 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_delete_odb_subnet_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may 
not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteOdbSubnet +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_DeleteOdbSubnet_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_delete_odb_subnet(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.DeleteOdbSubnetRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_odb_subnet(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_DeleteOdbSubnet_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_failover_autonomous_database_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_failover_autonomous_database_async.py new file mode 100644 index 000000000000..2612d51d2ec8 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_failover_autonomous_database_async.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for FailoverAutonomousDatabase +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_FailoverAutonomousDatabase_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_failover_autonomous_database(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.FailoverAutonomousDatabaseRequest( + name="name_value", + peer_autonomous_database="peer_autonomous_database_value", + ) + + # Make the request + operation = client.failover_autonomous_database(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_FailoverAutonomousDatabase_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_failover_autonomous_database_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_failover_autonomous_database_sync.py new file mode 100644 index 000000000000..98467c6d37d6 --- /dev/null +++ 
b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_failover_autonomous_database_sync.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for FailoverAutonomousDatabase +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_FailoverAutonomousDatabase_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_failover_autonomous_database(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.FailoverAutonomousDatabaseRequest( + name="name_value", + peer_autonomous_database="peer_autonomous_database_value", + ) + + # Make the request + operation = client.failover_autonomous_database(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_FailoverAutonomousDatabase_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_generate_autonomous_database_wallet_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_generate_autonomous_database_wallet_async.py index f4120526383f..5b610ad51c31 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_generate_autonomous_database_wallet_async.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_generate_autonomous_database_wallet_async.py @@ -50,4 +50,5 @@ async def sample_generate_autonomous_database_wallet(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_GenerateAutonomousDatabaseWallet_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_generate_autonomous_database_wallet_sync.py 
b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_generate_autonomous_database_wallet_sync.py index 7c88b17a8214..f123f8998d43 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_generate_autonomous_database_wallet_sync.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_generate_autonomous_database_wallet_sync.py @@ -50,4 +50,5 @@ def sample_generate_autonomous_database_wallet(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_GenerateAutonomousDatabaseWallet_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_autonomous_database_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_autonomous_database_async.py index 6b5c488b6b60..44f0b9925c0b 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_autonomous_database_async.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_autonomous_database_async.py @@ -49,4 +49,5 @@ async def sample_get_autonomous_database(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_GetAutonomousDatabase_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_autonomous_database_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_autonomous_database_sync.py index 18abc23f1118..100b3b5f1ae7 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_autonomous_database_sync.py +++ 
b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_autonomous_database_sync.py @@ -49,4 +49,5 @@ def sample_get_autonomous_database(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_GetAutonomousDatabase_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_cloud_exadata_infrastructure_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_cloud_exadata_infrastructure_async.py index d972209a7d50..d4d5bdbbb0e2 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_cloud_exadata_infrastructure_async.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_cloud_exadata_infrastructure_async.py @@ -49,4 +49,5 @@ async def sample_get_cloud_exadata_infrastructure(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_GetCloudExadataInfrastructure_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_cloud_exadata_infrastructure_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_cloud_exadata_infrastructure_sync.py index 2e30a54d09f1..aa458162d393 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_cloud_exadata_infrastructure_sync.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_cloud_exadata_infrastructure_sync.py @@ -49,4 +49,5 @@ def sample_get_cloud_exadata_infrastructure(): # Handle the response print(response) + # [END 
oracledatabase_v1_generated_OracleDatabase_GetCloudExadataInfrastructure_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_cloud_vm_cluster_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_cloud_vm_cluster_async.py index 68888157ccf3..39fa476e5724 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_cloud_vm_cluster_async.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_cloud_vm_cluster_async.py @@ -49,4 +49,5 @@ async def sample_get_cloud_vm_cluster(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_GetCloudVmCluster_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_cloud_vm_cluster_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_cloud_vm_cluster_sync.py index 0d2b9201855d..26aa1a3d0c2c 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_cloud_vm_cluster_sync.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_cloud_vm_cluster_sync.py @@ -49,4 +49,5 @@ def sample_get_cloud_vm_cluster(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_GetCloudVmCluster_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_database_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_database_async.py new file mode 100644 index 000000000000..82bd9c99db2b --- /dev/null +++ 
b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_database_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetDatabase +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_GetDatabase_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_get_database(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetDatabaseRequest( + name="name_value", + ) + + # Make the request + response = await client.get_database(request=request) + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_GetDatabase_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_database_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_database_sync.py new file mode 100644 index 000000000000..e6cd85c590e0 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_database_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetDatabase +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_GetDatabase_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_get_database(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetDatabaseRequest( + name="name_value", + ) + + # Make the request + response = client.get_database(request=request) + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_GetDatabase_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_db_system_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_db_system_async.py new file mode 100644 index 000000000000..c9f0506de11e --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_db_system_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetDbSystem +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_GetDbSystem_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_get_db_system(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetDbSystemRequest( + name="name_value", + ) + + # Make the request + response = await client.get_db_system(request=request) + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_GetDbSystem_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_db_system_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_db_system_sync.py new file mode 100644 index 000000000000..60464563f930 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_db_system_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetDbSystem +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_GetDbSystem_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_get_db_system(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetDbSystemRequest( + name="name_value", + ) + + # Make the request + response = client.get_db_system(request=request) + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_GetDbSystem_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_exadb_vm_cluster_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_exadb_vm_cluster_async.py new file mode 100644 index 000000000000..507aa85dfb87 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_exadb_vm_cluster_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetExadbVmCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_GetExadbVmCluster_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_get_exadb_vm_cluster(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetExadbVmClusterRequest( + name="name_value", + ) + + # Make the request + response = await client.get_exadb_vm_cluster(request=request) + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_GetExadbVmCluster_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_exadb_vm_cluster_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_exadb_vm_cluster_sync.py new file mode 100644 index 000000000000..305249d18814 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_exadb_vm_cluster_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetExadbVmCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_GetExadbVmCluster_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_get_exadb_vm_cluster(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetExadbVmClusterRequest( + name="name_value", + ) + + # Make the request + response = client.get_exadb_vm_cluster(request=request) + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_GetExadbVmCluster_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_exascale_db_storage_vault_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_exascale_db_storage_vault_async.py new file mode 100644 index 000000000000..07b704e919a2 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_exascale_db_storage_vault_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetExascaleDbStorageVault +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_GetExascaleDbStorageVault_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_get_exascale_db_storage_vault(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetExascaleDbStorageVaultRequest( + name="name_value", + ) + + # Make the request + response = await client.get_exascale_db_storage_vault(request=request) + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_GetExascaleDbStorageVault_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_exascale_db_storage_vault_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_exascale_db_storage_vault_sync.py new file mode 100644 index 000000000000..cde797bbb216 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_exascale_db_storage_vault_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetExascaleDbStorageVault +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_GetExascaleDbStorageVault_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_get_exascale_db_storage_vault(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetExascaleDbStorageVaultRequest( + name="name_value", + ) + + # Make the request + response = client.get_exascale_db_storage_vault(request=request) + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_GetExascaleDbStorageVault_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_odb_network_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_odb_network_async.py new file mode 100644 index 000000000000..8202b1375028 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_odb_network_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with 
the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetOdbNetwork +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_GetOdbNetwork_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_get_odb_network(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetOdbNetworkRequest( + name="name_value", + ) + + # Make the request + response = await client.get_odb_network(request=request) + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_GetOdbNetwork_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_odb_network_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_odb_network_sync.py new file mode 100644 index 000000000000..c84f972c8e2f --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_odb_network_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetOdbNetwork +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_GetOdbNetwork_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_get_odb_network(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetOdbNetworkRequest( + name="name_value", + ) + + # Make the request + response = client.get_odb_network(request=request) + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_GetOdbNetwork_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_odb_subnet_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_odb_subnet_async.py new file mode 100644 index 000000000000..5bae868791ad --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_odb_subnet_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetOdbSubnet +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_GetOdbSubnet_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_get_odb_subnet(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetOdbSubnetRequest( + name="name_value", + ) + + # Make the request + response = await client.get_odb_subnet(request=request) + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_GetOdbSubnet_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_odb_subnet_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_odb_subnet_sync.py new file mode 100644 index 000000000000..42c0c8327167 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_odb_subnet_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetOdbSubnet +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_GetOdbSubnet_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_get_odb_subnet(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetOdbSubnetRequest( + name="name_value", + ) + + # Make the request + response = client.get_odb_subnet(request=request) + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_GetOdbSubnet_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_pluggable_database_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_pluggable_database_async.py new file mode 100644 index 000000000000..a9d0042ad8ff --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_pluggable_database_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetPluggableDatabase +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_GetPluggableDatabase_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_get_pluggable_database(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetPluggableDatabaseRequest( + name="name_value", + ) + + # Make the request + response = await client.get_pluggable_database(request=request) + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_GetPluggableDatabase_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_pluggable_database_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_pluggable_database_sync.py new file mode 100644 index 000000000000..47c573027f32 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_get_pluggable_database_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetPluggableDatabase +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_GetPluggableDatabase_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_get_pluggable_database(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.GetPluggableDatabaseRequest( + name="name_value", + ) + + # Make the request + response = client.get_pluggable_database(request=request) + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_GetPluggableDatabase_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_database_backups_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_database_backups_async.py index 1ccc1f0fcf86..3c7318167bdd 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_database_backups_async.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_database_backups_async.py @@ -50,4 +50,5 @@ async def sample_list_autonomous_database_backups(): async for response in page_result: print(response) + # [END 
oracledatabase_v1_generated_OracleDatabase_ListAutonomousDatabaseBackups_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_database_backups_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_database_backups_sync.py index ef540ae11162..4cf84bea9ea0 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_database_backups_sync.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_database_backups_sync.py @@ -50,4 +50,5 @@ def sample_list_autonomous_database_backups(): for response in page_result: print(response) + # [END oracledatabase_v1_generated_OracleDatabase_ListAutonomousDatabaseBackups_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_database_character_sets_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_database_character_sets_async.py index 41213f7551c6..657c6eeb7b46 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_database_character_sets_async.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_database_character_sets_async.py @@ -50,4 +50,5 @@ async def sample_list_autonomous_database_character_sets(): async for response in page_result: print(response) + # [END oracledatabase_v1_generated_OracleDatabase_ListAutonomousDatabaseCharacterSets_async] diff --git 
a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_database_character_sets_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_database_character_sets_sync.py index 133f9eb01abe..192d99a80e1e 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_database_character_sets_sync.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_database_character_sets_sync.py @@ -50,4 +50,5 @@ def sample_list_autonomous_database_character_sets(): for response in page_result: print(response) + # [END oracledatabase_v1_generated_OracleDatabase_ListAutonomousDatabaseCharacterSets_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_databases_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_databases_async.py index 0e0d63db6009..64419a42dbb9 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_databases_async.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_databases_async.py @@ -50,4 +50,5 @@ async def sample_list_autonomous_databases(): async for response in page_result: print(response) + # [END oracledatabase_v1_generated_OracleDatabase_ListAutonomousDatabases_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_databases_sync.py 
b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_databases_sync.py index c1fad90cdd03..ab3ac6de1b7c 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_databases_sync.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_databases_sync.py @@ -50,4 +50,5 @@ def sample_list_autonomous_databases(): for response in page_result: print(response) + # [END oracledatabase_v1_generated_OracleDatabase_ListAutonomousDatabases_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_db_versions_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_db_versions_async.py index 81c1301a31d8..0d07b17435f1 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_db_versions_async.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_db_versions_async.py @@ -50,4 +50,5 @@ async def sample_list_autonomous_db_versions(): async for response in page_result: print(response) + # [END oracledatabase_v1_generated_OracleDatabase_ListAutonomousDbVersions_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_db_versions_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_db_versions_sync.py index 382677fc0bd8..6ce1bc8d3af4 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_db_versions_sync.py +++ 
b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_autonomous_db_versions_sync.py @@ -50,4 +50,5 @@ def sample_list_autonomous_db_versions(): for response in page_result: print(response) + # [END oracledatabase_v1_generated_OracleDatabase_ListAutonomousDbVersions_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_cloud_exadata_infrastructures_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_cloud_exadata_infrastructures_async.py index 7cbabe0ff5a3..0667e4d0467d 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_cloud_exadata_infrastructures_async.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_cloud_exadata_infrastructures_async.py @@ -50,4 +50,5 @@ async def sample_list_cloud_exadata_infrastructures(): async for response in page_result: print(response) + # [END oracledatabase_v1_generated_OracleDatabase_ListCloudExadataInfrastructures_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_cloud_exadata_infrastructures_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_cloud_exadata_infrastructures_sync.py index 84f44afa4ce5..460adc0d3aa7 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_cloud_exadata_infrastructures_sync.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_cloud_exadata_infrastructures_sync.py @@ -50,4 +50,5 @@ def sample_list_cloud_exadata_infrastructures(): for response in page_result: print(response) + 
# [END oracledatabase_v1_generated_OracleDatabase_ListCloudExadataInfrastructures_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_cloud_vm_clusters_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_cloud_vm_clusters_async.py index a3bd3e52a0fe..fba79b3b841f 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_cloud_vm_clusters_async.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_cloud_vm_clusters_async.py @@ -50,4 +50,5 @@ async def sample_list_cloud_vm_clusters(): async for response in page_result: print(response) + # [END oracledatabase_v1_generated_OracleDatabase_ListCloudVmClusters_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_cloud_vm_clusters_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_cloud_vm_clusters_sync.py index a0aaa8441091..3aa98e33c126 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_cloud_vm_clusters_sync.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_cloud_vm_clusters_sync.py @@ -50,4 +50,5 @@ def sample_list_cloud_vm_clusters(): for response in page_result: print(response) + # [END oracledatabase_v1_generated_OracleDatabase_ListCloudVmClusters_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_database_character_sets_async.py 
b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_database_character_sets_async.py new file mode 100644 index 000000000000..23418a2a7163 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_database_character_sets_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDatabaseCharacterSets +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_ListDatabaseCharacterSets_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_list_database_character_sets(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListDatabaseCharacterSetsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_database_character_sets(request=request) + + # Handle the response + async for response in page_result: + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_ListDatabaseCharacterSets_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_database_character_sets_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_database_character_sets_sync.py new file mode 100644 index 000000000000..7ff2a67ffef0 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_database_character_sets_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListDatabaseCharacterSets +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_ListDatabaseCharacterSets_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_list_database_character_sets(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListDatabaseCharacterSetsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_database_character_sets(request=request) + + # Handle the response + for response in page_result: + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_ListDatabaseCharacterSets_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_databases_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_databases_async.py new file mode 100644 index 000000000000..e8666df13c54 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_databases_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this 
file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDatabases +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_ListDatabases_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_list_databases(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListDatabasesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_databases(request=request) + + # Handle the response + async for response in page_result: + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_ListDatabases_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_databases_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_databases_sync.py new file mode 100644 index 000000000000..fac3d678bda1 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_databases_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDatabases +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_ListDatabases_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_list_databases(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListDatabasesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_databases(request=request) + + # Handle the response + for response in page_result: + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_ListDatabases_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_nodes_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_nodes_async.py index f9daf05c76d7..1da0f71678ed 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_nodes_async.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_nodes_async.py @@ -50,4 +50,5 @@ async def sample_list_db_nodes(): async for response in page_result: print(response) + # [END oracledatabase_v1_generated_OracleDatabase_ListDbNodes_async] diff --git 
a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_nodes_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_nodes_sync.py index 2a78b9ac1f35..2e6c42f7c12e 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_nodes_sync.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_nodes_sync.py @@ -50,4 +50,5 @@ def sample_list_db_nodes(): for response in page_result: print(response) + # [END oracledatabase_v1_generated_OracleDatabase_ListDbNodes_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_servers_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_servers_async.py index de055f13a037..5976106312ba 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_servers_async.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_servers_async.py @@ -50,4 +50,5 @@ async def sample_list_db_servers(): async for response in page_result: print(response) + # [END oracledatabase_v1_generated_OracleDatabase_ListDbServers_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_servers_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_servers_sync.py index a753ce3f9ac2..e3287a9740ce 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_servers_sync.py +++ 
b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_servers_sync.py @@ -50,4 +50,5 @@ def sample_list_db_servers(): for response in page_result: print(response) + # [END oracledatabase_v1_generated_OracleDatabase_ListDbServers_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_system_initial_storage_sizes_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_system_initial_storage_sizes_async.py new file mode 100644 index 000000000000..843d4757193b --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_system_initial_storage_sizes_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDbSystemInitialStorageSizes +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_ListDbSystemInitialStorageSizes_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_list_db_system_initial_storage_sizes(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListDbSystemInitialStorageSizesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_db_system_initial_storage_sizes(request=request) + + # Handle the response + async for response in page_result: + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_ListDbSystemInitialStorageSizes_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_system_initial_storage_sizes_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_system_initial_storage_sizes_sync.py new file mode 100644 index 000000000000..e9d9827b340e --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_system_initial_storage_sizes_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDbSystemInitialStorageSizes +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_ListDbSystemInitialStorageSizes_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_list_db_system_initial_storage_sizes(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListDbSystemInitialStorageSizesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_db_system_initial_storage_sizes(request=request) + + # Handle the response + for response in page_result: + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_ListDbSystemInitialStorageSizes_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_system_shapes_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_system_shapes_async.py index 70f6ce53bba0..831267a9ff7e 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_system_shapes_async.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_system_shapes_async.py @@ -50,4 +50,5 @@ async def sample_list_db_system_shapes(): async for response in page_result: print(response) + # [END oracledatabase_v1_generated_OracleDatabase_ListDbSystemShapes_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_system_shapes_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_system_shapes_sync.py index 83173a14a2c1..83c9d5a752c9 100644 --- 
a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_system_shapes_sync.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_system_shapes_sync.py @@ -50,4 +50,5 @@ def sample_list_db_system_shapes(): for response in page_result: print(response) + # [END oracledatabase_v1_generated_OracleDatabase_ListDbSystemShapes_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_systems_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_systems_async.py new file mode 100644 index 000000000000..66c576bfab98 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_systems_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDbSystems +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_ListDbSystems_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_list_db_systems(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListDbSystemsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_db_systems(request=request) + + # Handle the response + async for response in page_result: + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_ListDbSystems_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_systems_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_systems_sync.py new file mode 100644 index 000000000000..df0225c14dbf --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_systems_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDbSystems +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_ListDbSystems_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_list_db_systems(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListDbSystemsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_db_systems(request=request) + + # Handle the response + for response in page_result: + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_ListDbSystems_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_versions_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_versions_async.py new file mode 100644 index 000000000000..b0ba88213709 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_versions_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDbVersions +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_ListDbVersions_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_list_db_versions(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListDbVersionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_db_versions(request=request) + + # Handle the response + async for response in page_result: + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_ListDbVersions_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_versions_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_versions_sync.py new file mode 100644 index 000000000000..6d317e625171 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_db_versions_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDbVersions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_ListDbVersions_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_list_db_versions(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListDbVersionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_db_versions(request=request) + + # Handle the response + for response in page_result: + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_ListDbVersions_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_entitlements_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_entitlements_async.py index 03142f26c0c8..5fff2ed83c35 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_entitlements_async.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_entitlements_async.py @@ -50,4 +50,5 @@ async def sample_list_entitlements(): async for response in page_result: print(response) + # [END oracledatabase_v1_generated_OracleDatabase_ListEntitlements_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_entitlements_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_entitlements_sync.py index c04123478b23..ce7c1eb1f57b 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_entitlements_sync.py +++ 
b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_entitlements_sync.py @@ -50,4 +50,5 @@ def sample_list_entitlements(): for response in page_result: print(response) + # [END oracledatabase_v1_generated_OracleDatabase_ListEntitlements_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_exadb_vm_clusters_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_exadb_vm_clusters_async.py new file mode 100644 index 000000000000..7a457001cc25 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_exadb_vm_clusters_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListExadbVmClusters +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_ListExadbVmClusters_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. 
+# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_list_exadb_vm_clusters(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListExadbVmClustersRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_exadb_vm_clusters(request=request) + + # Handle the response + async for response in page_result: + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_ListExadbVmClusters_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_exadb_vm_clusters_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_exadb_vm_clusters_sync.py new file mode 100644 index 000000000000..17bf4adf2607 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_exadb_vm_clusters_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListExadbVmClusters +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_ListExadbVmClusters_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_list_exadb_vm_clusters(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListExadbVmClustersRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_exadb_vm_clusters(request=request) + + # Handle the response + for response in page_result: + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_ListExadbVmClusters_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_exascale_db_storage_vaults_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_exascale_db_storage_vaults_async.py new file mode 100644 index 000000000000..5aa9b68e7b42 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_exascale_db_storage_vaults_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you 
may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListExascaleDbStorageVaults +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_ListExascaleDbStorageVaults_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_list_exascale_db_storage_vaults(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListExascaleDbStorageVaultsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_exascale_db_storage_vaults(request=request) + + # Handle the response + async for response in page_result: + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_ListExascaleDbStorageVaults_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_exascale_db_storage_vaults_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_exascale_db_storage_vaults_sync.py new file mode 100644 index 000000000000..74f481fb1cc8 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_exascale_db_storage_vaults_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListExascaleDbStorageVaults +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_ListExascaleDbStorageVaults_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_list_exascale_db_storage_vaults(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListExascaleDbStorageVaultsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_exascale_db_storage_vaults(request=request) + + # Handle the response + for response in page_result: + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_ListExascaleDbStorageVaults_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_gi_versions_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_gi_versions_async.py index cd520193fe72..c21ca1c14990 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_gi_versions_async.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_gi_versions_async.py @@ -50,4 +50,5 @@ async def 
sample_list_gi_versions(): async for response in page_result: print(response) + # [END oracledatabase_v1_generated_OracleDatabase_ListGiVersions_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_gi_versions_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_gi_versions_sync.py index 956a85d3e5d0..55fbe3d249ce 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_gi_versions_sync.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_gi_versions_sync.py @@ -50,4 +50,5 @@ def sample_list_gi_versions(): for response in page_result: print(response) + # [END oracledatabase_v1_generated_OracleDatabase_ListGiVersions_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_minor_versions_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_minor_versions_async.py new file mode 100644 index 000000000000..3ae09f49d00d --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_minor_versions_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListMinorVersions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_ListMinorVersions_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_list_minor_versions(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListMinorVersionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_minor_versions(request=request) + + # Handle the response + async for response in page_result: + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_ListMinorVersions_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_minor_versions_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_minor_versions_sync.py new file mode 100644 index 000000000000..93ec32c54869 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_minor_versions_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# 
Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListMinorVersions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_ListMinorVersions_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_list_minor_versions(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListMinorVersionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_minor_versions(request=request) + + # Handle the response + for response in page_result: + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_ListMinorVersions_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_odb_networks_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_odb_networks_async.py new file mode 100644 index 000000000000..5908bc73e7da --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_odb_networks_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListOdbNetworks +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_ListOdbNetworks_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_list_odb_networks(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListOdbNetworksRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_odb_networks(request=request) + + # Handle the response + async for response in page_result: + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_ListOdbNetworks_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_odb_networks_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_odb_networks_sync.py new file mode 100644 index 000000000000..bfd9b9badce8 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_odb_networks_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListOdbNetworks +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_ListOdbNetworks_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_list_odb_networks(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListOdbNetworksRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_odb_networks(request=request) + + # Handle the response + for response in page_result: + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_ListOdbNetworks_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_odb_subnets_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_odb_subnets_async.py new file mode 100644 index 000000000000..4c7cba8c9a26 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_odb_subnets_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListOdbSubnets +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_ListOdbSubnets_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_list_odb_subnets(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListOdbSubnetsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_odb_subnets(request=request) + + # Handle the response + async for response in page_result: + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_ListOdbSubnets_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_odb_subnets_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_odb_subnets_sync.py new file mode 100644 index 000000000000..60b42ec741f2 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_odb_subnets_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListOdbSubnets +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_ListOdbSubnets_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_list_odb_subnets(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListOdbSubnetsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_odb_subnets(request=request) + + # Handle the response + for response in page_result: + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_ListOdbSubnets_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_pluggable_databases_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_pluggable_databases_async.py new file mode 100644 index 000000000000..4a03e921e388 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_pluggable_databases_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListPluggableDatabases +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_ListPluggableDatabases_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_list_pluggable_databases(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListPluggableDatabasesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_pluggable_databases(request=request) + + # Handle the response + async for response in page_result: + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_ListPluggableDatabases_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_pluggable_databases_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_pluggable_databases_sync.py new file mode 100644 index 000000000000..2f000cab53bb --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_list_pluggable_databases_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListPluggableDatabases +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_ListPluggableDatabases_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_list_pluggable_databases(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.ListPluggableDatabasesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_pluggable_databases(request=request) + + # Handle the response + for response in page_result: + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_ListPluggableDatabases_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_remove_virtual_machine_exadb_vm_cluster_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_remove_virtual_machine_exadb_vm_cluster_async.py new file mode 100644 index 000000000000..b0a29835fd86 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_remove_virtual_machine_exadb_vm_cluster_async.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for RemoveVirtualMachineExadbVmCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_RemoveVirtualMachineExadbVmCluster_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_remove_virtual_machine_exadb_vm_cluster(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.RemoveVirtualMachineExadbVmClusterRequest( + name="name_value", + hostnames=["hostnames_value1", "hostnames_value2"], + ) + + # Make the request + operation = client.remove_virtual_machine_exadb_vm_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_RemoveVirtualMachineExadbVmCluster_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_remove_virtual_machine_exadb_vm_cluster_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_remove_virtual_machine_exadb_vm_cluster_sync.py new file mode 100644 index 000000000000..cb7500b6cd12 --- /dev/null +++ 
b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_remove_virtual_machine_exadb_vm_cluster_sync.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for RemoveVirtualMachineExadbVmCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_RemoveVirtualMachineExadbVmCluster_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_remove_virtual_machine_exadb_vm_cluster(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.RemoveVirtualMachineExadbVmClusterRequest( + name="name_value", + hostnames=["hostnames_value1", "hostnames_value2"], + ) + + # Make the request + operation = client.remove_virtual_machine_exadb_vm_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_RemoveVirtualMachineExadbVmCluster_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_restart_autonomous_database_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_restart_autonomous_database_async.py index 1ccd1338c07b..3406ef54f2b4 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_restart_autonomous_database_async.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_restart_autonomous_database_async.py @@ -53,4 +53,5 @@ async def sample_restart_autonomous_database(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_RestartAutonomousDatabase_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_restart_autonomous_database_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_restart_autonomous_database_sync.py 
index c370b139d37e..ef0956ddca4b 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_restart_autonomous_database_sync.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_restart_autonomous_database_sync.py @@ -53,4 +53,5 @@ def sample_restart_autonomous_database(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_RestartAutonomousDatabase_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_restore_autonomous_database_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_restore_autonomous_database_async.py index c7a0ebeea56f..fe083c7d5e57 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_restore_autonomous_database_async.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_restore_autonomous_database_async.py @@ -53,4 +53,5 @@ async def sample_restore_autonomous_database(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_RestoreAutonomousDatabase_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_restore_autonomous_database_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_restore_autonomous_database_sync.py index 220a7c9a0754..5ae94689445b 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_restore_autonomous_database_sync.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_restore_autonomous_database_sync.py @@ -53,4 
+53,5 @@ def sample_restore_autonomous_database(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_RestoreAutonomousDatabase_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_start_autonomous_database_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_start_autonomous_database_async.py index bd04c9642701..85e091f08c8a 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_start_autonomous_database_async.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_start_autonomous_database_async.py @@ -53,4 +53,5 @@ async def sample_start_autonomous_database(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_StartAutonomousDatabase_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_start_autonomous_database_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_start_autonomous_database_sync.py index 53b9481edab0..d23a53fee17f 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_start_autonomous_database_sync.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_start_autonomous_database_sync.py @@ -53,4 +53,5 @@ def sample_start_autonomous_database(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_StartAutonomousDatabase_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_stop_autonomous_database_async.py 
b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_stop_autonomous_database_async.py index 60f29efc8fa9..4d0601fd5877 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_stop_autonomous_database_async.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_stop_autonomous_database_async.py @@ -53,4 +53,5 @@ async def sample_stop_autonomous_database(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_StopAutonomousDatabase_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_stop_autonomous_database_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_stop_autonomous_database_sync.py index 80e3884f1c22..9cb8c126e3ba 100644 --- a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_stop_autonomous_database_sync.py +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_stop_autonomous_database_sync.py @@ -53,4 +53,5 @@ def sample_stop_autonomous_database(): # Handle the response print(response) + # [END oracledatabase_v1_generated_OracleDatabase_StopAutonomousDatabase_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_switchover_autonomous_database_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_switchover_autonomous_database_async.py new file mode 100644 index 000000000000..2137e4a20ca6 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_switchover_autonomous_database_async.py @@ 
-0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SwitchoverAutonomousDatabase +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_SwitchoverAutonomousDatabase_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_switchover_autonomous_database(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.SwitchoverAutonomousDatabaseRequest( + name="name_value", + peer_autonomous_database="peer_autonomous_database_value", + ) + + # Make the request + operation = client.switchover_autonomous_database(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_SwitchoverAutonomousDatabase_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_switchover_autonomous_database_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_switchover_autonomous_database_sync.py new file mode 100644 index 000000000000..d123b6eff15f --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_switchover_autonomous_database_sync.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SwitchoverAutonomousDatabase +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_SwitchoverAutonomousDatabase_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_switchover_autonomous_database(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.SwitchoverAutonomousDatabaseRequest( + name="name_value", + peer_autonomous_database="peer_autonomous_database_value", + ) + + # Make the request + operation = client.switchover_autonomous_database(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_SwitchoverAutonomousDatabase_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_update_autonomous_database_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_update_autonomous_database_async.py new file mode 100644 index 000000000000..16d758c5b858 --- /dev/null +++ 
b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_update_autonomous_database_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateAutonomousDatabase +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_UpdateAutonomousDatabase_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_update_autonomous_database(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + request = oracledatabase_v1.UpdateAutonomousDatabaseRequest() + + # Make the request + operation = client.update_autonomous_database(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_UpdateAutonomousDatabase_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_update_autonomous_database_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_update_autonomous_database_sync.py new file mode 100644 index 000000000000..7779146d9e00 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_update_autonomous_database_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for UpdateAutonomousDatabase +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_UpdateAutonomousDatabase_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_update_autonomous_database(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + request = oracledatabase_v1.UpdateAutonomousDatabaseRequest() + + # Make the request + operation = client.update_autonomous_database(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_UpdateAutonomousDatabase_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_update_exadb_vm_cluster_async.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_update_exadb_vm_cluster_async.py new file mode 100644 index 000000000000..3022b1093870 --- /dev/null +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_update_exadb_vm_cluster_async.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 
(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateExadbVmCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_UpdateExadbVmCluster_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +async def sample_update_exadb_vm_cluster(): + # Create a client + client = oracledatabase_v1.OracleDatabaseAsyncClient() + + # Initialize request argument(s) + exadb_vm_cluster = oracledatabase_v1.ExadbVmCluster() + exadb_vm_cluster.properties.grid_image_id = "grid_image_id_value" + exadb_vm_cluster.properties.node_count = 1070 + exadb_vm_cluster.properties.enabled_ecpu_count_per_node = 2826 + exadb_vm_cluster.properties.vm_file_system_storage.size_in_gbs_per_node = 2103 + exadb_vm_cluster.properties.exascale_db_storage_vault = ( + "exascale_db_storage_vault_value" + ) + exadb_vm_cluster.properties.hostname_prefix = "hostname_prefix_value" + exadb_vm_cluster.properties.ssh_public_keys = [ + "ssh_public_keys_value1", + "ssh_public_keys_value2", + ] + exadb_vm_cluster.properties.shape_attribute = "BLOCK_STORAGE" + exadb_vm_cluster.odb_subnet = "odb_subnet_value" + exadb_vm_cluster.backup_odb_subnet = "backup_odb_subnet_value" + exadb_vm_cluster.display_name = "display_name_value" + + request = oracledatabase_v1.UpdateExadbVmClusterRequest( + exadb_vm_cluster=exadb_vm_cluster, + ) + + # Make the request + operation = client.update_exadb_vm_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_UpdateExadbVmCluster_async] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_update_exadb_vm_cluster_sync.py b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_update_exadb_vm_cluster_sync.py new file mode 100644 index 000000000000..cf7dbdea64cd --- /dev/null +++ 
b/packages/google-cloud-oracledatabase/samples/generated_samples/oracledatabase_v1_generated_oracle_database_update_exadb_vm_cluster_sync.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateExadbVmCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-oracledatabase + + +# [START oracledatabase_v1_generated_OracleDatabase_UpdateExadbVmCluster_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import oracledatabase_v1 + + +def sample_update_exadb_vm_cluster(): + # Create a client + client = oracledatabase_v1.OracleDatabaseClient() + + # Initialize request argument(s) + exadb_vm_cluster = oracledatabase_v1.ExadbVmCluster() + exadb_vm_cluster.properties.grid_image_id = "grid_image_id_value" + exadb_vm_cluster.properties.node_count = 1070 + exadb_vm_cluster.properties.enabled_ecpu_count_per_node = 2826 + exadb_vm_cluster.properties.vm_file_system_storage.size_in_gbs_per_node = 2103 + exadb_vm_cluster.properties.exascale_db_storage_vault = ( + "exascale_db_storage_vault_value" + ) + exadb_vm_cluster.properties.hostname_prefix = "hostname_prefix_value" + exadb_vm_cluster.properties.ssh_public_keys = [ + "ssh_public_keys_value1", + "ssh_public_keys_value2", + ] + exadb_vm_cluster.properties.shape_attribute = "BLOCK_STORAGE" + exadb_vm_cluster.odb_subnet = "odb_subnet_value" + exadb_vm_cluster.backup_odb_subnet = "backup_odb_subnet_value" + exadb_vm_cluster.display_name = "display_name_value" + + request = oracledatabase_v1.UpdateExadbVmClusterRequest( + exadb_vm_cluster=exadb_vm_cluster, + ) + + # Make the request + operation = client.update_exadb_vm_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + +# [END oracledatabase_v1_generated_OracleDatabase_UpdateExadbVmCluster_sync] diff --git a/packages/google-cloud-oracledatabase/samples/generated_samples/snippet_metadata_google.cloud.oracledatabase.v1.json b/packages/google-cloud-oracledatabase/samples/generated_samples/snippet_metadata_google.cloud.oracledatabase.v1.json index 513af7656067..967cc43bbdf6 100644 --- 
a/packages/google-cloud-oracledatabase/samples/generated_samples/snippet_metadata_google.cloud.oracledatabase.v1.json +++ b/packages/google-cloud-oracledatabase/samples/generated_samples/snippet_metadata_google.cloud.oracledatabase.v1.json @@ -422,12 +422,12 @@ "regionTag": "oracledatabase_v1_generated_OracleDatabase_CreateCloudVmCluster_async", "segments": [ { - "end": 63, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 63, + "end": 60, "start": 27, "type": "SHORT" }, @@ -437,18 +437,18 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 53, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 60, - "start": 54, + "end": 57, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 64, - "start": 61, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], @@ -510,12 +510,12 @@ "regionTag": "oracledatabase_v1_generated_OracleDatabase_CreateCloudVmCluster_sync", "segments": [ { - "end": 63, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 63, + "end": 60, "start": 27, "type": "SHORT" }, @@ -525,18 +525,18 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 53, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 60, - "start": 54, + "end": 57, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 64, - "start": 61, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], @@ -550,22 +550,30 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", "shortName": "OracleDatabaseAsyncClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.delete_autonomous_database", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.create_db_system", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.DeleteAutonomousDatabase", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.CreateDbSystem", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": 
"DeleteAutonomousDatabase" + "shortName": "CreateDbSystem" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.DeleteAutonomousDatabaseRequest" + "type": "google.cloud.oracledatabase_v1.types.CreateDbSystemRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "db_system", + "type": "google.cloud.oracledatabase_v1.types.DbSystem" + }, + { + "name": "db_system_id", "type": "str" }, { @@ -582,21 +590,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_autonomous_database" + "shortName": "create_db_system" }, - "description": "Sample for DeleteAutonomousDatabase", - "file": "oracledatabase_v1_generated_oracle_database_delete_autonomous_database_async.py", + "description": "Sample for CreateDbSystem", + "file": "oracledatabase_v1_generated_oracle_database_create_db_system_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_DeleteAutonomousDatabase_async", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_CreateDbSystem_async", "segments": [ { - "end": 55, + "end": 61, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 61, "start": 27, "type": "SHORT" }, @@ -606,22 +614,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 51, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 58, + "start": 52, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 62, + "start": 59, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_delete_autonomous_database_async.py" + "title": "oracledatabase_v1_generated_oracle_database_create_db_system_async.py" }, { "canonical": true, @@ -630,22 +638,30 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", "shortName": "OracleDatabaseClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.delete_autonomous_database", 
+ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.create_db_system", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.DeleteAutonomousDatabase", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.CreateDbSystem", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "DeleteAutonomousDatabase" + "shortName": "CreateDbSystem" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.DeleteAutonomousDatabaseRequest" + "type": "google.cloud.oracledatabase_v1.types.CreateDbSystemRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "db_system", + "type": "google.cloud.oracledatabase_v1.types.DbSystem" + }, + { + "name": "db_system_id", "type": "str" }, { @@ -662,21 +678,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "delete_autonomous_database" + "shortName": "create_db_system" }, - "description": "Sample for DeleteAutonomousDatabase", - "file": "oracledatabase_v1_generated_oracle_database_delete_autonomous_database_sync.py", + "description": "Sample for CreateDbSystem", + "file": "oracledatabase_v1_generated_oracle_database_create_db_system_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_DeleteAutonomousDatabase_sync", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_CreateDbSystem_sync", "segments": [ { - "end": 55, + "end": 61, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 61, "start": 27, "type": "SHORT" }, @@ -686,22 +702,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 51, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 58, + "start": 52, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 62, + "start": 59, "type": "RESPONSE_HANDLING" } ], - "title": 
"oracledatabase_v1_generated_oracle_database_delete_autonomous_database_sync.py" + "title": "oracledatabase_v1_generated_oracle_database_create_db_system_sync.py" }, { "canonical": true, @@ -711,22 +727,30 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", "shortName": "OracleDatabaseAsyncClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.delete_cloud_exadata_infrastructure", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.create_exadb_vm_cluster", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.DeleteCloudExadataInfrastructure", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.CreateExadbVmCluster", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "DeleteCloudExadataInfrastructure" + "shortName": "CreateExadbVmCluster" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.DeleteCloudExadataInfrastructureRequest" + "type": "google.cloud.oracledatabase_v1.types.CreateExadbVmClusterRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "exadb_vm_cluster", + "type": "google.cloud.oracledatabase_v1.types.ExadbVmCluster" + }, + { + "name": "exadb_vm_cluster_id", "type": "str" }, { @@ -743,21 +767,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_cloud_exadata_infrastructure" + "shortName": "create_exadb_vm_cluster" }, - "description": "Sample for DeleteCloudExadataInfrastructure", - "file": "oracledatabase_v1_generated_oracle_database_delete_cloud_exadata_infrastructure_async.py", + "description": "Sample for CreateExadbVmCluster", + "file": "oracledatabase_v1_generated_oracle_database_create_exadb_vm_cluster_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_DeleteCloudExadataInfrastructure_async", + 
"regionTag": "oracledatabase_v1_generated_OracleDatabase_CreateExadbVmCluster_async", "segments": [ { - "end": 55, + "end": 70, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 70, "start": 27, "type": "SHORT" }, @@ -767,22 +791,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 60, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 67, + "start": 61, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 71, + "start": 68, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_delete_cloud_exadata_infrastructure_async.py" + "title": "oracledatabase_v1_generated_oracle_database_create_exadb_vm_cluster_async.py" }, { "canonical": true, @@ -791,22 +815,30 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", "shortName": "OracleDatabaseClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.delete_cloud_exadata_infrastructure", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.create_exadb_vm_cluster", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.DeleteCloudExadataInfrastructure", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.CreateExadbVmCluster", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "DeleteCloudExadataInfrastructure" + "shortName": "CreateExadbVmCluster" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.DeleteCloudExadataInfrastructureRequest" + "type": "google.cloud.oracledatabase_v1.types.CreateExadbVmClusterRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "exadb_vm_cluster", + "type": "google.cloud.oracledatabase_v1.types.ExadbVmCluster" + }, + { + "name": "exadb_vm_cluster_id", "type": "str" }, { @@ -823,21 +855,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": 
"delete_cloud_exadata_infrastructure" + "shortName": "create_exadb_vm_cluster" }, - "description": "Sample for DeleteCloudExadataInfrastructure", - "file": "oracledatabase_v1_generated_oracle_database_delete_cloud_exadata_infrastructure_sync.py", + "description": "Sample for CreateExadbVmCluster", + "file": "oracledatabase_v1_generated_oracle_database_create_exadb_vm_cluster_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_DeleteCloudExadataInfrastructure_sync", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_CreateExadbVmCluster_sync", "segments": [ { - "end": 55, + "end": 70, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 70, "start": 27, "type": "SHORT" }, @@ -847,22 +879,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 60, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 67, + "start": 61, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 71, + "start": 68, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_delete_cloud_exadata_infrastructure_sync.py" + "title": "oracledatabase_v1_generated_oracle_database_create_exadb_vm_cluster_sync.py" }, { "canonical": true, @@ -872,22 +904,30 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", "shortName": "OracleDatabaseAsyncClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.delete_cloud_vm_cluster", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.create_exascale_db_storage_vault", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.DeleteCloudVmCluster", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.CreateExascaleDbStorageVault", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "DeleteCloudVmCluster" + "shortName": 
"CreateExascaleDbStorageVault" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.DeleteCloudVmClusterRequest" + "type": "google.cloud.oracledatabase_v1.types.CreateExascaleDbStorageVaultRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "exascale_db_storage_vault", + "type": "google.cloud.oracledatabase_v1.types.ExascaleDbStorageVault" + }, + { + "name": "exascale_db_storage_vault_id", "type": "str" }, { @@ -904,21 +944,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_cloud_vm_cluster" + "shortName": "create_exascale_db_storage_vault" }, - "description": "Sample for DeleteCloudVmCluster", - "file": "oracledatabase_v1_generated_oracle_database_delete_cloud_vm_cluster_async.py", + "description": "Sample for CreateExascaleDbStorageVault", + "file": "oracledatabase_v1_generated_oracle_database_create_exascale_db_storage_vault_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_DeleteCloudVmCluster_async", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_CreateExascaleDbStorageVault_async", "segments": [ { - "end": 55, + "end": 61, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 61, "start": 27, "type": "SHORT" }, @@ -928,22 +968,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 51, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 58, + "start": 52, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 62, + "start": 59, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_delete_cloud_vm_cluster_async.py" + "title": "oracledatabase_v1_generated_oracle_database_create_exascale_db_storage_vault_async.py" }, { "canonical": true, @@ -952,22 +992,30 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", "shortName": "OracleDatabaseClient" }, - 
"fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.delete_cloud_vm_cluster", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.create_exascale_db_storage_vault", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.DeleteCloudVmCluster", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.CreateExascaleDbStorageVault", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "DeleteCloudVmCluster" + "shortName": "CreateExascaleDbStorageVault" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.DeleteCloudVmClusterRequest" + "type": "google.cloud.oracledatabase_v1.types.CreateExascaleDbStorageVaultRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "exascale_db_storage_vault", + "type": "google.cloud.oracledatabase_v1.types.ExascaleDbStorageVault" + }, + { + "name": "exascale_db_storage_vault_id", "type": "str" }, { @@ -984,21 +1032,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "delete_cloud_vm_cluster" + "shortName": "create_exascale_db_storage_vault" }, - "description": "Sample for DeleteCloudVmCluster", - "file": "oracledatabase_v1_generated_oracle_database_delete_cloud_vm_cluster_sync.py", + "description": "Sample for CreateExascaleDbStorageVault", + "file": "oracledatabase_v1_generated_oracle_database_create_exascale_db_storage_vault_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_DeleteCloudVmCluster_sync", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_CreateExascaleDbStorageVault_sync", "segments": [ { - "end": 55, + "end": 61, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 61, "start": 27, "type": "SHORT" }, @@ -1008,22 +1056,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 51, "start": 41, "type": "REQUEST_INITIALIZATION" }, { 
- "end": 52, - "start": 46, + "end": 58, + "start": 52, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 62, + "start": 59, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_delete_cloud_vm_cluster_sync.py" + "title": "oracledatabase_v1_generated_oracle_database_create_exascale_db_storage_vault_sync.py" }, { "canonical": true, @@ -1033,34 +1081,30 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", "shortName": "OracleDatabaseAsyncClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.generate_autonomous_database_wallet", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.create_odb_network", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GenerateAutonomousDatabaseWallet", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.CreateOdbNetwork", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "GenerateAutonomousDatabaseWallet" + "shortName": "CreateOdbNetwork" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.GenerateAutonomousDatabaseWalletRequest" + "type": "google.cloud.oracledatabase_v1.types.CreateOdbNetworkRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { - "name": "type_", - "type": "google.cloud.oracledatabase_v1.types.GenerateType" - }, - { - "name": "is_regional", - "type": "bool" + "name": "odb_network", + "type": "google.cloud.oracledatabase_v1.types.OdbNetwork" }, { - "name": "password", + "name": "odb_network_id", "type": "str" }, { @@ -1076,22 +1120,22 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.types.GenerateAutonomousDatabaseWalletResponse", - "shortName": "generate_autonomous_database_wallet" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_odb_network" }, 
- "description": "Sample for GenerateAutonomousDatabaseWallet", - "file": "oracledatabase_v1_generated_oracle_database_generate_autonomous_database_wallet_async.py", + "description": "Sample for CreateOdbNetwork", + "file": "oracledatabase_v1_generated_oracle_database_create_odb_network_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_GenerateAutonomousDatabaseWallet_async", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_CreateOdbNetwork_async", "segments": [ { - "end": 52, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 60, "start": 27, "type": "SHORT" }, @@ -1101,22 +1145,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 49, - "start": 47, + "end": 57, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 50, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_generate_autonomous_database_wallet_async.py" + "title": "oracledatabase_v1_generated_oracle_database_create_odb_network_async.py" }, { "canonical": true, @@ -1125,34 +1169,30 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", "shortName": "OracleDatabaseClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.generate_autonomous_database_wallet", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.create_odb_network", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GenerateAutonomousDatabaseWallet", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.CreateOdbNetwork", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "GenerateAutonomousDatabaseWallet" + "shortName": "CreateOdbNetwork" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.oracledatabase_v1.types.GenerateAutonomousDatabaseWalletRequest" + "type": "google.cloud.oracledatabase_v1.types.CreateOdbNetworkRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { - "name": "type_", - "type": "google.cloud.oracledatabase_v1.types.GenerateType" - }, - { - "name": "is_regional", - "type": "bool" + "name": "odb_network", + "type": "google.cloud.oracledatabase_v1.types.OdbNetwork" }, { - "name": "password", + "name": "odb_network_id", "type": "str" }, { @@ -1168,22 +1208,22 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.types.GenerateAutonomousDatabaseWalletResponse", - "shortName": "generate_autonomous_database_wallet" + "resultType": "google.api_core.operation.Operation", + "shortName": "create_odb_network" }, - "description": "Sample for GenerateAutonomousDatabaseWallet", - "file": "oracledatabase_v1_generated_oracle_database_generate_autonomous_database_wallet_sync.py", + "description": "Sample for CreateOdbNetwork", + "file": "oracledatabase_v1_generated_oracle_database_create_odb_network_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_GenerateAutonomousDatabaseWallet_sync", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_CreateOdbNetwork_sync", "segments": [ { - "end": 52, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 60, "start": 27, "type": "SHORT" }, @@ -1193,22 +1233,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 49, - "start": 47, + "end": 57, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 50, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_generate_autonomous_database_wallet_sync.py" + "title": "oracledatabase_v1_generated_oracle_database_create_odb_network_sync.py" }, { 
"canonical": true, @@ -1218,22 +1258,30 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", "shortName": "OracleDatabaseAsyncClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.get_autonomous_database", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.create_odb_subnet", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetAutonomousDatabase", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.CreateOdbSubnet", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "GetAutonomousDatabase" + "shortName": "CreateOdbSubnet" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.GetAutonomousDatabaseRequest" + "type": "google.cloud.oracledatabase_v1.types.CreateOdbSubnetRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "odb_subnet", + "type": "google.cloud.oracledatabase_v1.types.OdbSubnet" + }, + { + "name": "odb_subnet_id", "type": "str" }, { @@ -1249,22 +1297,22 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.types.AutonomousDatabase", - "shortName": "get_autonomous_database" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_odb_subnet" }, - "description": "Sample for GetAutonomousDatabase", - "file": "oracledatabase_v1_generated_oracle_database_get_autonomous_database_async.py", + "description": "Sample for CreateOdbSubnet", + "file": "oracledatabase_v1_generated_oracle_database_create_odb_subnet_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetAutonomousDatabase_async", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_CreateOdbSubnet_async", "segments": [ { - "end": 51, + "end": 61, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 61, 
"start": 27, "type": "SHORT" }, @@ -1274,22 +1322,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 51, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 58, + "start": 52, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 62, + "start": 59, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_get_autonomous_database_async.py" + "title": "oracledatabase_v1_generated_oracle_database_create_odb_subnet_async.py" }, { "canonical": true, @@ -1298,53 +1346,5326 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", "shortName": "OracleDatabaseClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.get_autonomous_database", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.create_odb_subnet", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetAutonomousDatabase", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.CreateOdbSubnet", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "GetAutonomousDatabase" + "shortName": "CreateOdbSubnet" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.GetAutonomousDatabaseRequest" + "type": "google.cloud.oracledatabase_v1.types.CreateOdbSubnetRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { - "name": "retry", - "type": "google.api_core.retry.Retry" + "name": "odb_subnet", + "type": "google.cloud.oracledatabase_v1.types.OdbSubnet" }, { - "name": "timeout", - "type": "float" + "name": "odb_subnet_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_odb_subnet" + }, + 
"description": "Sample for CreateOdbSubnet", + "file": "oracledatabase_v1_generated_oracle_database_create_odb_subnet_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_CreateOdbSubnet_sync", + "segments": [ + { + "end": 61, + "start": 27, + "type": "FULL" + }, + { + "end": 61, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 51, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 58, + "start": 52, + "type": "REQUEST_EXECUTION" + }, + { + "end": 62, + "start": 59, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_create_odb_subnet_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.delete_autonomous_database", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.DeleteAutonomousDatabase", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "DeleteAutonomousDatabase" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.DeleteAutonomousDatabaseRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_autonomous_database" + }, + "description": "Sample for DeleteAutonomousDatabase", + "file": "oracledatabase_v1_generated_oracle_database_delete_autonomous_database_async.py", + "language": "PYTHON", + "origin": 
"API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_DeleteAutonomousDatabase_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_delete_autonomous_database_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.delete_autonomous_database", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.DeleteAutonomousDatabase", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "DeleteAutonomousDatabase" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.DeleteAutonomousDatabaseRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_autonomous_database" + }, + "description": "Sample for DeleteAutonomousDatabase", + "file": "oracledatabase_v1_generated_oracle_database_delete_autonomous_database_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_DeleteAutonomousDatabase_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 
55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_delete_autonomous_database_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.delete_cloud_exadata_infrastructure", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.DeleteCloudExadataInfrastructure", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "DeleteCloudExadataInfrastructure" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.DeleteCloudExadataInfrastructureRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_cloud_exadata_infrastructure" + }, + "description": "Sample for DeleteCloudExadataInfrastructure", + "file": "oracledatabase_v1_generated_oracle_database_delete_cloud_exadata_infrastructure_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_DeleteCloudExadataInfrastructure_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + 
"type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_delete_cloud_exadata_infrastructure_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.delete_cloud_exadata_infrastructure", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.DeleteCloudExadataInfrastructure", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "DeleteCloudExadataInfrastructure" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.DeleteCloudExadataInfrastructureRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_cloud_exadata_infrastructure" + }, + "description": "Sample for DeleteCloudExadataInfrastructure", + "file": "oracledatabase_v1_generated_oracle_database_delete_cloud_exadata_infrastructure_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_DeleteCloudExadataInfrastructure_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + 
"end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_delete_cloud_exadata_infrastructure_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.delete_cloud_vm_cluster", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.DeleteCloudVmCluster", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "DeleteCloudVmCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.DeleteCloudVmClusterRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_cloud_vm_cluster" + }, + "description": "Sample for DeleteCloudVmCluster", + "file": "oracledatabase_v1_generated_oracle_database_delete_cloud_vm_cluster_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_DeleteCloudVmCluster_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"oracledatabase_v1_generated_oracle_database_delete_cloud_vm_cluster_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.delete_cloud_vm_cluster", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.DeleteCloudVmCluster", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "DeleteCloudVmCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.DeleteCloudVmClusterRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_cloud_vm_cluster" + }, + "description": "Sample for DeleteCloudVmCluster", + "file": "oracledatabase_v1_generated_oracle_database_delete_cloud_vm_cluster_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_DeleteCloudVmCluster_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_delete_cloud_vm_cluster_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": 
"google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.delete_db_system", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.DeleteDbSystem", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "DeleteDbSystem" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.DeleteDbSystemRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_db_system" + }, + "description": "Sample for DeleteDbSystem", + "file": "oracledatabase_v1_generated_oracle_database_delete_db_system_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_DeleteDbSystem_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_delete_db_system_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.delete_db_system", + "method": { + "fullName": 
"google.cloud.oracledatabase.v1.OracleDatabase.DeleteDbSystem", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "DeleteDbSystem" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.DeleteDbSystemRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_db_system" + }, + "description": "Sample for DeleteDbSystem", + "file": "oracledatabase_v1_generated_oracle_database_delete_db_system_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_DeleteDbSystem_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_delete_db_system_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.delete_exadb_vm_cluster", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.DeleteExadbVmCluster", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "DeleteExadbVmCluster" + }, + "parameters": 
[ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.DeleteExadbVmClusterRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_exadb_vm_cluster" + }, + "description": "Sample for DeleteExadbVmCluster", + "file": "oracledatabase_v1_generated_oracle_database_delete_exadb_vm_cluster_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_DeleteExadbVmCluster_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_delete_exadb_vm_cluster_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.delete_exadb_vm_cluster", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.DeleteExadbVmCluster", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "DeleteExadbVmCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.DeleteExadbVmClusterRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": 
"google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_exadb_vm_cluster" + }, + "description": "Sample for DeleteExadbVmCluster", + "file": "oracledatabase_v1_generated_oracle_database_delete_exadb_vm_cluster_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_DeleteExadbVmCluster_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_delete_exadb_vm_cluster_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.delete_exascale_db_storage_vault", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.DeleteExascaleDbStorageVault", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "DeleteExascaleDbStorageVault" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.DeleteExascaleDbStorageVaultRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" 
+ } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_exascale_db_storage_vault" + }, + "description": "Sample for DeleteExascaleDbStorageVault", + "file": "oracledatabase_v1_generated_oracle_database_delete_exascale_db_storage_vault_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_DeleteExascaleDbStorageVault_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_delete_exascale_db_storage_vault_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.delete_exascale_db_storage_vault", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.DeleteExascaleDbStorageVault", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "DeleteExascaleDbStorageVault" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.DeleteExascaleDbStorageVaultRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_exascale_db_storage_vault" + }, + "description": 
"Sample for DeleteExascaleDbStorageVault", + "file": "oracledatabase_v1_generated_oracle_database_delete_exascale_db_storage_vault_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_DeleteExascaleDbStorageVault_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_delete_exascale_db_storage_vault_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.delete_odb_network", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.DeleteOdbNetwork", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "DeleteOdbNetwork" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.DeleteOdbNetworkRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_odb_network" + }, + "description": "Sample for DeleteOdbNetwork", + "file": "oracledatabase_v1_generated_oracle_database_delete_odb_network_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", 
+ "regionTag": "oracledatabase_v1_generated_OracleDatabase_DeleteOdbNetwork_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_delete_odb_network_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.delete_odb_network", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.DeleteOdbNetwork", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "DeleteOdbNetwork" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.DeleteOdbNetworkRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_odb_network" + }, + "description": "Sample for DeleteOdbNetwork", + "file": "oracledatabase_v1_generated_oracle_database_delete_odb_network_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_DeleteOdbNetwork_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": 
"CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_delete_odb_network_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.delete_odb_subnet", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.DeleteOdbSubnet", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "DeleteOdbSubnet" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.DeleteOdbSubnetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_odb_subnet" + }, + "description": "Sample for DeleteOdbSubnet", + "file": "oracledatabase_v1_generated_oracle_database_delete_odb_subnet_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_DeleteOdbSubnet_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": 
"RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_delete_odb_subnet_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.delete_odb_subnet", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.DeleteOdbSubnet", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "DeleteOdbSubnet" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.DeleteOdbSubnetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_odb_subnet" + }, + "description": "Sample for DeleteOdbSubnet", + "file": "oracledatabase_v1_generated_oracle_database_delete_odb_subnet_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_DeleteOdbSubnet_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_delete_odb_subnet_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + 
"shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.failover_autonomous_database", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.FailoverAutonomousDatabase", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "FailoverAutonomousDatabase" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.FailoverAutonomousDatabaseRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "peer_autonomous_database", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "failover_autonomous_database" + }, + "description": "Sample for FailoverAutonomousDatabase", + "file": "oracledatabase_v1_generated_oracle_database_failover_autonomous_database_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_FailoverAutonomousDatabase_async", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_failover_autonomous_database_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": 
"google.cloud.oracledatabase_v1.OracleDatabaseClient.failover_autonomous_database", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.FailoverAutonomousDatabase", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "FailoverAutonomousDatabase" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.FailoverAutonomousDatabaseRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "peer_autonomous_database", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "failover_autonomous_database" + }, + "description": "Sample for FailoverAutonomousDatabase", + "file": "oracledatabase_v1_generated_oracle_database_failover_autonomous_database_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_FailoverAutonomousDatabase_sync", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_failover_autonomous_database_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": 
"google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.generate_autonomous_database_wallet", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GenerateAutonomousDatabaseWallet", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "GenerateAutonomousDatabaseWallet" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.GenerateAutonomousDatabaseWalletRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "type_", + "type": "google.cloud.oracledatabase_v1.types.GenerateType" + }, + { + "name": "is_regional", + "type": "bool" + }, + { + "name": "password", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.types.GenerateAutonomousDatabaseWalletResponse", + "shortName": "generate_autonomous_database_wallet" + }, + "description": "Sample for GenerateAutonomousDatabaseWallet", + "file": "oracledatabase_v1_generated_oracle_database_generate_autonomous_database_wallet_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_GenerateAutonomousDatabaseWallet_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_generate_autonomous_database_wallet_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + 
"fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.generate_autonomous_database_wallet", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GenerateAutonomousDatabaseWallet", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "GenerateAutonomousDatabaseWallet" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.GenerateAutonomousDatabaseWalletRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "type_", + "type": "google.cloud.oracledatabase_v1.types.GenerateType" + }, + { + "name": "is_regional", + "type": "bool" + }, + { + "name": "password", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.types.GenerateAutonomousDatabaseWalletResponse", + "shortName": "generate_autonomous_database_wallet" + }, + "description": "Sample for GenerateAutonomousDatabaseWallet", + "file": "oracledatabase_v1_generated_oracle_database_generate_autonomous_database_wallet_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_GenerateAutonomousDatabaseWallet_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"oracledatabase_v1_generated_oracle_database_generate_autonomous_database_wallet_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.get_autonomous_database", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetAutonomousDatabase", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "GetAutonomousDatabase" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.GetAutonomousDatabaseRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.types.AutonomousDatabase", + "shortName": "get_autonomous_database" + }, + "description": "Sample for GetAutonomousDatabase", + "file": "oracledatabase_v1_generated_oracle_database_get_autonomous_database_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetAutonomousDatabase_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_get_autonomous_database_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + 
"fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.get_autonomous_database", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetAutonomousDatabase", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "GetAutonomousDatabase" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.GetAutonomousDatabaseRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.types.AutonomousDatabase", + "shortName": "get_autonomous_database" + }, + "description": "Sample for GetAutonomousDatabase", + "file": "oracledatabase_v1_generated_oracle_database_get_autonomous_database_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetAutonomousDatabase_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_get_autonomous_database_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": 
"google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.get_cloud_exadata_infrastructure", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetCloudExadataInfrastructure", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "GetCloudExadataInfrastructure" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.GetCloudExadataInfrastructureRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.types.CloudExadataInfrastructure", + "shortName": "get_cloud_exadata_infrastructure" + }, + "description": "Sample for GetCloudExadataInfrastructure", + "file": "oracledatabase_v1_generated_oracle_database_get_cloud_exadata_infrastructure_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetCloudExadataInfrastructure_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_get_cloud_exadata_infrastructure_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.get_cloud_exadata_infrastructure", + "method": { + 
"fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetCloudExadataInfrastructure", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "GetCloudExadataInfrastructure" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.GetCloudExadataInfrastructureRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.types.CloudExadataInfrastructure", + "shortName": "get_cloud_exadata_infrastructure" + }, + "description": "Sample for GetCloudExadataInfrastructure", + "file": "oracledatabase_v1_generated_oracle_database_get_cloud_exadata_infrastructure_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetCloudExadataInfrastructure_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_get_cloud_exadata_infrastructure_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.get_cloud_vm_cluster", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetCloudVmCluster", + "service": { + 
"fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "GetCloudVmCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.GetCloudVmClusterRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.types.CloudVmCluster", + "shortName": "get_cloud_vm_cluster" + }, + "description": "Sample for GetCloudVmCluster", + "file": "oracledatabase_v1_generated_oracle_database_get_cloud_vm_cluster_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetCloudVmCluster_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_get_cloud_vm_cluster_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.get_cloud_vm_cluster", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetCloudVmCluster", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "GetCloudVmCluster" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.oracledatabase_v1.types.GetCloudVmClusterRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.types.CloudVmCluster", + "shortName": "get_cloud_vm_cluster" + }, + "description": "Sample for GetCloudVmCluster", + "file": "oracledatabase_v1_generated_oracle_database_get_cloud_vm_cluster_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetCloudVmCluster_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_get_cloud_vm_cluster_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.get_database", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetDatabase", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "GetDatabase" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.GetDatabaseRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + 
{ + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.types.Database", + "shortName": "get_database" + }, + "description": "Sample for GetDatabase", + "file": "oracledatabase_v1_generated_oracle_database_get_database_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetDatabase_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_get_database_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.get_database", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetDatabase", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "GetDatabase" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.GetDatabaseRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.types.Database", + "shortName": "get_database" + }, + "description": "Sample for GetDatabase", + "file": 
"oracledatabase_v1_generated_oracle_database_get_database_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetDatabase_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_get_database_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.get_db_system", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetDbSystem", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "GetDbSystem" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.GetDbSystemRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.types.DbSystem", + "shortName": "get_db_system" + }, + "description": "Sample for GetDbSystem", + "file": "oracledatabase_v1_generated_oracle_database_get_db_system_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetDbSystem_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + 
}, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_get_db_system_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.get_db_system", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetDbSystem", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "GetDbSystem" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.GetDbSystemRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.types.DbSystem", + "shortName": "get_db_system" + }, + "description": "Sample for GetDbSystem", + "file": "oracledatabase_v1_generated_oracle_database_get_db_system_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetDbSystem_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, 
+ "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_get_db_system_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.get_exadb_vm_cluster", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetExadbVmCluster", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "GetExadbVmCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.GetExadbVmClusterRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.types.ExadbVmCluster", + "shortName": "get_exadb_vm_cluster" + }, + "description": "Sample for GetExadbVmCluster", + "file": "oracledatabase_v1_generated_oracle_database_get_exadb_vm_cluster_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetExadbVmCluster_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_get_exadb_vm_cluster_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": 
"google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.get_exadb_vm_cluster", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetExadbVmCluster", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "GetExadbVmCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.GetExadbVmClusterRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.types.ExadbVmCluster", + "shortName": "get_exadb_vm_cluster" + }, + "description": "Sample for GetExadbVmCluster", + "file": "oracledatabase_v1_generated_oracle_database_get_exadb_vm_cluster_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetExadbVmCluster_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_get_exadb_vm_cluster_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.get_exascale_db_storage_vault", + 
"method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetExascaleDbStorageVault", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "GetExascaleDbStorageVault" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.GetExascaleDbStorageVaultRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.types.ExascaleDbStorageVault", + "shortName": "get_exascale_db_storage_vault" + }, + "description": "Sample for GetExascaleDbStorageVault", + "file": "oracledatabase_v1_generated_oracle_database_get_exascale_db_storage_vault_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetExascaleDbStorageVault_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_get_exascale_db_storage_vault_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.get_exascale_db_storage_vault", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetExascaleDbStorageVault", + "service": { + "fullName": 
"google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "GetExascaleDbStorageVault" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.GetExascaleDbStorageVaultRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.types.ExascaleDbStorageVault", + "shortName": "get_exascale_db_storage_vault" + }, + "description": "Sample for GetExascaleDbStorageVault", + "file": "oracledatabase_v1_generated_oracle_database_get_exascale_db_storage_vault_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetExascaleDbStorageVault_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_get_exascale_db_storage_vault_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.get_odb_network", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetOdbNetwork", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "GetOdbNetwork" + }, + "parameters": [ + { + 
"name": "request", + "type": "google.cloud.oracledatabase_v1.types.GetOdbNetworkRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.types.OdbNetwork", + "shortName": "get_odb_network" + }, + "description": "Sample for GetOdbNetwork", + "file": "oracledatabase_v1_generated_oracle_database_get_odb_network_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetOdbNetwork_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_get_odb_network_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.get_odb_network", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetOdbNetwork", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "GetOdbNetwork" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.GetOdbNetworkRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": 
"metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.types.OdbNetwork", + "shortName": "get_odb_network" + }, + "description": "Sample for GetOdbNetwork", + "file": "oracledatabase_v1_generated_oracle_database_get_odb_network_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetOdbNetwork_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_get_odb_network_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.get_odb_subnet", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetOdbSubnet", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "GetOdbSubnet" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.GetOdbSubnetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.types.OdbSubnet", + "shortName": "get_odb_subnet" + }, + "description": "Sample for GetOdbSubnet", + "file": 
"oracledatabase_v1_generated_oracle_database_get_odb_subnet_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetOdbSubnet_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_get_odb_subnet_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.get_odb_subnet", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetOdbSubnet", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "GetOdbSubnet" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.GetOdbSubnetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.types.OdbSubnet", + "shortName": "get_odb_subnet" + }, + "description": "Sample for GetOdbSubnet", + "file": "oracledatabase_v1_generated_oracle_database_get_odb_subnet_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetOdbSubnet_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 
51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_get_odb_subnet_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.get_pluggable_database", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetPluggableDatabase", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "GetPluggableDatabase" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.GetPluggableDatabaseRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.types.PluggableDatabase", + "shortName": "get_pluggable_database" + }, + "description": "Sample for GetPluggableDatabase", + "file": "oracledatabase_v1_generated_oracle_database_get_pluggable_database_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetPluggableDatabase_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, 
+ { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_get_pluggable_database_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.get_pluggable_database", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetPluggableDatabase", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "GetPluggableDatabase" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.GetPluggableDatabaseRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.types.PluggableDatabase", + "shortName": "get_pluggable_database" + }, + "description": "Sample for GetPluggableDatabase", + "file": "oracledatabase_v1_generated_oracle_database_get_pluggable_database_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetPluggableDatabase_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"oracledatabase_v1_generated_oracle_database_get_pluggable_database_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_autonomous_database_backups", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListAutonomousDatabaseBackups", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "ListAutonomousDatabaseBackups" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListAutonomousDatabaseBackupsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListAutonomousDatabaseBackupsAsyncPager", + "shortName": "list_autonomous_database_backups" + }, + "description": "Sample for ListAutonomousDatabaseBackups", + "file": "oracledatabase_v1_generated_oracle_database_list_autonomous_database_backups_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListAutonomousDatabaseBackups_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"oracledatabase_v1_generated_oracle_database_list_autonomous_database_backups_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_autonomous_database_backups", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListAutonomousDatabaseBackups", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "ListAutonomousDatabaseBackups" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListAutonomousDatabaseBackupsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListAutonomousDatabaseBackupsPager", + "shortName": "list_autonomous_database_backups" + }, + "description": "Sample for ListAutonomousDatabaseBackups", + "file": "oracledatabase_v1_generated_oracle_database_list_autonomous_database_backups_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListAutonomousDatabaseBackups_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"oracledatabase_v1_generated_oracle_database_list_autonomous_database_backups_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_autonomous_database_character_sets", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListAutonomousDatabaseCharacterSets", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "ListAutonomousDatabaseCharacterSets" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListAutonomousDatabaseCharacterSetsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListAutonomousDatabaseCharacterSetsAsyncPager", + "shortName": "list_autonomous_database_character_sets" + }, + "description": "Sample for ListAutonomousDatabaseCharacterSets", + "file": "oracledatabase_v1_generated_oracle_database_list_autonomous_database_character_sets_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListAutonomousDatabaseCharacterSets_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } 
+ ], + "title": "oracledatabase_v1_generated_oracle_database_list_autonomous_database_character_sets_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_autonomous_database_character_sets", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListAutonomousDatabaseCharacterSets", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "ListAutonomousDatabaseCharacterSets" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListAutonomousDatabaseCharacterSetsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListAutonomousDatabaseCharacterSetsPager", + "shortName": "list_autonomous_database_character_sets" + }, + "description": "Sample for ListAutonomousDatabaseCharacterSets", + "file": "oracledatabase_v1_generated_oracle_database_list_autonomous_database_character_sets_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListAutonomousDatabaseCharacterSets_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"oracledatabase_v1_generated_oracle_database_list_autonomous_database_character_sets_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_autonomous_databases", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListAutonomousDatabases", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "ListAutonomousDatabases" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListAutonomousDatabasesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListAutonomousDatabasesAsyncPager", + "shortName": "list_autonomous_databases" + }, + "description": "Sample for ListAutonomousDatabases", + "file": "oracledatabase_v1_generated_oracle_database_list_autonomous_databases_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListAutonomousDatabases_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_list_autonomous_databases_async.py" + }, + { + 
"canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_autonomous_databases", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListAutonomousDatabases", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "ListAutonomousDatabases" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListAutonomousDatabasesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListAutonomousDatabasesPager", + "shortName": "list_autonomous_databases" + }, + "description": "Sample for ListAutonomousDatabases", + "file": "oracledatabase_v1_generated_oracle_database_list_autonomous_databases_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListAutonomousDatabases_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_list_autonomous_databases_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", 
+ "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_autonomous_db_versions", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListAutonomousDbVersions", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "ListAutonomousDbVersions" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListAutonomousDbVersionsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListAutonomousDbVersionsAsyncPager", + "shortName": "list_autonomous_db_versions" + }, + "description": "Sample for ListAutonomousDbVersions", + "file": "oracledatabase_v1_generated_oracle_database_list_autonomous_db_versions_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListAutonomousDbVersions_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_list_autonomous_db_versions_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": 
"google.cloud.oracledatabase_v1.OracleDatabaseClient.list_autonomous_db_versions", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListAutonomousDbVersions", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "ListAutonomousDbVersions" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListAutonomousDbVersionsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListAutonomousDbVersionsPager", + "shortName": "list_autonomous_db_versions" + }, + "description": "Sample for ListAutonomousDbVersions", + "file": "oracledatabase_v1_generated_oracle_database_list_autonomous_db_versions_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListAutonomousDbVersions_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_list_autonomous_db_versions_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_cloud_exadata_infrastructures", + 
"method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListCloudExadataInfrastructures", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "ListCloudExadataInfrastructures" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListCloudExadataInfrastructuresRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListCloudExadataInfrastructuresAsyncPager", + "shortName": "list_cloud_exadata_infrastructures" + }, + "description": "Sample for ListCloudExadataInfrastructures", + "file": "oracledatabase_v1_generated_oracle_database_list_cloud_exadata_infrastructures_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListCloudExadataInfrastructures_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_list_cloud_exadata_infrastructures_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_cloud_exadata_infrastructures", + "method": { + "fullName": 
"google.cloud.oracledatabase.v1.OracleDatabase.ListCloudExadataInfrastructures", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "ListCloudExadataInfrastructures" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListCloudExadataInfrastructuresRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListCloudExadataInfrastructuresPager", + "shortName": "list_cloud_exadata_infrastructures" + }, + "description": "Sample for ListCloudExadataInfrastructures", + "file": "oracledatabase_v1_generated_oracle_database_list_cloud_exadata_infrastructures_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListCloudExadataInfrastructures_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_list_cloud_exadata_infrastructures_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_cloud_vm_clusters", + "method": { + "fullName": 
"google.cloud.oracledatabase.v1.OracleDatabase.ListCloudVmClusters", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "ListCloudVmClusters" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListCloudVmClustersRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListCloudVmClustersAsyncPager", + "shortName": "list_cloud_vm_clusters" + }, + "description": "Sample for ListCloudVmClusters", + "file": "oracledatabase_v1_generated_oracle_database_list_cloud_vm_clusters_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListCloudVmClusters_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_list_cloud_vm_clusters_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_cloud_vm_clusters", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListCloudVmClusters", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": 
"OracleDatabase" + }, + "shortName": "ListCloudVmClusters" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListCloudVmClustersRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListCloudVmClustersPager", + "shortName": "list_cloud_vm_clusters" + }, + "description": "Sample for ListCloudVmClusters", + "file": "oracledatabase_v1_generated_oracle_database_list_cloud_vm_clusters_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListCloudVmClusters_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_list_cloud_vm_clusters_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_database_character_sets", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListDatabaseCharacterSets", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "ListDatabaseCharacterSets" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.oracledatabase_v1.types.ListDatabaseCharacterSetsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDatabaseCharacterSetsAsyncPager", + "shortName": "list_database_character_sets" + }, + "description": "Sample for ListDatabaseCharacterSets", + "file": "oracledatabase_v1_generated_oracle_database_list_database_character_sets_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListDatabaseCharacterSets_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_list_database_character_sets_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_database_character_sets", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListDatabaseCharacterSets", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "ListDatabaseCharacterSets" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListDatabaseCharacterSetsRequest" + }, + { + "name": "parent", + "type": 
"str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDatabaseCharacterSetsPager", + "shortName": "list_database_character_sets" + }, + "description": "Sample for ListDatabaseCharacterSets", + "file": "oracledatabase_v1_generated_oracle_database_list_database_character_sets_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListDatabaseCharacterSets_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_list_database_character_sets_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_databases", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListDatabases", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "ListDatabases" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListDatabasesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + 
"name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDatabasesAsyncPager", + "shortName": "list_databases" + }, + "description": "Sample for ListDatabases", + "file": "oracledatabase_v1_generated_oracle_database_list_databases_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListDatabases_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_list_databases_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_databases", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListDatabases", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "ListDatabases" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListDatabasesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDatabasesPager", + "shortName": "list_databases" + }, + "description": 
"Sample for ListDatabases", + "file": "oracledatabase_v1_generated_oracle_database_list_databases_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListDatabases_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_list_databases_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_db_nodes", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListDbNodes", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "ListDbNodes" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListDbNodesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbNodesAsyncPager", + "shortName": "list_db_nodes" + }, + "description": "Sample for ListDbNodes", + "file": "oracledatabase_v1_generated_oracle_database_list_db_nodes_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"oracledatabase_v1_generated_OracleDatabase_ListDbNodes_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_list_db_nodes_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_db_nodes", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListDbNodes", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "ListDbNodes" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListDbNodesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbNodesPager", + "shortName": "list_db_nodes" + }, + "description": "Sample for ListDbNodes", + "file": "oracledatabase_v1_generated_oracle_database_list_db_nodes_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListDbNodes_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { 
+ "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_list_db_nodes_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_db_servers", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListDbServers", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "ListDbServers" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListDbServersRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbServersAsyncPager", + "shortName": "list_db_servers" + }, + "description": "Sample for ListDbServers", + "file": "oracledatabase_v1_generated_oracle_database_list_db_servers_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListDbServers_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + 
"title": "oracledatabase_v1_generated_oracle_database_list_db_servers_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_db_servers", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListDbServers", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "ListDbServers" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListDbServersRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbServersPager", + "shortName": "list_db_servers" + }, + "description": "Sample for ListDbServers", + "file": "oracledatabase_v1_generated_oracle_database_list_db_servers_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListDbServers_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_list_db_servers_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + 
"shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_db_system_initial_storage_sizes", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListDbSystemInitialStorageSizes", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "ListDbSystemInitialStorageSizes" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListDbSystemInitialStorageSizesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbSystemInitialStorageSizesAsyncPager", + "shortName": "list_db_system_initial_storage_sizes" + }, + "description": "Sample for ListDbSystemInitialStorageSizes", + "file": "oracledatabase_v1_generated_oracle_database_list_db_system_initial_storage_sizes_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListDbSystemInitialStorageSizes_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_list_db_system_initial_storage_sizes_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": 
"OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_db_system_initial_storage_sizes", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListDbSystemInitialStorageSizes", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "ListDbSystemInitialStorageSizes" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListDbSystemInitialStorageSizesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbSystemInitialStorageSizesPager", + "shortName": "list_db_system_initial_storage_sizes" + }, + "description": "Sample for ListDbSystemInitialStorageSizes", + "file": "oracledatabase_v1_generated_oracle_database_list_db_system_initial_storage_sizes_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListDbSystemInitialStorageSizes_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_list_db_system_initial_storage_sizes_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + 
}, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_db_system_shapes", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListDbSystemShapes", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "ListDbSystemShapes" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListDbSystemShapesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbSystemShapesAsyncPager", + "shortName": "list_db_system_shapes" + }, + "description": "Sample for ListDbSystemShapes", + "file": "oracledatabase_v1_generated_oracle_database_list_db_system_shapes_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListDbSystemShapes_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_list_db_system_shapes_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_db_system_shapes", + "method": { + "fullName": 
"google.cloud.oracledatabase.v1.OracleDatabase.ListDbSystemShapes", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": "ListDbSystemShapes" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListDbSystemShapesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbSystemShapesPager", + "shortName": "list_db_system_shapes" + }, + "description": "Sample for ListDbSystemShapes", + "file": "oracledatabase_v1_generated_oracle_database_list_db_system_shapes_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListDbSystemShapes_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_list_db_system_shapes_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", + "shortName": "OracleDatabaseAsyncClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_db_systems", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListDbSystems", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": 
"OracleDatabase" + }, + "shortName": "ListDbSystems" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListDbSystemsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" }, { "name": "metadata", "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.types.AutonomousDatabase", - "shortName": "get_autonomous_database" + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbSystemsAsyncPager", + "shortName": "list_db_systems" }, - "description": "Sample for GetAutonomousDatabase", - "file": "oracledatabase_v1_generated_oracle_database_get_autonomous_database_sync.py", + "description": "Sample for ListDbSystems", + "file": "oracledatabase_v1_generated_oracle_database_list_db_systems_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetAutonomousDatabase_sync", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListDbSystems_async", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -1363,13 +6684,93 @@ "start": 46, "type": "REQUEST_EXECUTION" }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "oracledatabase_v1_generated_oracle_database_list_db_systems_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", + "shortName": "OracleDatabaseClient" + }, + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_db_systems", + "method": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListDbSystems", + "service": { + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", + "shortName": "OracleDatabase" + }, + "shortName": 
"ListDbSystems" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.oracledatabase_v1.types.ListDbSystemsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbSystemsPager", + "shortName": "list_db_systems" + }, + "description": "Sample for ListDbSystems", + "file": "oracledatabase_v1_generated_oracle_database_list_db_systems_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListDbSystems_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, { "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_get_autonomous_database_sync.py" + "title": "oracledatabase_v1_generated_oracle_database_list_db_systems_sync.py" }, { "canonical": true, @@ -1379,22 +6780,22 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", "shortName": "OracleDatabaseAsyncClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.get_cloud_exadata_infrastructure", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_db_versions", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetCloudExadataInfrastructure", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListDbVersions", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" 
}, - "shortName": "GetCloudExadataInfrastructure" + "shortName": "ListDbVersions" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.GetCloudExadataInfrastructureRequest" + "type": "google.cloud.oracledatabase_v1.types.ListDbVersionsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -1410,22 +6811,22 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.types.CloudExadataInfrastructure", - "shortName": "get_cloud_exadata_infrastructure" + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbVersionsAsyncPager", + "shortName": "list_db_versions" }, - "description": "Sample for GetCloudExadataInfrastructure", - "file": "oracledatabase_v1_generated_oracle_database_get_cloud_exadata_infrastructure_async.py", + "description": "Sample for ListDbVersions", + "file": "oracledatabase_v1_generated_oracle_database_list_db_versions_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetCloudExadataInfrastructure_async", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListDbVersions_async", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -1445,12 +6846,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_get_cloud_exadata_infrastructure_async.py" + "title": "oracledatabase_v1_generated_oracle_database_list_db_versions_async.py" }, { "canonical": true, @@ -1459,22 +6860,22 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", "shortName": "OracleDatabaseClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.get_cloud_exadata_infrastructure", + "fullName": 
"google.cloud.oracledatabase_v1.OracleDatabaseClient.list_db_versions", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetCloudExadataInfrastructure", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListDbVersions", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "GetCloudExadataInfrastructure" + "shortName": "ListDbVersions" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.GetCloudExadataInfrastructureRequest" + "type": "google.cloud.oracledatabase_v1.types.ListDbVersionsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -1490,22 +6891,22 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.types.CloudExadataInfrastructure", - "shortName": "get_cloud_exadata_infrastructure" + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbVersionsPager", + "shortName": "list_db_versions" }, - "description": "Sample for GetCloudExadataInfrastructure", - "file": "oracledatabase_v1_generated_oracle_database_get_cloud_exadata_infrastructure_sync.py", + "description": "Sample for ListDbVersions", + "file": "oracledatabase_v1_generated_oracle_database_list_db_versions_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetCloudExadataInfrastructure_sync", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListDbVersions_sync", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -1525,12 +6926,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_get_cloud_exadata_infrastructure_sync.py" + "title": 
"oracledatabase_v1_generated_oracle_database_list_db_versions_sync.py" }, { "canonical": true, @@ -1540,22 +6941,22 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", "shortName": "OracleDatabaseAsyncClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.get_cloud_vm_cluster", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_entitlements", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetCloudVmCluster", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListEntitlements", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "GetCloudVmCluster" + "shortName": "ListEntitlements" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.GetCloudVmClusterRequest" + "type": "google.cloud.oracledatabase_v1.types.ListEntitlementsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -1571,22 +6972,22 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.types.CloudVmCluster", - "shortName": "get_cloud_vm_cluster" + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListEntitlementsAsyncPager", + "shortName": "list_entitlements" }, - "description": "Sample for GetCloudVmCluster", - "file": "oracledatabase_v1_generated_oracle_database_get_cloud_vm_cluster_async.py", + "description": "Sample for ListEntitlements", + "file": "oracledatabase_v1_generated_oracle_database_list_entitlements_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetCloudVmCluster_async", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListEntitlements_async", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -1606,12 +7007,12 @@ 
"type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_get_cloud_vm_cluster_async.py" + "title": "oracledatabase_v1_generated_oracle_database_list_entitlements_async.py" }, { "canonical": true, @@ -1620,22 +7021,22 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", "shortName": "OracleDatabaseClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.get_cloud_vm_cluster", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_entitlements", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.GetCloudVmCluster", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListEntitlements", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "GetCloudVmCluster" + "shortName": "ListEntitlements" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.GetCloudVmClusterRequest" + "type": "google.cloud.oracledatabase_v1.types.ListEntitlementsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -1651,22 +7052,22 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.types.CloudVmCluster", - "shortName": "get_cloud_vm_cluster" + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListEntitlementsPager", + "shortName": "list_entitlements" }, - "description": "Sample for GetCloudVmCluster", - "file": "oracledatabase_v1_generated_oracle_database_get_cloud_vm_cluster_sync.py", + "description": "Sample for ListEntitlements", + "file": "oracledatabase_v1_generated_oracle_database_list_entitlements_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_GetCloudVmCluster_sync", + "regionTag": 
"oracledatabase_v1_generated_OracleDatabase_ListEntitlements_sync", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -1686,12 +7087,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_get_cloud_vm_cluster_sync.py" + "title": "oracledatabase_v1_generated_oracle_database_list_entitlements_sync.py" }, { "canonical": true, @@ -1701,19 +7102,19 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", "shortName": "OracleDatabaseAsyncClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_autonomous_database_backups", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_exadb_vm_clusters", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListAutonomousDatabaseBackups", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListExadbVmClusters", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "ListAutonomousDatabaseBackups" + "shortName": "ListExadbVmClusters" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.ListAutonomousDatabaseBackupsRequest" + "type": "google.cloud.oracledatabase_v1.types.ListExadbVmClustersRequest" }, { "name": "parent", @@ -1732,14 +7133,14 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListAutonomousDatabaseBackupsAsyncPager", - "shortName": "list_autonomous_database_backups" + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListExadbVmClustersAsyncPager", + "shortName": "list_exadb_vm_clusters" }, - "description": "Sample for ListAutonomousDatabaseBackups", - "file": 
"oracledatabase_v1_generated_oracle_database_list_autonomous_database_backups_async.py", + "description": "Sample for ListExadbVmClusters", + "file": "oracledatabase_v1_generated_oracle_database_list_exadb_vm_clusters_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListAutonomousDatabaseBackups_async", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListExadbVmClusters_async", "segments": [ { "end": 52, @@ -1772,7 +7173,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_list_autonomous_database_backups_async.py" + "title": "oracledatabase_v1_generated_oracle_database_list_exadb_vm_clusters_async.py" }, { "canonical": true, @@ -1781,19 +7182,19 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", "shortName": "OracleDatabaseClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_autonomous_database_backups", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_exadb_vm_clusters", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListAutonomousDatabaseBackups", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListExadbVmClusters", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "ListAutonomousDatabaseBackups" + "shortName": "ListExadbVmClusters" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.ListAutonomousDatabaseBackupsRequest" + "type": "google.cloud.oracledatabase_v1.types.ListExadbVmClustersRequest" }, { "name": "parent", @@ -1812,14 +7213,14 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListAutonomousDatabaseBackupsPager", - "shortName": "list_autonomous_database_backups" + "resultType": 
"google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListExadbVmClustersPager", + "shortName": "list_exadb_vm_clusters" }, - "description": "Sample for ListAutonomousDatabaseBackups", - "file": "oracledatabase_v1_generated_oracle_database_list_autonomous_database_backups_sync.py", + "description": "Sample for ListExadbVmClusters", + "file": "oracledatabase_v1_generated_oracle_database_list_exadb_vm_clusters_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListAutonomousDatabaseBackups_sync", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListExadbVmClusters_sync", "segments": [ { "end": 52, @@ -1852,7 +7253,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_list_autonomous_database_backups_sync.py" + "title": "oracledatabase_v1_generated_oracle_database_list_exadb_vm_clusters_sync.py" }, { "canonical": true, @@ -1862,19 +7263,19 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", "shortName": "OracleDatabaseAsyncClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_autonomous_database_character_sets", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_exascale_db_storage_vaults", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListAutonomousDatabaseCharacterSets", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListExascaleDbStorageVaults", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "ListAutonomousDatabaseCharacterSets" + "shortName": "ListExascaleDbStorageVaults" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.ListAutonomousDatabaseCharacterSetsRequest" + "type": "google.cloud.oracledatabase_v1.types.ListExascaleDbStorageVaultsRequest" }, { "name": "parent", @@ -1893,14 +7294,14 @@ "type": 
"Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListAutonomousDatabaseCharacterSetsAsyncPager", - "shortName": "list_autonomous_database_character_sets" + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListExascaleDbStorageVaultsAsyncPager", + "shortName": "list_exascale_db_storage_vaults" }, - "description": "Sample for ListAutonomousDatabaseCharacterSets", - "file": "oracledatabase_v1_generated_oracle_database_list_autonomous_database_character_sets_async.py", + "description": "Sample for ListExascaleDbStorageVaults", + "file": "oracledatabase_v1_generated_oracle_database_list_exascale_db_storage_vaults_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListAutonomousDatabaseCharacterSets_async", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListExascaleDbStorageVaults_async", "segments": [ { "end": 52, @@ -1933,7 +7334,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_list_autonomous_database_character_sets_async.py" + "title": "oracledatabase_v1_generated_oracle_database_list_exascale_db_storage_vaults_async.py" }, { "canonical": true, @@ -1942,19 +7343,19 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", "shortName": "OracleDatabaseClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_autonomous_database_character_sets", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_exascale_db_storage_vaults", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListAutonomousDatabaseCharacterSets", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListExascaleDbStorageVaults", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "ListAutonomousDatabaseCharacterSets" + 
"shortName": "ListExascaleDbStorageVaults" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.ListAutonomousDatabaseCharacterSetsRequest" + "type": "google.cloud.oracledatabase_v1.types.ListExascaleDbStorageVaultsRequest" }, { "name": "parent", @@ -1973,14 +7374,14 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListAutonomousDatabaseCharacterSetsPager", - "shortName": "list_autonomous_database_character_sets" + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListExascaleDbStorageVaultsPager", + "shortName": "list_exascale_db_storage_vaults" }, - "description": "Sample for ListAutonomousDatabaseCharacterSets", - "file": "oracledatabase_v1_generated_oracle_database_list_autonomous_database_character_sets_sync.py", + "description": "Sample for ListExascaleDbStorageVaults", + "file": "oracledatabase_v1_generated_oracle_database_list_exascale_db_storage_vaults_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListAutonomousDatabaseCharacterSets_sync", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListExascaleDbStorageVaults_sync", "segments": [ { "end": 52, @@ -2013,7 +7414,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_list_autonomous_database_character_sets_sync.py" + "title": "oracledatabase_v1_generated_oracle_database_list_exascale_db_storage_vaults_sync.py" }, { "canonical": true, @@ -2023,19 +7424,19 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", "shortName": "OracleDatabaseAsyncClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_autonomous_databases", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_gi_versions", "method": { - "fullName": 
"google.cloud.oracledatabase.v1.OracleDatabase.ListAutonomousDatabases", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListGiVersions", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "ListAutonomousDatabases" + "shortName": "ListGiVersions" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.ListAutonomousDatabasesRequest" + "type": "google.cloud.oracledatabase_v1.types.ListGiVersionsRequest" }, { "name": "parent", @@ -2054,14 +7455,14 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListAutonomousDatabasesAsyncPager", - "shortName": "list_autonomous_databases" + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListGiVersionsAsyncPager", + "shortName": "list_gi_versions" }, - "description": "Sample for ListAutonomousDatabases", - "file": "oracledatabase_v1_generated_oracle_database_list_autonomous_databases_async.py", + "description": "Sample for ListGiVersions", + "file": "oracledatabase_v1_generated_oracle_database_list_gi_versions_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListAutonomousDatabases_async", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListGiVersions_async", "segments": [ { "end": 52, @@ -2094,7 +7495,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_list_autonomous_databases_async.py" + "title": "oracledatabase_v1_generated_oracle_database_list_gi_versions_async.py" }, { "canonical": true, @@ -2103,19 +7504,19 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", "shortName": "OracleDatabaseClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_autonomous_databases", + "fullName": 
"google.cloud.oracledatabase_v1.OracleDatabaseClient.list_gi_versions", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListAutonomousDatabases", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListGiVersions", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "ListAutonomousDatabases" + "shortName": "ListGiVersions" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.ListAutonomousDatabasesRequest" + "type": "google.cloud.oracledatabase_v1.types.ListGiVersionsRequest" }, { "name": "parent", @@ -2134,14 +7535,14 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListAutonomousDatabasesPager", - "shortName": "list_autonomous_databases" + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListGiVersionsPager", + "shortName": "list_gi_versions" }, - "description": "Sample for ListAutonomousDatabases", - "file": "oracledatabase_v1_generated_oracle_database_list_autonomous_databases_sync.py", + "description": "Sample for ListGiVersions", + "file": "oracledatabase_v1_generated_oracle_database_list_gi_versions_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListAutonomousDatabases_sync", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListGiVersions_sync", "segments": [ { "end": 52, @@ -2174,7 +7575,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_list_autonomous_databases_sync.py" + "title": "oracledatabase_v1_generated_oracle_database_list_gi_versions_sync.py" }, { "canonical": true, @@ -2184,19 +7585,19 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", "shortName": "OracleDatabaseAsyncClient" }, - "fullName": 
"google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_autonomous_db_versions", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_minor_versions", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListAutonomousDbVersions", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListMinorVersions", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "ListAutonomousDbVersions" + "shortName": "ListMinorVersions" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.ListAutonomousDbVersionsRequest" + "type": "google.cloud.oracledatabase_v1.types.ListMinorVersionsRequest" }, { "name": "parent", @@ -2215,14 +7616,14 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListAutonomousDbVersionsAsyncPager", - "shortName": "list_autonomous_db_versions" + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListMinorVersionsAsyncPager", + "shortName": "list_minor_versions" }, - "description": "Sample for ListAutonomousDbVersions", - "file": "oracledatabase_v1_generated_oracle_database_list_autonomous_db_versions_async.py", + "description": "Sample for ListMinorVersions", + "file": "oracledatabase_v1_generated_oracle_database_list_minor_versions_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListAutonomousDbVersions_async", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListMinorVersions_async", "segments": [ { "end": 52, @@ -2255,7 +7656,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_list_autonomous_db_versions_async.py" + "title": "oracledatabase_v1_generated_oracle_database_list_minor_versions_async.py" }, { "canonical": true, @@ -2264,19 +7665,19 @@ "fullName": 
"google.cloud.oracledatabase_v1.OracleDatabaseClient", "shortName": "OracleDatabaseClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_autonomous_db_versions", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_minor_versions", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListAutonomousDbVersions", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListMinorVersions", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "ListAutonomousDbVersions" + "shortName": "ListMinorVersions" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.ListAutonomousDbVersionsRequest" + "type": "google.cloud.oracledatabase_v1.types.ListMinorVersionsRequest" }, { "name": "parent", @@ -2295,14 +7696,14 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListAutonomousDbVersionsPager", - "shortName": "list_autonomous_db_versions" + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListMinorVersionsPager", + "shortName": "list_minor_versions" }, - "description": "Sample for ListAutonomousDbVersions", - "file": "oracledatabase_v1_generated_oracle_database_list_autonomous_db_versions_sync.py", + "description": "Sample for ListMinorVersions", + "file": "oracledatabase_v1_generated_oracle_database_list_minor_versions_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListAutonomousDbVersions_sync", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListMinorVersions_sync", "segments": [ { "end": 52, @@ -2335,7 +7736,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_list_autonomous_db_versions_sync.py" + "title": 
"oracledatabase_v1_generated_oracle_database_list_minor_versions_sync.py" }, { "canonical": true, @@ -2345,19 +7746,19 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", "shortName": "OracleDatabaseAsyncClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_cloud_exadata_infrastructures", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_odb_networks", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListCloudExadataInfrastructures", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListOdbNetworks", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "ListCloudExadataInfrastructures" + "shortName": "ListOdbNetworks" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.ListCloudExadataInfrastructuresRequest" + "type": "google.cloud.oracledatabase_v1.types.ListOdbNetworksRequest" }, { "name": "parent", @@ -2376,14 +7777,14 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListCloudExadataInfrastructuresAsyncPager", - "shortName": "list_cloud_exadata_infrastructures" + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListOdbNetworksAsyncPager", + "shortName": "list_odb_networks" }, - "description": "Sample for ListCloudExadataInfrastructures", - "file": "oracledatabase_v1_generated_oracle_database_list_cloud_exadata_infrastructures_async.py", + "description": "Sample for ListOdbNetworks", + "file": "oracledatabase_v1_generated_oracle_database_list_odb_networks_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListCloudExadataInfrastructures_async", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListOdbNetworks_async", "segments": [ { "end": 52, @@ 
-2416,7 +7817,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_list_cloud_exadata_infrastructures_async.py" + "title": "oracledatabase_v1_generated_oracle_database_list_odb_networks_async.py" }, { "canonical": true, @@ -2425,19 +7826,19 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", "shortName": "OracleDatabaseClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_cloud_exadata_infrastructures", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_odb_networks", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListCloudExadataInfrastructures", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListOdbNetworks", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "ListCloudExadataInfrastructures" + "shortName": "ListOdbNetworks" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.ListCloudExadataInfrastructuresRequest" + "type": "google.cloud.oracledatabase_v1.types.ListOdbNetworksRequest" }, { "name": "parent", @@ -2456,14 +7857,14 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListCloudExadataInfrastructuresPager", - "shortName": "list_cloud_exadata_infrastructures" + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListOdbNetworksPager", + "shortName": "list_odb_networks" }, - "description": "Sample for ListCloudExadataInfrastructures", - "file": "oracledatabase_v1_generated_oracle_database_list_cloud_exadata_infrastructures_sync.py", + "description": "Sample for ListOdbNetworks", + "file": "oracledatabase_v1_generated_oracle_database_list_odb_networks_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"oracledatabase_v1_generated_OracleDatabase_ListCloudExadataInfrastructures_sync", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListOdbNetworks_sync", "segments": [ { "end": 52, @@ -2496,7 +7897,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_list_cloud_exadata_infrastructures_sync.py" + "title": "oracledatabase_v1_generated_oracle_database_list_odb_networks_sync.py" }, { "canonical": true, @@ -2506,19 +7907,19 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", "shortName": "OracleDatabaseAsyncClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_cloud_vm_clusters", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_odb_subnets", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListCloudVmClusters", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListOdbSubnets", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "ListCloudVmClusters" + "shortName": "ListOdbSubnets" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.ListCloudVmClustersRequest" + "type": "google.cloud.oracledatabase_v1.types.ListOdbSubnetsRequest" }, { "name": "parent", @@ -2537,14 +7938,14 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListCloudVmClustersAsyncPager", - "shortName": "list_cloud_vm_clusters" + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListOdbSubnetsAsyncPager", + "shortName": "list_odb_subnets" }, - "description": "Sample for ListCloudVmClusters", - "file": "oracledatabase_v1_generated_oracle_database_list_cloud_vm_clusters_async.py", + "description": "Sample for ListOdbSubnets", + "file": "oracledatabase_v1_generated_oracle_database_list_odb_subnets_async.py", "language": 
"PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListCloudVmClusters_async", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListOdbSubnets_async", "segments": [ { "end": 52, @@ -2577,7 +7978,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_list_cloud_vm_clusters_async.py" + "title": "oracledatabase_v1_generated_oracle_database_list_odb_subnets_async.py" }, { "canonical": true, @@ -2586,19 +7987,19 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", "shortName": "OracleDatabaseClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_cloud_vm_clusters", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_odb_subnets", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListCloudVmClusters", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListOdbSubnets", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "ListCloudVmClusters" + "shortName": "ListOdbSubnets" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.ListCloudVmClustersRequest" + "type": "google.cloud.oracledatabase_v1.types.ListOdbSubnetsRequest" }, { "name": "parent", @@ -2617,14 +8018,14 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListCloudVmClustersPager", - "shortName": "list_cloud_vm_clusters" + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListOdbSubnetsPager", + "shortName": "list_odb_subnets" }, - "description": "Sample for ListCloudVmClusters", - "file": "oracledatabase_v1_generated_oracle_database_list_cloud_vm_clusters_sync.py", + "description": "Sample for ListOdbSubnets", + "file": "oracledatabase_v1_generated_oracle_database_list_odb_subnets_sync.py", "language": 
"PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListCloudVmClusters_sync", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListOdbSubnets_sync", "segments": [ { "end": 52, @@ -2657,7 +8058,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_list_cloud_vm_clusters_sync.py" + "title": "oracledatabase_v1_generated_oracle_database_list_odb_subnets_sync.py" }, { "canonical": true, @@ -2667,19 +8068,19 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", "shortName": "OracleDatabaseAsyncClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_db_nodes", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_pluggable_databases", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListDbNodes", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListPluggableDatabases", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "ListDbNodes" + "shortName": "ListPluggableDatabases" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.ListDbNodesRequest" + "type": "google.cloud.oracledatabase_v1.types.ListPluggableDatabasesRequest" }, { "name": "parent", @@ -2698,14 +8099,14 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbNodesAsyncPager", - "shortName": "list_db_nodes" + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListPluggableDatabasesAsyncPager", + "shortName": "list_pluggable_databases" }, - "description": "Sample for ListDbNodes", - "file": "oracledatabase_v1_generated_oracle_database_list_db_nodes_async.py", + "description": "Sample for ListPluggableDatabases", + "file": 
"oracledatabase_v1_generated_oracle_database_list_pluggable_databases_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListDbNodes_async", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListPluggableDatabases_async", "segments": [ { "end": 52, @@ -2738,7 +8139,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_list_db_nodes_async.py" + "title": "oracledatabase_v1_generated_oracle_database_list_pluggable_databases_async.py" }, { "canonical": true, @@ -2747,19 +8148,19 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", "shortName": "OracleDatabaseClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_db_nodes", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_pluggable_databases", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListDbNodes", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListPluggableDatabases", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "ListDbNodes" + "shortName": "ListPluggableDatabases" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.ListDbNodesRequest" + "type": "google.cloud.oracledatabase_v1.types.ListPluggableDatabasesRequest" }, { "name": "parent", @@ -2778,14 +8179,14 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbNodesPager", - "shortName": "list_db_nodes" + "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListPluggableDatabasesPager", + "shortName": "list_pluggable_databases" }, - "description": "Sample for ListDbNodes", - "file": "oracledatabase_v1_generated_oracle_database_list_db_nodes_sync.py", + "description": "Sample for ListPluggableDatabases", + "file": 
"oracledatabase_v1_generated_oracle_database_list_pluggable_databases_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListDbNodes_sync", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListPluggableDatabases_sync", "segments": [ { "end": 52, @@ -2818,7 +8219,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_list_db_nodes_sync.py" + "title": "oracledatabase_v1_generated_oracle_database_list_pluggable_databases_sync.py" }, { "canonical": true, @@ -2828,24 +8229,28 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", "shortName": "OracleDatabaseAsyncClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_db_servers", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.remove_virtual_machine_exadb_vm_cluster", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListDbServers", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.RemoveVirtualMachineExadbVmCluster", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "ListDbServers" + "shortName": "RemoveVirtualMachineExadbVmCluster" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.ListDbServersRequest" + "type": "google.cloud.oracledatabase_v1.types.RemoveVirtualMachineExadbVmClusterRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, + { + "name": "hostnames", + "type": "MutableSequence[str]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -2859,22 +8264,22 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbServersAsyncPager", - "shortName": "list_db_servers" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": 
"remove_virtual_machine_exadb_vm_cluster" }, - "description": "Sample for ListDbServers", - "file": "oracledatabase_v1_generated_oracle_database_list_db_servers_async.py", + "description": "Sample for RemoveVirtualMachineExadbVmCluster", + "file": "oracledatabase_v1_generated_oracle_database_remove_virtual_machine_exadb_vm_cluster_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListDbServers_async", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_RemoveVirtualMachineExadbVmCluster_async", "segments": [ { - "end": 52, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 56, "start": 27, "type": "SHORT" }, @@ -2884,22 +8289,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_list_db_servers_async.py" + "title": "oracledatabase_v1_generated_oracle_database_remove_virtual_machine_exadb_vm_cluster_async.py" }, { "canonical": true, @@ -2908,24 +8313,28 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", "shortName": "OracleDatabaseClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_db_servers", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.remove_virtual_machine_exadb_vm_cluster", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListDbServers", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.RemoveVirtualMachineExadbVmCluster", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "ListDbServers" + "shortName": "RemoveVirtualMachineExadbVmCluster" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.oracledatabase_v1.types.ListDbServersRequest" + "type": "google.cloud.oracledatabase_v1.types.RemoveVirtualMachineExadbVmClusterRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, + { + "name": "hostnames", + "type": "MutableSequence[str]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -2939,22 +8348,22 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbServersPager", - "shortName": "list_db_servers" + "resultType": "google.api_core.operation.Operation", + "shortName": "remove_virtual_machine_exadb_vm_cluster" }, - "description": "Sample for ListDbServers", - "file": "oracledatabase_v1_generated_oracle_database_list_db_servers_sync.py", + "description": "Sample for RemoveVirtualMachineExadbVmCluster", + "file": "oracledatabase_v1_generated_oracle_database_remove_virtual_machine_exadb_vm_cluster_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListDbServers_sync", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_RemoveVirtualMachineExadbVmCluster_sync", "segments": [ { - "end": 52, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 56, "start": 27, "type": "SHORT" }, @@ -2964,22 +8373,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_list_db_servers_sync.py" + "title": "oracledatabase_v1_generated_oracle_database_remove_virtual_machine_exadb_vm_cluster_sync.py" }, { "canonical": true, @@ -2989,22 +8398,22 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", "shortName": "OracleDatabaseAsyncClient" }, - 
"fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_db_system_shapes", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.restart_autonomous_database", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListDbSystemShapes", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.RestartAutonomousDatabase", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "ListDbSystemShapes" + "shortName": "RestartAutonomousDatabase" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.ListDbSystemShapesRequest" + "type": "google.cloud.oracledatabase_v1.types.RestartAutonomousDatabaseRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -3020,22 +8429,22 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbSystemShapesAsyncPager", - "shortName": "list_db_system_shapes" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "restart_autonomous_database" }, - "description": "Sample for ListDbSystemShapes", - "file": "oracledatabase_v1_generated_oracle_database_list_db_system_shapes_async.py", + "description": "Sample for RestartAutonomousDatabase", + "file": "oracledatabase_v1_generated_oracle_database_restart_autonomous_database_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListDbSystemShapes_async", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_RestartAutonomousDatabase_async", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -3050,17 +8459,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + 
"start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_list_db_system_shapes_async.py" + "title": "oracledatabase_v1_generated_oracle_database_restart_autonomous_database_async.py" }, { "canonical": true, @@ -3069,22 +8478,22 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", "shortName": "OracleDatabaseClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_db_system_shapes", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.restart_autonomous_database", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListDbSystemShapes", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.RestartAutonomousDatabase", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "ListDbSystemShapes" + "shortName": "RestartAutonomousDatabase" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.ListDbSystemShapesRequest" + "type": "google.cloud.oracledatabase_v1.types.RestartAutonomousDatabaseRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -3100,22 +8509,22 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListDbSystemShapesPager", - "shortName": "list_db_system_shapes" + "resultType": "google.api_core.operation.Operation", + "shortName": "restart_autonomous_database" }, - "description": "Sample for ListDbSystemShapes", - "file": "oracledatabase_v1_generated_oracle_database_list_db_system_shapes_sync.py", + "description": "Sample for RestartAutonomousDatabase", + "file": "oracledatabase_v1_generated_oracle_database_restart_autonomous_database_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListDbSystemShapes_sync", + "regionTag": 
"oracledatabase_v1_generated_OracleDatabase_RestartAutonomousDatabase_sync", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -3130,17 +8539,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_list_db_system_shapes_sync.py" + "title": "oracledatabase_v1_generated_oracle_database_restart_autonomous_database_sync.py" }, { "canonical": true, @@ -3150,24 +8559,28 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", "shortName": "OracleDatabaseAsyncClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_entitlements", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.restore_autonomous_database", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListEntitlements", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.RestoreAutonomousDatabase", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "ListEntitlements" + "shortName": "RestoreAutonomousDatabase" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.ListEntitlementsRequest" + "type": "google.cloud.oracledatabase_v1.types.RestoreAutonomousDatabaseRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, + { + "name": "restore_time", + "type": "google.protobuf.timestamp_pb2.Timestamp" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -3181,22 +8594,22 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListEntitlementsAsyncPager", - "shortName": "list_entitlements" + "resultType": 
"google.api_core.operation_async.AsyncOperation", + "shortName": "restore_autonomous_database" }, - "description": "Sample for ListEntitlements", - "file": "oracledatabase_v1_generated_oracle_database_list_entitlements_async.py", + "description": "Sample for RestoreAutonomousDatabase", + "file": "oracledatabase_v1_generated_oracle_database_restore_autonomous_database_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListEntitlements_async", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_RestoreAutonomousDatabase_async", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -3211,17 +8624,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_list_entitlements_async.py" + "title": "oracledatabase_v1_generated_oracle_database_restore_autonomous_database_async.py" }, { "canonical": true, @@ -3230,24 +8643,28 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", "shortName": "OracleDatabaseClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_entitlements", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.restore_autonomous_database", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListEntitlements", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.RestoreAutonomousDatabase", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "ListEntitlements" + "shortName": "RestoreAutonomousDatabase" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.ListEntitlementsRequest" + "type": 
"google.cloud.oracledatabase_v1.types.RestoreAutonomousDatabaseRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, + { + "name": "restore_time", + "type": "google.protobuf.timestamp_pb2.Timestamp" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -3261,22 +8678,22 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListEntitlementsPager", - "shortName": "list_entitlements" + "resultType": "google.api_core.operation.Operation", + "shortName": "restore_autonomous_database" }, - "description": "Sample for ListEntitlements", - "file": "oracledatabase_v1_generated_oracle_database_list_entitlements_sync.py", + "description": "Sample for RestoreAutonomousDatabase", + "file": "oracledatabase_v1_generated_oracle_database_restore_autonomous_database_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListEntitlements_sync", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_RestoreAutonomousDatabase_sync", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -3291,17 +8708,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_list_entitlements_sync.py" + "title": "oracledatabase_v1_generated_oracle_database_restore_autonomous_database_sync.py" }, { "canonical": true, @@ -3311,22 +8728,22 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", "shortName": "OracleDatabaseAsyncClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.list_gi_versions", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.start_autonomous_database", "method": 
{ - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListGiVersions", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.StartAutonomousDatabase", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "ListGiVersions" + "shortName": "StartAutonomousDatabase" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.ListGiVersionsRequest" + "type": "google.cloud.oracledatabase_v1.types.StartAutonomousDatabaseRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -3342,22 +8759,22 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListGiVersionsAsyncPager", - "shortName": "list_gi_versions" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "start_autonomous_database" }, - "description": "Sample for ListGiVersions", - "file": "oracledatabase_v1_generated_oracle_database_list_gi_versions_async.py", + "description": "Sample for StartAutonomousDatabase", + "file": "oracledatabase_v1_generated_oracle_database_start_autonomous_database_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListGiVersions_async", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_StartAutonomousDatabase_async", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -3372,17 +8789,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_list_gi_versions_async.py" + "title": "oracledatabase_v1_generated_oracle_database_start_autonomous_database_async.py" }, { "canonical": true, @@ -3391,22 
+8808,22 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", "shortName": "OracleDatabaseClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.list_gi_versions", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.start_autonomous_database", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.ListGiVersions", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.StartAutonomousDatabase", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "ListGiVersions" + "shortName": "StartAutonomousDatabase" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.ListGiVersionsRequest" + "type": "google.cloud.oracledatabase_v1.types.StartAutonomousDatabaseRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -3422,22 +8839,22 @@ "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], - "resultType": "google.cloud.oracledatabase_v1.services.oracle_database.pagers.ListGiVersionsPager", - "shortName": "list_gi_versions" + "resultType": "google.api_core.operation.Operation", + "shortName": "start_autonomous_database" }, - "description": "Sample for ListGiVersions", - "file": "oracledatabase_v1_generated_oracle_database_list_gi_versions_sync.py", + "description": "Sample for StartAutonomousDatabase", + "file": "oracledatabase_v1_generated_oracle_database_start_autonomous_database_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_ListGiVersions_sync", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_StartAutonomousDatabase_sync", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -3452,17 +8869,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, 
- "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_list_gi_versions_sync.py" + "title": "oracledatabase_v1_generated_oracle_database_start_autonomous_database_sync.py" }, { "canonical": true, @@ -3472,19 +8889,19 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", "shortName": "OracleDatabaseAsyncClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.restart_autonomous_database", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.stop_autonomous_database", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.RestartAutonomousDatabase", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.StopAutonomousDatabase", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "RestartAutonomousDatabase" + "shortName": "StopAutonomousDatabase" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.RestartAutonomousDatabaseRequest" + "type": "google.cloud.oracledatabase_v1.types.StopAutonomousDatabaseRequest" }, { "name": "name", @@ -3504,13 +8921,13 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "restart_autonomous_database" + "shortName": "stop_autonomous_database" }, - "description": "Sample for RestartAutonomousDatabase", - "file": "oracledatabase_v1_generated_oracle_database_restart_autonomous_database_async.py", + "description": "Sample for StopAutonomousDatabase", + "file": "oracledatabase_v1_generated_oracle_database_stop_autonomous_database_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_RestartAutonomousDatabase_async", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_StopAutonomousDatabase_async", "segments": [ { "end": 55, @@ -3543,7 +8960,7 @@ "type": 
"RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_restart_autonomous_database_async.py" + "title": "oracledatabase_v1_generated_oracle_database_stop_autonomous_database_async.py" }, { "canonical": true, @@ -3552,19 +8969,19 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", "shortName": "OracleDatabaseClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.restart_autonomous_database", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.stop_autonomous_database", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.RestartAutonomousDatabase", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.StopAutonomousDatabase", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "RestartAutonomousDatabase" + "shortName": "StopAutonomousDatabase" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.RestartAutonomousDatabaseRequest" + "type": "google.cloud.oracledatabase_v1.types.StopAutonomousDatabaseRequest" }, { "name": "name", @@ -3584,13 +9001,13 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "restart_autonomous_database" + "shortName": "stop_autonomous_database" }, - "description": "Sample for RestartAutonomousDatabase", - "file": "oracledatabase_v1_generated_oracle_database_restart_autonomous_database_sync.py", + "description": "Sample for StopAutonomousDatabase", + "file": "oracledatabase_v1_generated_oracle_database_stop_autonomous_database_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_RestartAutonomousDatabase_sync", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_StopAutonomousDatabase_sync", "segments": [ { "end": 55, @@ -3623,7 +9040,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": 
"oracledatabase_v1_generated_oracle_database_restart_autonomous_database_sync.py" + "title": "oracledatabase_v1_generated_oracle_database_stop_autonomous_database_sync.py" }, { "canonical": true, @@ -3633,27 +9050,27 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", "shortName": "OracleDatabaseAsyncClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.restore_autonomous_database", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.switchover_autonomous_database", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.RestoreAutonomousDatabase", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.SwitchoverAutonomousDatabase", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "RestoreAutonomousDatabase" + "shortName": "SwitchoverAutonomousDatabase" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.RestoreAutonomousDatabaseRequest" + "type": "google.cloud.oracledatabase_v1.types.SwitchoverAutonomousDatabaseRequest" }, { "name": "name", "type": "str" }, { - "name": "restore_time", - "type": "google.protobuf.timestamp_pb2.Timestamp" + "name": "peer_autonomous_database", + "type": "str" }, { "name": "retry", @@ -3669,21 +9086,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "restore_autonomous_database" + "shortName": "switchover_autonomous_database" }, - "description": "Sample for RestoreAutonomousDatabase", - "file": "oracledatabase_v1_generated_oracle_database_restore_autonomous_database_async.py", + "description": "Sample for SwitchoverAutonomousDatabase", + "file": "oracledatabase_v1_generated_oracle_database_switchover_autonomous_database_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_RestoreAutonomousDatabase_async", + "regionTag": 
"oracledatabase_v1_generated_OracleDatabase_SwitchoverAutonomousDatabase_async", "segments": [ { - "end": 55, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 56, "start": 27, "type": "SHORT" }, @@ -3693,22 +9110,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_restore_autonomous_database_async.py" + "title": "oracledatabase_v1_generated_oracle_database_switchover_autonomous_database_async.py" }, { "canonical": true, @@ -3717,27 +9134,27 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", "shortName": "OracleDatabaseClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.restore_autonomous_database", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.switchover_autonomous_database", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.RestoreAutonomousDatabase", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.SwitchoverAutonomousDatabase", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "RestoreAutonomousDatabase" + "shortName": "SwitchoverAutonomousDatabase" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.RestoreAutonomousDatabaseRequest" + "type": "google.cloud.oracledatabase_v1.types.SwitchoverAutonomousDatabaseRequest" }, { "name": "name", "type": "str" }, { - "name": "restore_time", - "type": "google.protobuf.timestamp_pb2.Timestamp" + "name": "peer_autonomous_database", + "type": "str" }, { "name": "retry", @@ -3753,21 +9170,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "restore_autonomous_database" + 
"shortName": "switchover_autonomous_database" }, - "description": "Sample for RestoreAutonomousDatabase", - "file": "oracledatabase_v1_generated_oracle_database_restore_autonomous_database_sync.py", + "description": "Sample for SwitchoverAutonomousDatabase", + "file": "oracledatabase_v1_generated_oracle_database_switchover_autonomous_database_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_RestoreAutonomousDatabase_sync", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_SwitchoverAutonomousDatabase_sync", "segments": [ { - "end": 55, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 56, "start": 27, "type": "SHORT" }, @@ -3777,22 +9194,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_restore_autonomous_database_sync.py" + "title": "oracledatabase_v1_generated_oracle_database_switchover_autonomous_database_sync.py" }, { "canonical": true, @@ -3802,23 +9219,27 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", "shortName": "OracleDatabaseAsyncClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.start_autonomous_database", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.update_autonomous_database", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.StartAutonomousDatabase", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.UpdateAutonomousDatabase", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "StartAutonomousDatabase" + "shortName": "UpdateAutonomousDatabase" }, "parameters": [ { 
"name": "request", - "type": "google.cloud.oracledatabase_v1.types.StartAutonomousDatabaseRequest" + "type": "google.cloud.oracledatabase_v1.types.UpdateAutonomousDatabaseRequest" }, { - "name": "name", - "type": "str" + "name": "autonomous_database", + "type": "google.cloud.oracledatabase_v1.types.AutonomousDatabase" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -3834,21 +9255,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "start_autonomous_database" + "shortName": "update_autonomous_database" }, - "description": "Sample for StartAutonomousDatabase", - "file": "oracledatabase_v1_generated_oracle_database_start_autonomous_database_async.py", + "description": "Sample for UpdateAutonomousDatabase", + "file": "oracledatabase_v1_generated_oracle_database_update_autonomous_database_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_StartAutonomousDatabase_async", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_UpdateAutonomousDatabase_async", "segments": [ { - "end": 55, + "end": 54, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 54, "start": 27, "type": "SHORT" }, @@ -3858,22 +9279,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 44, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 51, + "start": 45, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 55, + "start": 52, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_start_autonomous_database_async.py" + "title": "oracledatabase_v1_generated_oracle_database_update_autonomous_database_async.py" }, { "canonical": true, @@ -3882,23 +9303,27 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", "shortName": "OracleDatabaseClient" }, - "fullName": 
"google.cloud.oracledatabase_v1.OracleDatabaseClient.start_autonomous_database", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.update_autonomous_database", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.StartAutonomousDatabase", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.UpdateAutonomousDatabase", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "StartAutonomousDatabase" + "shortName": "UpdateAutonomousDatabase" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.StartAutonomousDatabaseRequest" + "type": "google.cloud.oracledatabase_v1.types.UpdateAutonomousDatabaseRequest" }, { - "name": "name", - "type": "str" + "name": "autonomous_database", + "type": "google.cloud.oracledatabase_v1.types.AutonomousDatabase" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -3914,21 +9339,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "start_autonomous_database" + "shortName": "update_autonomous_database" }, - "description": "Sample for StartAutonomousDatabase", - "file": "oracledatabase_v1_generated_oracle_database_start_autonomous_database_sync.py", + "description": "Sample for UpdateAutonomousDatabase", + "file": "oracledatabase_v1_generated_oracle_database_update_autonomous_database_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_StartAutonomousDatabase_sync", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_UpdateAutonomousDatabase_sync", "segments": [ { - "end": 55, + "end": 54, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 54, "start": 27, "type": "SHORT" }, @@ -3938,22 +9363,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 44, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, 
+ "end": 51, + "start": 45, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 55, + "start": 52, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_start_autonomous_database_sync.py" + "title": "oracledatabase_v1_generated_oracle_database_update_autonomous_database_sync.py" }, { "canonical": true, @@ -3963,23 +9388,27 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient", "shortName": "OracleDatabaseAsyncClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.stop_autonomous_database", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseAsyncClient.update_exadb_vm_cluster", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.StopAutonomousDatabase", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.UpdateExadbVmCluster", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "StopAutonomousDatabase" + "shortName": "UpdateExadbVmCluster" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.StopAutonomousDatabaseRequest" + "type": "google.cloud.oracledatabase_v1.types.UpdateExadbVmClusterRequest" }, { - "name": "name", - "type": "str" + "name": "exadb_vm_cluster", + "type": "google.cloud.oracledatabase_v1.types.ExadbVmCluster" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -3995,21 +9424,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "stop_autonomous_database" + "shortName": "update_exadb_vm_cluster" }, - "description": "Sample for StopAutonomousDatabase", - "file": "oracledatabase_v1_generated_oracle_database_stop_autonomous_database_async.py", + "description": "Sample for UpdateExadbVmCluster", + "file": "oracledatabase_v1_generated_oracle_database_update_exadb_vm_cluster_async.py", "language": "PYTHON", "origin": 
"API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_StopAutonomousDatabase_async", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_UpdateExadbVmCluster_async", "segments": [ { - "end": 55, + "end": 68, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 68, "start": 27, "type": "SHORT" }, @@ -4019,22 +9448,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 58, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 65, + "start": 59, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 69, + "start": 66, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_stop_autonomous_database_async.py" + "title": "oracledatabase_v1_generated_oracle_database_update_exadb_vm_cluster_async.py" }, { "canonical": true, @@ -4043,23 +9472,27 @@ "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient", "shortName": "OracleDatabaseClient" }, - "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.stop_autonomous_database", + "fullName": "google.cloud.oracledatabase_v1.OracleDatabaseClient.update_exadb_vm_cluster", "method": { - "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.StopAutonomousDatabase", + "fullName": "google.cloud.oracledatabase.v1.OracleDatabase.UpdateExadbVmCluster", "service": { "fullName": "google.cloud.oracledatabase.v1.OracleDatabase", "shortName": "OracleDatabase" }, - "shortName": "StopAutonomousDatabase" + "shortName": "UpdateExadbVmCluster" }, "parameters": [ { "name": "request", - "type": "google.cloud.oracledatabase_v1.types.StopAutonomousDatabaseRequest" + "type": "google.cloud.oracledatabase_v1.types.UpdateExadbVmClusterRequest" }, { - "name": "name", - "type": "str" + "name": "exadb_vm_cluster", + "type": "google.cloud.oracledatabase_v1.types.ExadbVmCluster" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -4075,21 +9508,21 @@ } 
], "resultType": "google.api_core.operation.Operation", - "shortName": "stop_autonomous_database" + "shortName": "update_exadb_vm_cluster" }, - "description": "Sample for StopAutonomousDatabase", - "file": "oracledatabase_v1_generated_oracle_database_stop_autonomous_database_sync.py", + "description": "Sample for UpdateExadbVmCluster", + "file": "oracledatabase_v1_generated_oracle_database_update_exadb_vm_cluster_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "oracledatabase_v1_generated_OracleDatabase_StopAutonomousDatabase_sync", + "regionTag": "oracledatabase_v1_generated_OracleDatabase_UpdateExadbVmCluster_sync", "segments": [ { - "end": 55, + "end": 68, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 68, "start": 27, "type": "SHORT" }, @@ -4099,22 +9532,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 58, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 65, + "start": 59, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 69, + "start": 66, "type": "RESPONSE_HANDLING" } ], - "title": "oracledatabase_v1_generated_oracle_database_stop_autonomous_database_sync.py" + "title": "oracledatabase_v1_generated_oracle_database_update_exadb_vm_cluster_sync.py" } ] } diff --git a/packages/google-cloud-oracledatabase/scripts/fixup_oracledatabase_v1_keywords.py b/packages/google-cloud-oracledatabase/scripts/fixup_oracledatabase_v1_keywords.py index df8133d8b837..c13f82059475 100644 --- a/packages/google-cloud-oracledatabase/scripts/fixup_oracledatabase_v1_keywords.py +++ b/packages/google-cloud-oracledatabase/scripts/fixup_oracledatabase_v1_keywords.py @@ -42,28 +42,61 @@ class oracledatabaseCallTransformer(cst.CSTTransformer): 'create_autonomous_database': ('parent', 'autonomous_database_id', 'autonomous_database', 'request_id', ), 'create_cloud_exadata_infrastructure': ('parent', 'cloud_exadata_infrastructure_id', 'cloud_exadata_infrastructure', 'request_id', ), 
'create_cloud_vm_cluster': ('parent', 'cloud_vm_cluster_id', 'cloud_vm_cluster', 'request_id', ), + 'create_db_system': ('parent', 'db_system_id', 'db_system', 'request_id', ), + 'create_exadb_vm_cluster': ('parent', 'exadb_vm_cluster_id', 'exadb_vm_cluster', 'request_id', ), + 'create_exascale_db_storage_vault': ('parent', 'exascale_db_storage_vault_id', 'exascale_db_storage_vault', 'request_id', ), + 'create_odb_network': ('parent', 'odb_network_id', 'odb_network', 'request_id', ), + 'create_odb_subnet': ('parent', 'odb_subnet_id', 'odb_subnet', 'request_id', ), 'delete_autonomous_database': ('name', 'request_id', ), 'delete_cloud_exadata_infrastructure': ('name', 'request_id', 'force', ), 'delete_cloud_vm_cluster': ('name', 'request_id', 'force', ), + 'delete_db_system': ('name', 'request_id', ), + 'delete_exadb_vm_cluster': ('name', 'request_id', ), + 'delete_exascale_db_storage_vault': ('name', 'request_id', ), + 'delete_odb_network': ('name', 'request_id', ), + 'delete_odb_subnet': ('name', 'request_id', ), + 'failover_autonomous_database': ('name', 'peer_autonomous_database', ), 'generate_autonomous_database_wallet': ('name', 'password', 'type_', 'is_regional', ), 'get_autonomous_database': ('name', ), 'get_cloud_exadata_infrastructure': ('name', ), 'get_cloud_vm_cluster': ('name', ), + 'get_database': ('name', ), + 'get_db_system': ('name', ), + 'get_exadb_vm_cluster': ('name', ), + 'get_exascale_db_storage_vault': ('name', ), + 'get_odb_network': ('name', ), + 'get_odb_subnet': ('name', ), + 'get_pluggable_database': ('name', ), 'list_autonomous_database_backups': ('parent', 'filter', 'page_size', 'page_token', ), 'list_autonomous_database_character_sets': ('parent', 'page_size', 'page_token', 'filter', ), 'list_autonomous_databases': ('parent', 'page_size', 'page_token', 'filter', 'order_by', ), 'list_autonomous_db_versions': ('parent', 'page_size', 'page_token', ), - 'list_cloud_exadata_infrastructures': ('parent', 'page_size', 'page_token', ), + 
'list_cloud_exadata_infrastructures': ('parent', 'page_size', 'page_token', 'filter', 'order_by', ), 'list_cloud_vm_clusters': ('parent', 'page_size', 'page_token', 'filter', ), + 'list_database_character_sets': ('parent', 'page_size', 'page_token', 'filter', ), + 'list_databases': ('parent', 'page_size', 'page_token', 'filter', ), 'list_db_nodes': ('parent', 'page_size', 'page_token', ), 'list_db_servers': ('parent', 'page_size', 'page_token', ), - 'list_db_system_shapes': ('parent', 'page_size', 'page_token', ), + 'list_db_system_initial_storage_sizes': ('parent', 'page_size', 'page_token', ), + 'list_db_systems': ('parent', 'page_size', 'page_token', 'filter', 'order_by', ), + 'list_db_system_shapes': ('parent', 'page_size', 'page_token', 'filter', ), + 'list_db_versions': ('parent', 'page_size', 'page_token', 'filter', ), 'list_entitlements': ('parent', 'page_size', 'page_token', ), - 'list_gi_versions': ('parent', 'page_size', 'page_token', ), + 'list_exadb_vm_clusters': ('parent', 'page_size', 'page_token', 'filter', 'order_by', ), + 'list_exascale_db_storage_vaults': ('parent', 'page_size', 'page_token', 'filter', 'order_by', ), + 'list_gi_versions': ('parent', 'page_size', 'page_token', 'filter', ), + 'list_minor_versions': ('parent', 'page_size', 'page_token', 'filter', ), + 'list_odb_networks': ('parent', 'page_size', 'page_token', 'filter', 'order_by', ), + 'list_odb_subnets': ('parent', 'page_size', 'page_token', 'filter', 'order_by', ), + 'list_pluggable_databases': ('parent', 'page_size', 'page_token', 'filter', ), + 'remove_virtual_machine_exadb_vm_cluster': ('name', 'hostnames', 'request_id', ), 'restart_autonomous_database': ('name', ), 'restore_autonomous_database': ('name', 'restore_time', ), 'start_autonomous_database': ('name', ), 'stop_autonomous_database': ('name', ), + 'switchover_autonomous_database': ('name', 'peer_autonomous_database', ), + 'update_autonomous_database': ('autonomous_database', 'update_mask', 'request_id', ), + 
'update_exadb_vm_cluster': ('exadb_vm_cluster', 'update_mask', 'request_id', ), } def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: diff --git a/packages/google-cloud-oracledatabase/tests/unit/gapic/oracledatabase_v1/test_oracle_database.py b/packages/google-cloud-oracledatabase/tests/unit/gapic/oracledatabase_v1/test_oracle_database.py index 6084dbf272f8..3de8a928efc3 100644 --- a/packages/google-cloud-oracledatabase/tests/unit/gapic/oracledatabase_v1/test_oracle_database.py +++ b/packages/google-cloud-oracledatabase/tests/unit/gapic/oracledatabase_v1/test_oracle_database.py @@ -14,7 +14,6 @@ # limitations under the License. # import os -import re # try/except added for compatibility with python < 3.8 try: @@ -65,6 +64,7 @@ from google.oauth2 import service_account from google.protobuf import duration_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from google.type import datetime_pb2 # type: ignore from google.type import dayofweek_pb2 # type: ignore @@ -82,19 +82,42 @@ autonomous_db_backup, autonomous_db_version, common, + database, + database_character_set, db_node, db_server, +) +from google.cloud.oracledatabase_v1.types import ( + db_system_initial_storage_size, db_system_shape, + db_version, entitlement, exadata_infra, - gi_version, +) +from google.cloud.oracledatabase_v1.types import ( oracledatabase, + pluggable_database, vm_cluster, ) from google.cloud.oracledatabase_v1.types import ( autonomous_database as gco_autonomous_database, ) +from google.cloud.oracledatabase_v1.types import ( + exadb_vm_cluster as gco_exadb_vm_cluster, +) +from google.cloud.oracledatabase_v1.types import ( + exascale_db_storage_vault as gco_exascale_db_storage_vault, +) from google.cloud.oracledatabase_v1.types import autonomous_database +from google.cloud.oracledatabase_v1.types import db_system +from 
google.cloud.oracledatabase_v1.types import db_system as gco_db_system +from google.cloud.oracledatabase_v1.types import exadb_vm_cluster +from google.cloud.oracledatabase_v1.types import exascale_db_storage_vault +from google.cloud.oracledatabase_v1.types import gi_version, minor_version +from google.cloud.oracledatabase_v1.types import odb_network +from google.cloud.oracledatabase_v1.types import odb_network as gco_odb_network +from google.cloud.oracledatabase_v1.types import odb_subnet +from google.cloud.oracledatabase_v1.types import odb_subnet as gco_odb_subnet CRED_INFO_JSON = { "credential_source": "/path/to/file", @@ -1198,6 +1221,8 @@ def test_list_cloud_exadata_infrastructures_non_empty_request_with_auto_populate request = oracledatabase.ListCloudExadataInfrastructuresRequest( parent="parent_value", page_token="page_token_value", + filter="filter_value", + order_by="order_by_value", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1213,6 +1238,8 @@ def test_list_cloud_exadata_infrastructures_non_empty_request_with_auto_populate assert args[0] == oracledatabase.ListCloudExadataInfrastructuresRequest( parent="parent_value", page_token="page_token_value", + filter="filter_value", + order_by="order_by_value", ) @@ -3353,10 +3380,13 @@ def test_get_cloud_vm_cluster(request_type, transport: str = "grpc"): name="name_value", exadata_infrastructure="exadata_infrastructure_value", display_name="display_name_value", - gcp_oracle_zone="gcp_oracle_zone_value", cidr="cidr_value", backup_subnet_cidr="backup_subnet_cidr_value", network="network_value", + gcp_oracle_zone="gcp_oracle_zone_value", + odb_network="odb_network_value", + odb_subnet="odb_subnet_value", + backup_odb_subnet="backup_odb_subnet_value", ) response = client.get_cloud_vm_cluster(request) @@ -3371,10 +3401,13 @@ def test_get_cloud_vm_cluster(request_type, transport: str = "grpc"): assert response.name == "name_value" assert response.exadata_infrastructure == 
"exadata_infrastructure_value" assert response.display_name == "display_name_value" - assert response.gcp_oracle_zone == "gcp_oracle_zone_value" assert response.cidr == "cidr_value" assert response.backup_subnet_cidr == "backup_subnet_cidr_value" assert response.network == "network_value" + assert response.gcp_oracle_zone == "gcp_oracle_zone_value" + assert response.odb_network == "odb_network_value" + assert response.odb_subnet == "odb_subnet_value" + assert response.backup_odb_subnet == "backup_odb_subnet_value" def test_get_cloud_vm_cluster_non_empty_request_with_auto_populated_field(): @@ -3512,10 +3545,13 @@ async def test_get_cloud_vm_cluster_async( name="name_value", exadata_infrastructure="exadata_infrastructure_value", display_name="display_name_value", - gcp_oracle_zone="gcp_oracle_zone_value", cidr="cidr_value", backup_subnet_cidr="backup_subnet_cidr_value", network="network_value", + gcp_oracle_zone="gcp_oracle_zone_value", + odb_network="odb_network_value", + odb_subnet="odb_subnet_value", + backup_odb_subnet="backup_odb_subnet_value", ) ) response = await client.get_cloud_vm_cluster(request) @@ -3531,10 +3567,13 @@ async def test_get_cloud_vm_cluster_async( assert response.name == "name_value" assert response.exadata_infrastructure == "exadata_infrastructure_value" assert response.display_name == "display_name_value" - assert response.gcp_oracle_zone == "gcp_oracle_zone_value" assert response.cidr == "cidr_value" assert response.backup_subnet_cidr == "backup_subnet_cidr_value" assert response.network == "network_value" + assert response.gcp_oracle_zone == "gcp_oracle_zone_value" + assert response.odb_network == "odb_network_value" + assert response.odb_subnet == "odb_subnet_value" + assert response.backup_odb_subnet == "backup_odb_subnet_value" @pytest.mark.asyncio @@ -6040,6 +6079,7 @@ def test_list_gi_versions_non_empty_request_with_auto_populated_field(): request = oracledatabase.ListGiVersionsRequest( parent="parent_value", 
page_token="page_token_value", + filter="filter_value", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -6053,6 +6093,7 @@ def test_list_gi_versions_non_empty_request_with_auto_populated_field(): assert args[0] == oracledatabase.ListGiVersionsRequest( parent="parent_value", page_token="page_token_value", + filter="filter_value", ) @@ -6511,6 +6552,553 @@ async def test_list_gi_versions_async_pages(): assert page_.raw_page.next_page_token == token +@pytest.mark.parametrize( + "request_type", + [ + minor_version.ListMinorVersionsRequest, + dict, + ], +) +def test_list_minor_versions(request_type, transport: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_minor_versions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = minor_version.ListMinorVersionsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_minor_versions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = minor_version.ListMinorVersionsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListMinorVersionsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_minor_versions_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = minor_version.ListMinorVersionsRequest( + parent="parent_value", + page_token="page_token_value", + filter="filter_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_minor_versions), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.list_minor_versions(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == minor_version.ListMinorVersionsRequest( + parent="parent_value", + page_token="page_token_value", + filter="filter_value", + ) + + +def test_list_minor_versions_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_minor_versions in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_minor_versions + ] = mock_rpc + request = {} + client.list_minor_versions(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_minor_versions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_minor_versions_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.list_minor_versions + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_minor_versions + ] = mock_rpc + + request = {} + await client.list_minor_versions(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.list_minor_versions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_minor_versions_async( + transport: str = "grpc_asyncio", request_type=minor_version.ListMinorVersionsRequest +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_minor_versions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + minor_version.ListMinorVersionsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_minor_versions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = minor_version.ListMinorVersionsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListMinorVersionsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_minor_versions_async_from_dict(): + await test_list_minor_versions_async(request_type=dict) + + +def test_list_minor_versions_field_headers(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = minor_version.ListMinorVersionsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_minor_versions), "__call__" + ) as call: + call.return_value = minor_version.ListMinorVersionsResponse() + client.list_minor_versions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_minor_versions_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = minor_version.ListMinorVersionsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_minor_versions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + minor_version.ListMinorVersionsResponse() + ) + await client.list_minor_versions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_minor_versions_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_minor_versions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = minor_version.ListMinorVersionsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_minor_versions( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_minor_versions_flattened_error(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_minor_versions( + minor_version.ListMinorVersionsRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_minor_versions_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_minor_versions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = minor_version.ListMinorVersionsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + minor_version.ListMinorVersionsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_minor_versions( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_minor_versions_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_minor_versions( + minor_version.ListMinorVersionsRequest(), + parent="parent_value", + ) + + +def test_list_minor_versions_pager(transport_name: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_minor_versions), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + minor_version.ListMinorVersionsResponse( + minor_versions=[ + minor_version.MinorVersion(), + minor_version.MinorVersion(), + minor_version.MinorVersion(), + ], + next_page_token="abc", + ), + minor_version.ListMinorVersionsResponse( + minor_versions=[], + next_page_token="def", + ), + minor_version.ListMinorVersionsResponse( + minor_versions=[ + minor_version.MinorVersion(), + ], + next_page_token="ghi", + ), + minor_version.ListMinorVersionsResponse( + minor_versions=[ + minor_version.MinorVersion(), + minor_version.MinorVersion(), + ], + ), + RuntimeError, + ) + + expected_metadata = () + retry = retries.Retry() + timeout = 5 + expected_metadata = tuple(expected_metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_minor_versions(request={}, retry=retry, timeout=timeout) + + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, minor_version.MinorVersion) for i in results) + + +def test_list_minor_versions_pages(transport_name: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_minor_versions), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + minor_version.ListMinorVersionsResponse( + minor_versions=[ + minor_version.MinorVersion(), + minor_version.MinorVersion(), + minor_version.MinorVersion(), + ], + next_page_token="abc", + ), + minor_version.ListMinorVersionsResponse( + minor_versions=[], + next_page_token="def", + ), + minor_version.ListMinorVersionsResponse( + minor_versions=[ + minor_version.MinorVersion(), + ], + next_page_token="ghi", + ), + minor_version.ListMinorVersionsResponse( + minor_versions=[ + minor_version.MinorVersion(), + minor_version.MinorVersion(), + ], + ), + RuntimeError, + ) + pages = list(client.list_minor_versions(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_minor_versions_async_pager(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_minor_versions), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + minor_version.ListMinorVersionsResponse( + minor_versions=[ + minor_version.MinorVersion(), + minor_version.MinorVersion(), + minor_version.MinorVersion(), + ], + next_page_token="abc", + ), + minor_version.ListMinorVersionsResponse( + minor_versions=[], + next_page_token="def", + ), + minor_version.ListMinorVersionsResponse( + minor_versions=[ + minor_version.MinorVersion(), + ], + next_page_token="ghi", + ), + minor_version.ListMinorVersionsResponse( + minor_versions=[ + minor_version.MinorVersion(), + minor_version.MinorVersion(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_minor_versions( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, minor_version.MinorVersion) for i in responses) + + +@pytest.mark.asyncio +async def test_list_minor_versions_async_pages(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_minor_versions), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + minor_version.ListMinorVersionsResponse( + minor_versions=[ + minor_version.MinorVersion(), + minor_version.MinorVersion(), + minor_version.MinorVersion(), + ], + next_page_token="abc", + ), + minor_version.ListMinorVersionsResponse( + minor_versions=[], + next_page_token="def", + ), + minor_version.ListMinorVersionsResponse( + minor_versions=[ + minor_version.MinorVersion(), + ], + next_page_token="ghi", + ), + minor_version.ListMinorVersionsResponse( + minor_versions=[ + minor_version.MinorVersion(), + minor_version.MinorVersion(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_minor_versions(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + @pytest.mark.parametrize( "request_type", [ @@ -6563,6 +7151,7 @@ def test_list_db_system_shapes_non_empty_request_with_auto_populated_field(): request = oracledatabase.ListDbSystemShapesRequest( parent="parent_value", page_token="page_token_value", + filter="filter_value", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -6578,6 +7167,7 @@ def test_list_db_system_shapes_non_empty_request_with_auto_populated_field(): assert args[0] == oracledatabase.ListDbSystemShapesRequest( parent="parent_value", page_token="page_token_value", + filter="filter_value", ) @@ -7645,6 +8235,12 @@ def test_get_autonomous_database(request_type, transport: str = "grpc"): admin_password="admin_password_value", network="network_value", cidr="cidr_value", + odb_network="odb_network_value", + odb_subnet="odb_subnet_value", + peer_autonomous_databases=["peer_autonomous_databases_value"], + disaster_recovery_supported_locations=[ + "disaster_recovery_supported_locations_value" + ], ) response = client.get_autonomous_database(request) @@ -7663,6 +8259,12 @@ def test_get_autonomous_database(request_type, transport: str = "grpc"): assert response.admin_password == "admin_password_value" assert response.network == "network_value" assert response.cidr == "cidr_value" + assert response.odb_network == "odb_network_value" + assert response.odb_subnet == "odb_subnet_value" + assert response.peer_autonomous_databases == ["peer_autonomous_databases_value"] + assert response.disaster_recovery_supported_locations == [ + "disaster_recovery_supported_locations_value" + ] def test_get_autonomous_database_non_empty_request_with_auto_populated_field(): @@ -7805,6 +8407,12 @@ async def test_get_autonomous_database_async( admin_password="admin_password_value", network="network_value", cidr="cidr_value", + odb_network="odb_network_value", + odb_subnet="odb_subnet_value", + peer_autonomous_databases=["peer_autonomous_databases_value"], + disaster_recovery_supported_locations=[ + "disaster_recovery_supported_locations_value" + ], ) ) response = await client.get_autonomous_database(request) @@ -7824,6 +8432,12 @@ async def test_get_autonomous_database_async( assert response.admin_password == "admin_password_value" assert response.network == "network_value" assert response.cidr == "cidr_value" + assert response.odb_network == 
"odb_network_value" + assert response.odb_subnet == "odb_subnet_value" + assert response.peer_autonomous_databases == ["peer_autonomous_databases_value"] + assert response.disaster_recovery_supported_locations == [ + "disaster_recovery_supported_locations_value" + ] @pytest.mark.asyncio @@ -8359,6 +8973,367 @@ async def test_create_autonomous_database_flattened_error_async(): ) +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.UpdateAutonomousDatabaseRequest, + dict, + ], +) +def test_update_autonomous_database(request_type, transport: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_autonomous_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.update_autonomous_database(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = oracledatabase.UpdateAutonomousDatabaseRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_autonomous_database_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = oracledatabase.UpdateAutonomousDatabaseRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_autonomous_database), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.update_autonomous_database(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == oracledatabase.UpdateAutonomousDatabaseRequest() + + +def test_update_autonomous_database_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.update_autonomous_database + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.update_autonomous_database + ] = mock_rpc + request = {} + client.update_autonomous_database(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_autonomous_database(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_autonomous_database_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.update_autonomous_database + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.update_autonomous_database + ] = mock_rpc + + request = {} + await client.update_autonomous_database(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.update_autonomous_database(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_autonomous_database_async( + transport: str = "grpc_asyncio", + request_type=oracledatabase.UpdateAutonomousDatabaseRequest, +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_autonomous_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.update_autonomous_database(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = oracledatabase.UpdateAutonomousDatabaseRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_autonomous_database_async_from_dict(): + await test_update_autonomous_database_async(request_type=dict) + + +def test_update_autonomous_database_field_headers(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = oracledatabase.UpdateAutonomousDatabaseRequest() + + request.autonomous_database.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_autonomous_database), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_autonomous_database(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "autonomous_database.name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_autonomous_database_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = oracledatabase.UpdateAutonomousDatabaseRequest() + + request.autonomous_database.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_autonomous_database), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.update_autonomous_database(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "autonomous_database.name=name_value", + ) in kw["metadata"] + + +def test_update_autonomous_database_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_autonomous_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_autonomous_database( + autonomous_database=gco_autonomous_database.AutonomousDatabase( + name="name_value" + ), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].autonomous_database + mock_val = gco_autonomous_database.AutonomousDatabase(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +def test_update_autonomous_database_flattened_error(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_autonomous_database( + oracledatabase.UpdateAutonomousDatabaseRequest(), + autonomous_database=gco_autonomous_database.AutonomousDatabase( + name="name_value" + ), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_autonomous_database_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_autonomous_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_autonomous_database( + autonomous_database=gco_autonomous_database.AutonomousDatabase( + name="name_value" + ), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].autonomous_database + mock_val = gco_autonomous_database.AutonomousDatabase(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_update_autonomous_database_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_autonomous_database( + oracledatabase.UpdateAutonomousDatabaseRequest(), + autonomous_database=gco_autonomous_database.AutonomousDatabase( + name="name_value" + ), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + @pytest.mark.parametrize( "request_type", [ @@ -12150,13 +13125,80 @@ async def test_restart_autonomous_database_flattened_error_async(): ) -def test_list_cloud_exadata_infrastructures_rest_use_cached_wrapped_rpc(): +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.SwitchoverAutonomousDatabaseRequest, + dict, + ], +) +def test_switchover_autonomous_database(request_type, transport: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.switchover_autonomous_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.switchover_autonomous_database(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = oracledatabase.SwitchoverAutonomousDatabaseRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_switchover_autonomous_database_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = oracledatabase.SwitchoverAutonomousDatabaseRequest( + name="name_value", + peer_autonomous_database="peer_autonomous_database_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.switchover_autonomous_database), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.switchover_autonomous_database(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == oracledatabase.SwitchoverAutonomousDatabaseRequest( + name="name_value", + peer_autonomous_database="peer_autonomous_database_value", + ) + + +def test_switchover_autonomous_database_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -12165,7 +13207,7 @@ def test_list_cloud_exadata_infrastructures_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.list_cloud_exadata_infrastructures + client._transport.switchover_autonomous_database in client._transport._wrapped_methods ) @@ -12175,262 +13217,347 @@ def test_list_cloud_exadata_infrastructures_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. 
) client._transport._wrapped_methods[ - client._transport.list_cloud_exadata_infrastructures + client._transport.switchover_autonomous_database ] = mock_rpc - request = {} - client.list_cloud_exadata_infrastructures(request) + client.switchover_autonomous_database(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.list_cloud_exadata_infrastructures(request) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.switchover_autonomous_database(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_list_cloud_exadata_infrastructures_rest_required_fields( - request_type=oracledatabase.ListCloudExadataInfrastructuresRequest, +@pytest.mark.asyncio +async def test_switchover_autonomous_database_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.OracleDatabaseRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # verify fields with default values are dropped + # Ensure method has been cached + assert ( + client._client._transport.switchover_autonomous_database + in client._client._transport._wrapped_methods + 
) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_cloud_exadata_infrastructures._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.switchover_autonomous_database + ] = mock_rpc - # verify required fields with default values are now present + request = {} + await client.switchover_autonomous_database(request) - jsonified_request["parent"] = "parent_value" + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_cloud_exadata_infrastructures._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "page_size", - "page_token", - ) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.switchover_autonomous_database(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_switchover_autonomous_database_async( + transport: str = "grpc_asyncio", + request_type=oracledatabase.SwitchoverAutonomousDatabaseRequest, +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - jsonified_request.update(unset_fields) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.switchover_autonomous_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.switchover_autonomous_database(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = oracledatabase.SwitchoverAutonomousDatabaseRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_switchover_autonomous_database_async_from_dict(): + await test_switchover_autonomous_database_async(request_type=dict) + +def test_switchover_autonomous_database_field_headers(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = oracledatabase.ListCloudExadataInfrastructuresResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - response_value = Response() - response_value.status_code = 200 + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = oracledatabase.SwitchoverAutonomousDatabaseRequest() - # Convert return value to protobuf type - return_value = oracledatabase.ListCloudExadataInfrastructuresResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) + request.name = "name_value" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.switchover_autonomous_database), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.switchover_autonomous_database(request) - response = client.list_cloud_exadata_infrastructures(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] -def test_list_cloud_exadata_infrastructures_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_switchover_autonomous_database_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - unset_fields = ( - transport.list_cloud_exadata_infrastructures._get_unset_required_fields({}) - ) - assert set(unset_fields) == ( - set( - ( - "pageSize", - "pageToken", - ) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = oracledatabase.SwitchoverAutonomousDatabaseRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.switchover_autonomous_database), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") ) - & set(("parent",)) - ) + await client.switchover_autonomous_database(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request -def test_list_cloud_exadata_infrastructures_rest_flattened(): + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_switchover_autonomous_database_flattened(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = oracledatabase.ListCloudExadataInfrastructuresResponse() + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.switchover_autonomous_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.switchover_autonomous_database( + name="name_value", + peer_autonomous_database="peer_autonomous_database_value", + ) - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/locations/sample2"} + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + arg = args[0].peer_autonomous_database + mock_val = "peer_autonomous_database_value" + assert arg == mock_val - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = oracledatabase.ListCloudExadataInfrastructuresResponse.pb( - return_value +def test_switchover_autonomous_database_flattened_error(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.switchover_autonomous_database( + oracledatabase.SwitchoverAutonomousDatabaseRequest(), + name="name_value", + peer_autonomous_database="peer_autonomous_database_value", ) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_cloud_exadata_infrastructures(**mock_args) + +@pytest.mark.asyncio +async def test_switchover_autonomous_database_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.switchover_autonomous_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.switchover_autonomous_database( + name="name_value", + peer_autonomous_database="peer_autonomous_database_value", + ) # Establish that the underlying call was made with the expected # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{parent=projects/*/locations/*}/cloudExadataInfrastructures" - % client.transport._host, - args[1], - ) + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + arg = args[0].peer_autonomous_database + mock_val = "peer_autonomous_database_value" + assert arg == mock_val -def test_list_cloud_exadata_infrastructures_rest_flattened_error( - transport: str = "rest", -): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, +@pytest.mark.asyncio +async def test_switchover_autonomous_database_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.list_cloud_exadata_infrastructures( - oracledatabase.ListCloudExadataInfrastructuresRequest(), - parent="parent_value", + await client.switchover_autonomous_database( + oracledatabase.SwitchoverAutonomousDatabaseRequest(), + name="name_value", + peer_autonomous_database="peer_autonomous_database_value", ) -def test_list_cloud_exadata_infrastructures_rest_pager(transport: str = "rest"): +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.FailoverAutonomousDatabaseRequest, + dict, + ], +) +def test_failover_autonomous_database(request_type, transport: str = "grpc"): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. - # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - oracledatabase.ListCloudExadataInfrastructuresResponse( - cloud_exadata_infrastructures=[ - exadata_infra.CloudExadataInfrastructure(), - exadata_infra.CloudExadataInfrastructure(), - exadata_infra.CloudExadataInfrastructure(), - ], - next_page_token="abc", - ), - oracledatabase.ListCloudExadataInfrastructuresResponse( - cloud_exadata_infrastructures=[], - next_page_token="def", - ), - oracledatabase.ListCloudExadataInfrastructuresResponse( - cloud_exadata_infrastructures=[ - exadata_infra.CloudExadataInfrastructure(), - ], - next_page_token="ghi", - ), - oracledatabase.ListCloudExadataInfrastructuresResponse( - cloud_exadata_infrastructures=[ - exadata_infra.CloudExadataInfrastructure(), - exadata_infra.CloudExadataInfrastructure(), - ], - ), - ) - # Two responses for two calls - response = response + response + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so 
just send an empty request. + request = request_type() - # Wrap the values into proper Response objs - response = tuple( - oracledatabase.ListCloudExadataInfrastructuresResponse.to_json(x) - for x in response - ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.failover_autonomous_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.failover_autonomous_database(request) - sample_request = {"parent": "projects/sample1/locations/sample2"} + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = oracledatabase.FailoverAutonomousDatabaseRequest() + assert args[0] == request - pager = client.list_cloud_exadata_infrastructures(request=sample_request) + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) - results = list(pager) - assert len(results) == 6 - assert all( - isinstance(i, exadata_infra.CloudExadataInfrastructure) for i in results - ) - pages = list( - client.list_cloud_exadata_infrastructures(request=sample_request).pages +def test_failover_autonomous_database_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = oracledatabase.FailoverAutonomousDatabaseRequest( + name="name_value", + peer_autonomous_database="peer_autonomous_database_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.failover_autonomous_database), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.failover_autonomous_database(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == oracledatabase.FailoverAutonomousDatabaseRequest( + name="name_value", + peer_autonomous_database="peer_autonomous_database_value", ) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token -def test_get_cloud_exadata_infrastructure_rest_use_cached_wrapped_rpc(): +def test_failover_autonomous_database_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -12439,7 +13566,7 @@ def test_get_cloud_exadata_infrastructure_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.get_cloud_exadata_infrastructure + client._transport.failover_autonomous_database in client._transport._wrapped_methods ) @@ -12449,412 +13576,356 @@ def test_get_cloud_exadata_infrastructure_rest_use_cached_wrapped_rpc(): "foo" # 
operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.get_cloud_exadata_infrastructure + client._transport.failover_autonomous_database ] = mock_rpc - request = {} - client.get_cloud_exadata_infrastructure(request) + client.failover_autonomous_database(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.get_cloud_exadata_infrastructure(request) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.failover_autonomous_database(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_get_cloud_exadata_infrastructure_rest_required_fields( - request_type=oracledatabase.GetCloudExadataInfrastructureRequest, +@pytest.mark.asyncio +async def test_failover_autonomous_database_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.OracleDatabaseRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # verify fields with default values are dropped + # Ensure method has been cached + assert ( + client._client._transport.failover_autonomous_database + in 
client._client._transport._wrapped_methods + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_cloud_exadata_infrastructure._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.failover_autonomous_database + ] = mock_rpc - # verify required fields with default values are now present + request = {} + await client.failover_autonomous_database(request) - jsonified_request["name"] = "name_value" + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_cloud_exadata_infrastructure._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" + await client.failover_autonomous_database(request) - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = exadata_infra.CloudExadataInfrastructure() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. 
- with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = exadata_infra.CloudExadataInfrastructure.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) +@pytest.mark.asyncio +async def test_failover_autonomous_database_async( + transport: str = "grpc_asyncio", + request_type=oracledatabase.FailoverAutonomousDatabaseRequest, +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - response = client.get_cloud_exadata_infrastructure(request) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.failover_autonomous_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.failover_autonomous_database(request) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = oracledatabase.FailoverAutonomousDatabaseRequest() + assert args[0] == request + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) -def test_get_cloud_exadata_infrastructure_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - unset_fields = ( - transport.get_cloud_exadata_infrastructure._get_unset_required_fields({}) - ) - assert set(unset_fields) == (set(()) & set(("name",))) +@pytest.mark.asyncio +async def test_failover_autonomous_database_async_from_dict(): + await test_failover_autonomous_database_async(request_type=dict) -def test_get_cloud_exadata_infrastructure_rest_flattened(): +def test_failover_autonomous_database_field_headers(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = exadata_infra.CloudExadataInfrastructure() - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/locations/sample2/cloudExadataInfrastructures/sample3" - } + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = oracledatabase.FailoverAutonomousDatabaseRequest() - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) + request.name = "name_value" - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = exadata_infra.CloudExadataInfrastructure.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.failover_autonomous_database), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.failover_autonomous_database(request) - client.get_cloud_exadata_infrastructure(**mock_args) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{name=projects/*/locations/*/cloudExadataInfrastructures/*}" - % client.transport._host, - args[1], - ) + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] -def test_get_cloud_exadata_infrastructure_rest_flattened_error(transport: str = "rest"): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, +@pytest.mark.asyncio +async def test_failover_autonomous_database_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_cloud_exadata_infrastructure( - oracledatabase.GetCloudExadataInfrastructureRequest(), - name="name_value", - ) - - -def test_create_cloud_exadata_infrastructure_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = oracledatabase.FailoverAutonomousDatabaseRequest() - # Ensure method has been cached - assert ( - client._transport.create_cloud_exadata_infrastructure - in client._transport._wrapped_methods - ) + request.name = "name_value" - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.failover_autonomous_database), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") ) - client._transport._wrapped_methods[ - client._transport.create_cloud_exadata_infrastructure - ] = mock_rpc - - request = {} - client.create_cloud_exadata_infrastructure(request) + await client.failover_autonomous_database(request) # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.create_cloud_exadata_infrastructure(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_create_cloud_exadata_infrastructure_rest_required_fields( - request_type=oracledatabase.CreateCloudExadataInfrastructureRequest, -): - transport_class = transports.OracleDatabaseRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["cloud_exadata_infrastructure_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request - # verify fields with default values are dropped - assert "cloudExadataInfrastructureId" not in jsonified_request + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_cloud_exadata_infrastructure._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - # verify required fields with default values are now present - assert "cloudExadataInfrastructureId" in jsonified_request - assert ( - jsonified_request["cloudExadataInfrastructureId"] - == request_init["cloud_exadata_infrastructure_id"] +def test_failover_autonomous_database_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), ) - jsonified_request["parent"] = "parent_value" - jsonified_request[ - "cloudExadataInfrastructureId" - ] = "cloud_exadata_infrastructure_id_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_cloud_exadata_infrastructure._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "cloud_exadata_infrastructure_id", - "request_id", + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.failover_autonomous_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.failover_autonomous_database( + name="name_value", + peer_autonomous_database="peer_autonomous_database_value", ) - ) - jsonified_request.update(unset_fields) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "cloudExadataInfrastructureId" in jsonified_request - assert ( - jsonified_request["cloudExadataInfrastructureId"] - == "cloud_exadata_infrastructure_id_value" - ) + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + arg = args[0].peer_autonomous_database + mock_val = "peer_autonomous_database_value" + assert arg == mock_val + +def test_failover_autonomous_database_flattened_error(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.failover_autonomous_database( + oracledatabase.FailoverAutonomousDatabaseRequest(), + name="name_value", + peer_autonomous_database="peer_autonomous_database_value", + ) - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} +@pytest.mark.asyncio +async def test_failover_autonomous_database_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) - response = client.create_cloud_exadata_infrastructure(request) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.failover_autonomous_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") - expected_params = [ - ( - "cloudExadataInfrastructureId", - "", - ), - ("$alt", "json;enum-encoding=int"), - ] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.failover_autonomous_database( + name="name_value", + peer_autonomous_database="peer_autonomous_database_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + arg = args[0].peer_autonomous_database + mock_val = "peer_autonomous_database_value" + assert arg == mock_val -def test_create_cloud_exadata_infrastructure_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_failover_autonomous_database_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - unset_fields = ( - transport.create_cloud_exadata_infrastructure._get_unset_required_fields({}) - ) - assert set(unset_fields) == ( - set( - ( - "cloudExadataInfrastructureId", - "requestId", - ) - ) - & set( - ( - "parent", - "cloudExadataInfrastructureId", - "cloudExadataInfrastructure", - ) + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.failover_autonomous_database( + oracledatabase.FailoverAutonomousDatabaseRequest(), + name="name_value", + peer_autonomous_database="peer_autonomous_database_value", ) - ) -def test_create_cloud_exadata_infrastructure_rest_flattened(): +@pytest.mark.parametrize( + "request_type", + [ + odb_network.ListOdbNetworksRequest, + dict, + ], +) +def test_list_odb_networks(request_type, transport: str = "grpc"): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/locations/sample2"} + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - cloud_exadata_infrastructure=exadata_infra.CloudExadataInfrastructure( - name="name_value" - ), - cloud_exadata_infrastructure_id="cloud_exadata_infrastructure_id_value", + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_odb_networks), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = odb_network.ListOdbNetworksResponse( + next_page_token="next_page_token_value", + unreachable=["unreachable_value"], ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_odb_networks(request) - client.create_cloud_exadata_infrastructure(**mock_args) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = odb_network.ListOdbNetworksRequest() + assert args[0] == request - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{parent=projects/*/locations/*}/cloudExadataInfrastructures" - % client.transport._host, - args[1], - ) + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListOdbNetworksPager) + assert response.next_page_token == "next_page_token_value" + assert response.unreachable == ["unreachable_value"] -def test_create_cloud_exadata_infrastructure_rest_flattened_error( - transport: str = "rest", -): +def test_list_odb_networks_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="grpc", ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_cloud_exadata_infrastructure( - oracledatabase.CreateCloudExadataInfrastructureRequest(), + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = odb_network.ListOdbNetworksRequest( + parent="parent_value", + page_token="page_token_value", + filter="filter_value", + order_by="order_by_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_odb_networks), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.list_odb_networks(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == odb_network.ListOdbNetworksRequest( parent="parent_value", - cloud_exadata_infrastructure=exadata_infra.CloudExadataInfrastructure( - name="name_value" - ), - cloud_exadata_infrastructure_id="cloud_exadata_infrastructure_id_value", + page_token="page_token_value", + filter="filter_value", + order_by="order_by_value", ) -def test_delete_cloud_exadata_infrastructure_rest_use_cached_wrapped_rpc(): +def test_list_odb_networks_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -12862,10 +13933,7 @@ def test_delete_cloud_exadata_infrastructure_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert ( - client._transport.delete_cloud_exadata_infrastructure - in client._transport._wrapped_methods - ) + assert client._transport.list_odb_networks in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() @@ -12873,638 +13941,576 @@ def test_delete_cloud_exadata_infrastructure_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.delete_cloud_exadata_infrastructure + client._transport.list_odb_networks ] = mock_rpc - request = {} - client.delete_cloud_exadata_infrastructure(request) + client.list_odb_networks(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.delete_cloud_exadata_infrastructure(request) + client.list_odb_networks(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_delete_cloud_exadata_infrastructure_rest_required_fields( - request_type=oracledatabase.DeleteCloudExadataInfrastructureRequest, +@pytest.mark.asyncio +async def test_list_odb_networks_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.OracleDatabaseRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # verify fields with default values are dropped + # Ensure method has been cached + assert ( + client._client._transport.list_odb_networks + in client._client._transport._wrapped_methods + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_cloud_exadata_infrastructure._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_odb_networks + ] = 
mock_rpc - # verify required fields with default values are now present + request = {} + await client.list_odb_networks(request) - jsonified_request["name"] = "name_value" + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_cloud_exadata_infrastructure._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "force", - "request_id", - ) - ) - jsonified_request.update(unset_fields) + await client.list_odb_networks(request) - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "delete", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.delete_cloud_exadata_infrastructure(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 -def test_delete_cloud_exadata_infrastructure_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_list_odb_networks_async( + transport: str = "grpc_asyncio", request_type=odb_network.ListOdbNetworksRequest +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - unset_fields = ( - transport.delete_cloud_exadata_infrastructure._get_unset_required_fields({}) - ) - assert set(unset_fields) == ( - set( - ( - "force", - "requestId", + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_odb_networks), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + odb_network.ListOdbNetworksResponse( + next_page_token="next_page_token_value", + unreachable=["unreachable_value"], ) ) - & set(("name",)) - ) - - -def test_delete_cloud_exadata_infrastructure_rest_flattened(): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/locations/sample2/cloudExadataInfrastructures/sample3" - } + response = await client.list_odb_networks(request) - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = odb_network.ListOdbNetworksRequest() + assert args[0] == request - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListOdbNetworksAsyncPager) + assert response.next_page_token == "next_page_token_value" + assert response.unreachable == ["unreachable_value"] - client.delete_cloud_exadata_infrastructure(**mock_args) - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{name=projects/*/locations/*/cloudExadataInfrastructures/*}" - % client.transport._host, - args[1], - ) +@pytest.mark.asyncio +async def test_list_odb_networks_async_from_dict(): + await test_list_odb_networks_async(request_type=dict) -def test_delete_cloud_exadata_infrastructure_rest_flattened_error( - transport: str = "rest", -): +def test_list_odb_networks_field_headers(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_cloud_exadata_infrastructure( - oracledatabase.DeleteCloudExadataInfrastructureRequest(), - name="name_value", - ) - - -def test_list_cloud_vm_clusters_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.list_cloud_vm_clusters - in client._transport._wrapped_methods - ) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = odb_network.ListOdbNetworksRequest() - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client._transport._wrapped_methods[ - client._transport.list_cloud_vm_clusters - ] = mock_rpc + request.parent = "parent_value" - request = {} - client.list_cloud_vm_clusters(request) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_odb_networks), "__call__" + ) as call: + call.return_value = odb_network.ListOdbNetworksResponse() + client.list_odb_networks(request) # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.list_cloud_vm_clusters(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] -def test_list_cloud_vm_clusters_rest_required_fields( - request_type=oracledatabase.ListCloudVmClustersRequest, -): - transport_class = transports.OracleDatabaseRestTransport - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) +@pytest.mark.asyncio +async def test_list_odb_networks_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - # verify fields with default values are dropped + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = odb_network.ListOdbNetworksRequest() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_cloud_vm_clusters._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + request.parent = "parent_value" - # verify required fields with default values are now present + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_odb_networks), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + odb_network.ListOdbNetworksResponse() + ) + await client.list_odb_networks(request) - jsonified_request["parent"] = "parent_value" + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_cloud_vm_clusters._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "filter", - "page_size", - "page_token", - ) - ) - jsonified_request.update(unset_fields) + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" +def test_list_odb_networks_flattened(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = oracledatabase.ListCloudVmClustersResponse() - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = oracledatabase.ListCloudVmClustersResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_cloud_vm_clusters(request) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_odb_networks), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = odb_network.ListOdbNetworksResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_odb_networks( + parent="parent_value", + ) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val -def test_list_cloud_vm_clusters_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials +def test_list_odb_networks_flattened_error(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), ) - unset_fields = transport.list_cloud_vm_clusters._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "filter", - "pageSize", - "pageToken", - ) + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_odb_networks( + odb_network.ListOdbNetworksRequest(), + parent="parent_value", ) - & set(("parent",)) - ) -def test_list_cloud_vm_clusters_rest_flattened(): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +@pytest.mark.asyncio +async def test_list_odb_networks_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = oracledatabase.ListCloudVmClustersResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/locations/sample2"} + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_odb_networks), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = odb_network.ListOdbNetworksResponse() - # get truthy value for each flattened field - mock_args = dict( + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + odb_network.ListOdbNetworksResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_odb_networks( parent="parent_value", ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = oracledatabase.ListCloudVmClustersResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.list_cloud_vm_clusters(**mock_args) # Establish that the underlying call was made with the expected # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{parent=projects/*/locations/*}/cloudVmClusters" - % client.transport._host, - args[1], - ) + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val -def test_list_cloud_vm_clusters_rest_flattened_error(transport: str = "rest"): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, +@pytest.mark.asyncio +async def test_list_odb_networks_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.list_cloud_vm_clusters( - oracledatabase.ListCloudVmClustersRequest(), + await client.list_odb_networks( + odb_network.ListOdbNetworksRequest(), parent="parent_value", ) -def test_list_cloud_vm_clusters_rest_pager(transport: str = "rest"): +def test_list_odb_networks_pager(transport_name: str = "grpc"): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport=transport_name, ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. - # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - oracledatabase.ListCloudVmClustersResponse( - cloud_vm_clusters=[ - vm_cluster.CloudVmCluster(), - vm_cluster.CloudVmCluster(), - vm_cluster.CloudVmCluster(), + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_odb_networks), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + odb_network.ListOdbNetworksResponse( + odb_networks=[ + odb_network.OdbNetwork(), + odb_network.OdbNetwork(), + odb_network.OdbNetwork(), ], next_page_token="abc", ), - oracledatabase.ListCloudVmClustersResponse( - cloud_vm_clusters=[], + odb_network.ListOdbNetworksResponse( + odb_networks=[], next_page_token="def", ), - oracledatabase.ListCloudVmClustersResponse( - cloud_vm_clusters=[ - vm_cluster.CloudVmCluster(), + odb_network.ListOdbNetworksResponse( + odb_networks=[ + odb_network.OdbNetwork(), ], next_page_token="ghi", ), - oracledatabase.ListCloudVmClustersResponse( - cloud_vm_clusters=[ - vm_cluster.CloudVmCluster(), - vm_cluster.CloudVmCluster(), + odb_network.ListOdbNetworksResponse( + odb_networks=[ + odb_network.OdbNetwork(), + odb_network.OdbNetwork(), ], ), + RuntimeError, ) - # Two responses for two calls - response = response + response - # Wrap the values into proper Response objs - response = tuple( - oracledatabase.ListCloudVmClustersResponse.to_json(x) for x in response + expected_metadata = () + retry = retries.Retry() + timeout = 5 + expected_metadata = tuple(expected_metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {"parent": "projects/sample1/locations/sample2"} + pager = client.list_odb_networks(request={}, retry=retry, timeout=timeout) - pager = client.list_cloud_vm_clusters(request=sample_request) + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout results = list(pager) assert len(results) == 6 - assert all(isinstance(i, vm_cluster.CloudVmCluster) for i in results) + assert all(isinstance(i, odb_network.OdbNetwork) for i in results) - pages = 
list(client.list_cloud_vm_clusters(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token +def test_list_odb_networks_pages(transport_name: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) -def test_get_cloud_vm_cluster_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_odb_networks), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + odb_network.ListOdbNetworksResponse( + odb_networks=[ + odb_network.OdbNetwork(), + odb_network.OdbNetwork(), + odb_network.OdbNetwork(), + ], + next_page_token="abc", + ), + odb_network.ListOdbNetworksResponse( + odb_networks=[], + next_page_token="def", + ), + odb_network.ListOdbNetworksResponse( + odb_networks=[ + odb_network.OdbNetwork(), + ], + next_page_token="ghi", + ), + odb_network.ListOdbNetworksResponse( + odb_networks=[ + odb_network.OdbNetwork(), + odb_network.OdbNetwork(), + ], + ), + RuntimeError, ) + pages = list(client.list_odb_networks(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - # Ensure method has been cached - assert ( - client._transport.get_cloud_vm_cluster in client._transport._wrapped_methods - ) +@pytest.mark.asyncio +async def test_list_odb_networks_async_pager(): + client = OracleDatabaseAsyncClient( + 
credentials=async_anonymous_credentials(), + ) - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_odb_networks), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + odb_network.ListOdbNetworksResponse( + odb_networks=[ + odb_network.OdbNetwork(), + odb_network.OdbNetwork(), + odb_network.OdbNetwork(), + ], + next_page_token="abc", + ), + odb_network.ListOdbNetworksResponse( + odb_networks=[], + next_page_token="def", + ), + odb_network.ListOdbNetworksResponse( + odb_networks=[ + odb_network.OdbNetwork(), + ], + next_page_token="ghi", + ), + odb_network.ListOdbNetworksResponse( + odb_networks=[ + odb_network.OdbNetwork(), + odb_network.OdbNetwork(), + ], + ), + RuntimeError, ) - client._transport._wrapped_methods[ - client._transport.get_cloud_vm_cluster - ] = mock_rpc - - request = {} - client.get_cloud_vm_cluster(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - client.get_cloud_vm_cluster(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 + async_pager = await client.list_odb_networks( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + assert len(responses) == 6 + assert all(isinstance(i, odb_network.OdbNetwork) for i in responses) -def test_get_cloud_vm_cluster_rest_required_fields( - request_type=oracledatabase.GetCloudVmClusterRequest, -): - transport_class = transports.OracleDatabaseRestTransport - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) +@pytest.mark.asyncio +async def test_list_odb_networks_async_pages(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_cloud_vm_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_cloud_vm_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_odb_networks), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + odb_network.ListOdbNetworksResponse( + odb_networks=[ + odb_network.OdbNetwork(), + odb_network.OdbNetwork(), + odb_network.OdbNetwork(), + ], + next_page_token="abc", + ), + odb_network.ListOdbNetworksResponse( + odb_networks=[], + next_page_token="def", + ), + odb_network.ListOdbNetworksResponse( + odb_networks=[ + odb_network.OdbNetwork(), + ], + next_page_token="ghi", + ), + odb_network.ListOdbNetworksResponse( + odb_networks=[ + odb_network.OdbNetwork(), + odb_network.OdbNetwork(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_odb_networks(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" +@pytest.mark.parametrize( + "request_type", + [ + odb_network.GetOdbNetworkRequest, + dict, + ], +) +def test_get_odb_network(request_type, transport: str = "grpc"): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = vm_cluster.CloudVmCluster() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. 
- with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = vm_cluster.CloudVmCluster.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get_cloud_vm_cluster(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_odb_network), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = odb_network.OdbNetwork( + name="name_value", + network="network_value", + state=odb_network.OdbNetwork.State.PROVISIONING, + entitlement_id="entitlement_id_value", + gcp_oracle_zone="gcp_oracle_zone_value", + ) + response = client.get_odb_network(request) -def test_get_cloud_vm_cluster_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = odb_network.GetOdbNetworkRequest() + assert args[0] == request - unset_fields = transport.get_cloud_vm_cluster._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + # Establish that the response is the type that we expect. + assert isinstance(response, odb_network.OdbNetwork) + assert response.name == "name_value" + assert response.network == "network_value" + assert response.state == odb_network.OdbNetwork.State.PROVISIONING + assert response.entitlement_id == "entitlement_id_value" + assert response.gcp_oracle_zone == "gcp_oracle_zone_value" -def test_get_cloud_vm_cluster_rest_flattened(): +def test_get_odb_network_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = vm_cluster.CloudVmCluster() - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/locations/sample2/cloudVmClusters/sample3" - } + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = odb_network.GetOdbNetworkRequest( + name="name_value", + ) - # get truthy value for each flattened field - mock_args = dict( + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_odb_network), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_odb_network(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == odb_network.GetOdbNetworkRequest( name="name_value", ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = vm_cluster.CloudVmCluster.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_cloud_vm_cluster(**mock_args) - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{name=projects/*/locations/*/cloudVmClusters/*}" - % client.transport._host, - args[1], +def test_get_odb_network_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() -def test_get_cloud_vm_cluster_rest_flattened_error(transport: str = "rest"): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) + # Ensure method has been cached + assert client._transport.get_odb_network in client._transport._wrapped_methods - # Attempting to call a method with 
both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_cloud_vm_cluster( - oracledatabase.GetCloudVmClusterRequest(), - name="name_value", + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. ) + client._transport._wrapped_methods[client._transport.get_odb_network] = mock_rpc + request = {} + client.get_odb_network(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + client.get_odb_network(request) -def test_create_cloud_vm_cluster_rest_use_cached_wrapped_rpc(): + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_odb_network_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) # Should wrap all calls on client creation @@ -13513,418 +14519,341 @@ def test_create_cloud_vm_cluster_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.create_cloud_vm_cluster - in client._transport._wrapped_methods + client._client._transport.get_odb_network + in client._client._transport._wrapped_methods ) # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client._transport._wrapped_methods[ - client._transport.create_cloud_vm_cluster + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_odb_network ] = mock_rpc request = {} - client.create_cloud_vm_cluster(request) + await client.get_odb_network(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.create_cloud_vm_cluster(request) + await client.get_odb_network(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_create_cloud_vm_cluster_rest_required_fields( - request_type=oracledatabase.CreateCloudVmClusterRequest, +@pytest.mark.asyncio +async def test_get_odb_network_async( + transport: str = "grpc_asyncio", request_type=odb_network.GetOdbNetworkRequest ): - transport_class = transports.OracleDatabaseRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["cloud_vm_cluster_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - # verify fields with default values are dropped - assert "cloudVmClusterId" not in jsonified_request + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_cloud_vm_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_odb_network), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + odb_network.OdbNetwork( + name="name_value", + network="network_value", + state=odb_network.OdbNetwork.State.PROVISIONING, + entitlement_id="entitlement_id_value", + gcp_oracle_zone="gcp_oracle_zone_value", + ) + ) + response = await client.get_odb_network(request) - # verify required fields with default values are now present - assert "cloudVmClusterId" in jsonified_request - assert jsonified_request["cloudVmClusterId"] == request_init["cloud_vm_cluster_id"] + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = odb_network.GetOdbNetworkRequest() + assert args[0] == request - jsonified_request["parent"] = "parent_value" - jsonified_request["cloudVmClusterId"] = "cloud_vm_cluster_id_value" + # Establish that the response is the type that we expect. + assert isinstance(response, odb_network.OdbNetwork) + assert response.name == "name_value" + assert response.network == "network_value" + assert response.state == odb_network.OdbNetwork.State.PROVISIONING + assert response.entitlement_id == "entitlement_id_value" + assert response.gcp_oracle_zone == "gcp_oracle_zone_value" - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_cloud_vm_cluster._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set( - ( - "cloud_vm_cluster_id", - "request_id", - ) - ) - jsonified_request.update(unset_fields) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "cloudVmClusterId" in jsonified_request - assert jsonified_request["cloudVmClusterId"] == "cloud_vm_cluster_id_value" +@pytest.mark.asyncio +async def test_get_odb_network_async_from_dict(): + await test_get_odb_network_async(request_type=dict) + +def test_get_odb_network_field_headers(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = odb_network.GetOdbNetworkRequest() - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + request.name = "name_value" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_odb_network), "__call__") as call: + call.return_value = odb_network.OdbNetwork() + client.get_odb_network(request) - response = client.create_cloud_vm_cluster(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request - expected_params = [ - ( - "cloudVmClusterId", - "", - ), - ("$alt", "json;enum-encoding=int"), - ] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] -def test_create_cloud_vm_cluster_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_get_odb_network_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - unset_fields = transport.create_cloud_vm_cluster._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "cloudVmClusterId", - "requestId", - ) - ) - & set( - ( - "parent", - "cloudVmClusterId", - "cloudVmCluster", - ) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = odb_network.GetOdbNetworkRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_odb_network), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + odb_network.OdbNetwork() ) - ) + await client.get_odb_network(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] -def test_create_cloud_vm_cluster_rest_flattened(): + +def test_get_odb_network_flattened(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/locations/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - cloud_vm_cluster=vm_cluster.CloudVmCluster(name="name_value"), - cloud_vm_cluster_id="cloud_vm_cluster_id_value", + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_odb_network), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = odb_network.OdbNetwork() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.get_odb_network( + name="name_value", ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.create_cloud_vm_cluster(**mock_args) # Establish that the underlying call was made with the expected # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{parent=projects/*/locations/*}/cloudVmClusters" - % client.transport._host, - args[1], - ) + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val -def test_create_cloud_vm_cluster_rest_flattened_error(transport: str = "rest"): +def test_get_odb_network_flattened_error(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.create_cloud_vm_cluster( - oracledatabase.CreateCloudVmClusterRequest(), - parent="parent_value", - cloud_vm_cluster=vm_cluster.CloudVmCluster(name="name_value"), - cloud_vm_cluster_id="cloud_vm_cluster_id_value", + client.get_odb_network( + odb_network.GetOdbNetworkRequest(), + name="name_value", ) -def test_delete_cloud_vm_cluster_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) +@pytest.mark.asyncio +async def test_get_odb_network_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_odb_network), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = odb_network.OdbNetwork() - # Ensure method has been cached - assert ( - client._transport.delete_cloud_vm_cluster - in client._transport._wrapped_methods + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + odb_network.OdbNetwork() ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.get_odb_network( + name="name_value", ) - client._transport._wrapped_methods[ - client._transport.delete_cloud_vm_cluster - ] = mock_rpc - - request = {} - client.delete_cloud_vm_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val - client.delete_cloud_vm_cluster(request) - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 +@pytest.mark.asyncio +async def test_get_odb_network_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_odb_network( + odb_network.GetOdbNetworkRequest(), + name="name_value", + ) -def test_delete_cloud_vm_cluster_rest_required_fields( - request_type=oracledatabase.DeleteCloudVmClusterRequest, -): - transport_class = transports.OracleDatabaseRestTransport - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) +@pytest.mark.parametrize( + "request_type", + [ + gco_odb_network.CreateOdbNetworkRequest, + dict, + ], +) +def test_create_odb_network(request_type, transport: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) - # verify fields with default values are dropped + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_cloud_vm_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_odb_network), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_odb_network(request) - jsonified_request["name"] = "name_value" + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = gco_odb_network.CreateOdbNetworkRequest() + assert args[0] == request - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_cloud_vm_cluster._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "force", - "request_id", - ) - ) - jsonified_request.update(unset_fields) + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" +def test_create_odb_network_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "delete", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.delete_cloud_vm_cluster(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - -def test_delete_cloud_vm_cluster_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = gco_odb_network.CreateOdbNetworkRequest( + parent="parent_value", + odb_network_id="odb_network_id_value", ) - unset_fields = transport.delete_cloud_vm_cluster._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "force", - "requestId", - ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_odb_network), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.create_odb_network(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == gco_odb_network.CreateOdbNetworkRequest( + parent="parent_value", + odb_network_id="odb_network_id_value", ) - & set(("name",)) - ) - - -def test_delete_cloud_vm_cluster_rest_flattened(): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/locations/sample2/cloudVmClusters/sample3" - } - # get truthy value for each flattened field - mock_args = dict( - name="name_value", +def test_create_odb_network_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) - mock_args.update(sample_request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - client.delete_cloud_vm_cluster(**mock_args) + # Ensure method has been cached + assert ( + client._transport.create_odb_network in client._transport._wrapped_methods + ) - # Establish that the 
underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{name=projects/*/locations/*/cloudVmClusters/*}" - % client.transport._host, - args[1], + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. ) + client._transport._wrapped_methods[ + client._transport.create_odb_network + ] = mock_rpc + request = {} + client.create_odb_network(request) + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 -def test_delete_cloud_vm_cluster_rest_flattened_error(transport: str = "rest"): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_cloud_vm_cluster( - oracledatabase.DeleteCloudVmClusterRequest(), - name="name_value", - ) + client.create_odb_network(request) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 -def test_list_entitlements_rest_use_cached_wrapped_rpc(): + +@pytest.mark.asyncio +async def test_create_odb_network_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) # Should wrap all calls on client creation @@ -13932,517 +14861,670 @@ def test_list_entitlements_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.list_entitlements in client._transport._wrapped_methods + assert ( + client._client._transport.create_odb_network + in client._client._transport._wrapped_methods + ) # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.list_entitlements + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.create_odb_network ] = mock_rpc request = {} - client.list_entitlements(request) + await client.create_odb_network(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.list_entitlements(request) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.create_odb_network(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_list_entitlements_rest_required_fields( - request_type=oracledatabase.ListEntitlementsRequest, +@pytest.mark.asyncio +async def test_create_odb_network_async( + transport: str = "grpc_asyncio", + request_type=gco_odb_network.CreateOdbNetworkRequest, ): - transport_class = transports.OracleDatabaseRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - # verify fields with default values are dropped + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_entitlements._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_odb_network), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_odb_network(request) - # verify required fields with default values are now present + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = gco_odb_network.CreateOdbNetworkRequest() + assert args[0] == request - jsonified_request["parent"] = "parent_value" + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_entitlements._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "page_size", - "page_token", - ) - ) - jsonified_request.update(unset_fields) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" +@pytest.mark.asyncio +async def test_create_odb_network_async_from_dict(): + await test_create_odb_network_async(request_type=dict) + +def test_create_odb_network_field_headers(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = oracledatabase.ListEntitlementsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. 
- with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - response_value = Response() - response_value.status_code = 200 + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = gco_odb_network.CreateOdbNetworkRequest() - # Convert return value to protobuf type - return_value = oracledatabase.ListEntitlementsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + request.parent = "parent_value" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_odb_network), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_odb_network(request) - response = client.list_entitlements(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] -def test_list_entitlements_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_create_odb_network_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - unset_fields = transport.list_entitlements._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "pageSize", - "pageToken", - ) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = gco_odb_network.CreateOdbNetworkRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_odb_network), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") ) - & set(("parent",)) - ) + await client.create_odb_network(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] -def test_list_entitlements_rest_flattened(): + +def test_create_odb_network_flattened(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. 
- return_value = oracledatabase.ListEntitlementsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/locations/sample2"} - - # get truthy value for each flattened field - mock_args = dict( + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_odb_network), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_odb_network( parent="parent_value", + odb_network=gco_odb_network.OdbNetwork(name="name_value"), + odb_network_id="odb_network_id_value", ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = oracledatabase.ListEntitlementsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.list_entitlements(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{parent=projects/*/locations/*}/entitlements" - % client.transport._host, - args[1], - ) + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].odb_network + mock_val = gco_odb_network.OdbNetwork(name="name_value") + assert arg == mock_val + arg = args[0].odb_network_id + mock_val = "odb_network_id_value" + assert arg == mock_val -def test_list_entitlements_rest_flattened_error(transport: str = "rest"): +def test_create_odb_network_flattened_error(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.list_entitlements( - oracledatabase.ListEntitlementsRequest(), + client.create_odb_network( + gco_odb_network.CreateOdbNetworkRequest(), parent="parent_value", + odb_network=gco_odb_network.OdbNetwork(name="name_value"), + odb_network_id="odb_network_id_value", ) -def test_list_entitlements_rest_pager(transport: str = "rest"): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, +@pytest.mark.asyncio +async def test_create_odb_network_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - oracledatabase.ListEntitlementsResponse( - entitlements=[ - entitlement.Entitlement(), - entitlement.Entitlement(), - entitlement.Entitlement(), - ], - next_page_token="abc", - ), - oracledatabase.ListEntitlementsResponse( - entitlements=[], - next_page_token="def", - ), - oracledatabase.ListEntitlementsResponse( - entitlements=[ - entitlement.Entitlement(), - ], - next_page_token="ghi", - ), - oracledatabase.ListEntitlementsResponse( - entitlements=[ - entitlement.Entitlement(), - entitlement.Entitlement(), - ], - ), - ) - # Two responses for two calls - response = response + response + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_odb_network), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") - # Wrap the values into proper Response objs - response = tuple( - oracledatabase.ListEntitlementsResponse.to_json(x) for x in response + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.create_odb_network( + parent="parent_value", + odb_network=gco_odb_network.OdbNetwork(name="name_value"), + odb_network_id="odb_network_id_value", ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {"parent": "projects/sample1/locations/sample2"} - - pager = client.list_entitlements(request=sample_request) - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, entitlement.Entitlement) for i in results) + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].odb_network + mock_val = gco_odb_network.OdbNetwork(name="name_value") + assert arg == mock_val + arg = args[0].odb_network_id + mock_val = "odb_network_id_value" + assert arg == mock_val - pages = list(client.list_entitlements(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token +@pytest.mark.asyncio +async def test_create_odb_network_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) -def test_list_db_servers_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_odb_network( + gco_odb_network.CreateOdbNetworkRequest(), + parent="parent_value", + odb_network=gco_odb_network.OdbNetwork(name="name_value"), + odb_network_id="odb_network_id_value", ) - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - # Ensure method has been cached - assert client._transport.list_db_servers in client._transport._wrapped_methods +@pytest.mark.parametrize( + "request_type", + [ + odb_network.DeleteOdbNetworkRequest, + dict, + ], +) +def test_delete_odb_network(request_type, transport: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.list_db_servers] = mock_rpc + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - request = {} - client.list_db_servers(request) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_odb_network), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_odb_network(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = odb_network.DeleteOdbNetworkRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_odb_network_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = odb_network.DeleteOdbNetworkRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_odb_network), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.delete_odb_network(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == odb_network.DeleteOdbNetworkRequest( + name="name_value", + ) + + +def test_delete_odb_network_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_odb_network in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client._transport._wrapped_methods[ + client._transport.delete_odb_network + ] = mock_rpc + request = {} + client.delete_odb_network(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.list_db_servers(request) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.delete_odb_network(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_list_db_servers_rest_required_fields( - request_type=oracledatabase.ListDbServersRequest, +@pytest.mark.asyncio +async def test_delete_odb_network_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.OracleDatabaseRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # verify fields with default values are dropped + # Ensure method has been cached + assert ( + client._client._transport.delete_odb_network + in client._client._transport._wrapped_methods + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_db_servers._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Replace cached 
wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_odb_network + ] = mock_rpc - # verify required fields with default values are now present + request = {} + await client.delete_odb_network(request) - jsonified_request["parent"] = "parent_value" + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_db_servers._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "page_size", - "page_token", - ) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.delete_odb_network(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_odb_network_async( + transport: str = "grpc_asyncio", request_type=odb_network.DeleteOdbNetworkRequest +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - jsonified_request.update(unset_fields) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_odb_network), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_odb_network(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = odb_network.DeleteOdbNetworkRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + +@pytest.mark.asyncio +async def test_delete_odb_network_async_from_dict(): + await test_delete_odb_network_async(request_type=dict) + + +def test_delete_odb_network_field_headers(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = oracledatabase.ListDbServersResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - response_value = Response() - response_value.status_code = 200 + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = odb_network.DeleteOdbNetworkRequest() - # Convert return value to protobuf type - return_value = oracledatabase.ListDbServersResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + request.name = "name_value" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_odb_network), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_odb_network(request) - response = client.list_db_servers(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] -def test_list_db_servers_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_delete_odb_network_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - unset_fields = transport.list_db_servers._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "pageSize", - "pageToken", - ) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = odb_network.DeleteOdbNetworkRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_odb_network), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") ) - & set(("parent",)) - ) + await client.delete_odb_network(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] -def test_list_db_servers_rest_flattened(): +def test_delete_odb_network_flattened(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = oracledatabase.ListDbServersResponse() + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_odb_network), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_odb_network( + name="name_value", + ) - # get arguments that satisfy an http rule for this method - sample_request = { - "parent": "projects/sample1/locations/sample2/cloudExadataInfrastructures/sample3" - } + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", + +def test_delete_odb_network_flattened_error(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_odb_network( + odb_network.DeleteOdbNetworkRequest(), + name="name_value", ) - mock_args.update(sample_request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = oracledatabase.ListDbServersResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_db_servers(**mock_args) +@pytest.mark.asyncio +async def test_delete_odb_network_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_odb_network), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_odb_network( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{parent=projects/*/locations/*/cloudExadataInfrastructures/*}/dbServers" - % client.transport._host, - args[1], - ) + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val -def test_list_db_servers_rest_flattened_error(transport: str = "rest"): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, +@pytest.mark.asyncio +async def test_delete_odb_network_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.list_db_servers( - oracledatabase.ListDbServersRequest(), - parent="parent_value", + await client.delete_odb_network( + odb_network.DeleteOdbNetworkRequest(), + name="name_value", ) -def test_list_db_servers_rest_pager(transport: str = "rest"): +@pytest.mark.parametrize( + "request_type", + [ + odb_subnet.ListOdbSubnetsRequest, + dict, + ], +) +def test_list_odb_subnets(request_type, transport: str = "grpc"): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - oracledatabase.ListDbServersResponse( - db_servers=[ - db_server.DbServer(), - db_server.DbServer(), - db_server.DbServer(), - ], - next_page_token="abc", - ), - oracledatabase.ListDbServersResponse( - db_servers=[], - next_page_token="def", - ), - oracledatabase.ListDbServersResponse( - db_servers=[ - db_server.DbServer(), - ], - next_page_token="ghi", - ), - oracledatabase.ListDbServersResponse( - db_servers=[ - db_server.DbServer(), - db_server.DbServer(), - ], - ), - ) - # Two responses for two calls - response = response + response + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # Wrap the values into proper Response objs - response = tuple( - oracledatabase.ListDbServersResponse.to_json(x) for x in response + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_odb_subnets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = odb_subnet.ListOdbSubnetsResponse( + next_page_token="next_page_token_value", + unreachable=["unreachable_value"], ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values + response = client.list_odb_subnets(request) - sample_request = { - "parent": "projects/sample1/locations/sample2/cloudExadataInfrastructures/sample3" - } + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = odb_subnet.ListOdbSubnetsRequest() + assert args[0] == request - pager = client.list_db_servers(request=sample_request) + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListOdbSubnetsPager) + assert response.next_page_token == "next_page_token_value" + assert response.unreachable == ["unreachable_value"] - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, db_server.DbServer) for i in results) - pages = list(client.list_db_servers(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token +def test_list_odb_subnets_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = odb_subnet.ListOdbSubnetsRequest( + parent="parent_value", + page_token="page_token_value", + filter="filter_value", + order_by="order_by_value", + ) -def test_list_db_nodes_rest_use_cached_wrapped_rpc(): + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_odb_subnets), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.list_odb_subnets(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == odb_subnet.ListOdbSubnetsRequest( + parent="parent_value", + page_token="page_token_value", + filter="filter_value", + order_by="order_by_value", + ) + + +def test_list_odb_subnets_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -14450,259 +15532,526 @@ def test_list_db_nodes_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.list_db_nodes in client._transport._wrapped_methods + assert client._transport.list_odb_subnets in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.list_db_nodes] = mock_rpc - + client._transport._wrapped_methods[ + client._transport.list_odb_subnets + ] = mock_rpc request = {} - client.list_db_nodes(request) + client.list_odb_subnets(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.list_db_nodes(request) + client.list_odb_subnets(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_list_db_nodes_rest_required_fields( - request_type=oracledatabase.ListDbNodesRequest, +@pytest.mark.asyncio +async def test_list_odb_subnets_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.OracleDatabaseRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # verify fields with default values are dropped + # Ensure method has been cached + assert ( + client._client._transport.list_odb_subnets + in client._client._transport._wrapped_methods + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_db_nodes._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_odb_subnets + ] = mock_rpc - # verify required fields with default values are now present + request = {} + await client.list_odb_subnets(request) - jsonified_request["parent"] = "parent_value" + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_db_nodes._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "page_size", - "page_token", - ) - ) - jsonified_request.update(unset_fields) + await client.list_odb_subnets(request) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = oracledatabase.ListDbNodesResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result +@pytest.mark.asyncio +async def test_list_odb_subnets_async( + transport: str = "grpc_asyncio", request_type=odb_subnet.ListOdbSubnetsRequest +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - response_value = Response() - response_value.status_code = 200 + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # Convert return value to protobuf type - return_value = oracledatabase.ListDbNodesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_odb_subnets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + odb_subnet.ListOdbSubnetsResponse( + next_page_token="next_page_token_value", + unreachable=["unreachable_value"], + ) + ) + response = await client.list_odb_subnets(request) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = odb_subnet.ListOdbSubnetsRequest() + assert args[0] == request - response = client.list_db_nodes(request) + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListOdbSubnetsAsyncPager) + assert response.next_page_token == "next_page_token_value" + assert response.unreachable == ["unreachable_value"] - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params +@pytest.mark.asyncio +async def test_list_odb_subnets_async_from_dict(): + await test_list_odb_subnets_async(request_type=dict) -def test_list_db_nodes_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials + +def test_list_odb_subnets_field_headers(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), ) - unset_fields = transport.list_db_nodes._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "pageSize", - "pageToken", - ) - ) - & set(("parent",)) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = odb_subnet.ListOdbSubnetsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_odb_subnets), "__call__") as call: + call.return_value = odb_subnet.ListOdbSubnetsResponse() + client.list_odb_subnets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_odb_subnets_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = odb_subnet.ListOdbSubnetsRequest() + + request.parent = "parent_value" -def test_list_db_nodes_rest_flattened(): + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_odb_subnets), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + odb_subnet.ListOdbSubnetsResponse() + ) + await client.list_odb_subnets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_odb_subnets_flattened(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = oracledatabase.ListDbNodesResponse() + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_odb_subnets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = odb_subnet.ListOdbSubnetsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_odb_subnets( + parent="parent_value", + ) - # get arguments that satisfy an http rule for this method - sample_request = { - "parent": "projects/sample1/locations/sample2/cloudVmClusters/sample3" - } + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val - # get truthy value for each flattened field - mock_args = dict( + +def test_list_odb_subnets_flattened_error(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_odb_subnets( + odb_subnet.ListOdbSubnetsRequest(), parent="parent_value", ) - mock_args.update(sample_request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = oracledatabase.ListDbNodesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_db_nodes(**mock_args) +@pytest.mark.asyncio +async def test_list_odb_subnets_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_odb_subnets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = odb_subnet.ListOdbSubnetsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + odb_subnet.ListOdbSubnetsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_odb_subnets( + parent="parent_value", + ) # Establish that the underlying call was made with the expected # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{parent=projects/*/locations/*/cloudVmClusters/*}/dbNodes" - % client.transport._host, - args[1], - ) + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val -def test_list_db_nodes_rest_flattened_error(transport: str = "rest"): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, +@pytest.mark.asyncio +async def test_list_odb_subnets_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.list_db_nodes( - oracledatabase.ListDbNodesRequest(), + await client.list_odb_subnets( + odb_subnet.ListOdbSubnetsRequest(), parent="parent_value", ) -def test_list_db_nodes_rest_pager(transport: str = "rest"): +def test_list_odb_subnets_pager(transport_name: str = "grpc"): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport=transport_name, ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. - # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - oracledatabase.ListDbNodesResponse( - db_nodes=[ - db_node.DbNode(), - db_node.DbNode(), - db_node.DbNode(), + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_odb_subnets), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + odb_subnet.ListOdbSubnetsResponse( + odb_subnets=[ + odb_subnet.OdbSubnet(), + odb_subnet.OdbSubnet(), + odb_subnet.OdbSubnet(), ], next_page_token="abc", ), - oracledatabase.ListDbNodesResponse( - db_nodes=[], + odb_subnet.ListOdbSubnetsResponse( + odb_subnets=[], next_page_token="def", ), - oracledatabase.ListDbNodesResponse( - db_nodes=[ - db_node.DbNode(), + odb_subnet.ListOdbSubnetsResponse( + odb_subnets=[ + odb_subnet.OdbSubnet(), ], next_page_token="ghi", ), - oracledatabase.ListDbNodesResponse( - db_nodes=[ - db_node.DbNode(), - db_node.DbNode(), + odb_subnet.ListOdbSubnetsResponse( + odb_subnets=[ + odb_subnet.OdbSubnet(), + odb_subnet.OdbSubnet(), ], ), + RuntimeError, ) - # Two responses for two calls - response = response + response - # Wrap the values into proper Response objs - response = tuple( - oracledatabase.ListDbNodesResponse.to_json(x) for x in response + expected_metadata = () + retry = retries.Retry() + timeout = 5 + expected_metadata = tuple(expected_metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = { - "parent": "projects/sample1/locations/sample2/cloudVmClusters/sample3" - } + pager = client.list_odb_subnets(request={}, retry=retry, timeout=timeout) - pager = client.list_db_nodes(request=sample_request) + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout results = list(pager) assert len(results) == 6 - assert all(isinstance(i, db_node.DbNode) for i in results) + assert all(isinstance(i, odb_subnet.OdbSubnet) for i in results) - pages = list(client.list_db_nodes(request=sample_request).pages) + +def test_list_odb_subnets_pages(transport_name: str = "grpc"): + client = 
OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_odb_subnets), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + odb_subnet.ListOdbSubnetsResponse( + odb_subnets=[ + odb_subnet.OdbSubnet(), + odb_subnet.OdbSubnet(), + odb_subnet.OdbSubnet(), + ], + next_page_token="abc", + ), + odb_subnet.ListOdbSubnetsResponse( + odb_subnets=[], + next_page_token="def", + ), + odb_subnet.ListOdbSubnetsResponse( + odb_subnets=[ + odb_subnet.OdbSubnet(), + ], + next_page_token="ghi", + ), + odb_subnet.ListOdbSubnetsResponse( + odb_subnets=[ + odb_subnet.OdbSubnet(), + odb_subnet.OdbSubnet(), + ], + ), + RuntimeError, + ) + pages = list(client.list_odb_subnets(request={}).pages) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token -def test_list_gi_versions_rest_use_cached_wrapped_rpc(): +@pytest.mark.asyncio +async def test_list_odb_subnets_async_pager(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_odb_subnets), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + odb_subnet.ListOdbSubnetsResponse( + odb_subnets=[ + odb_subnet.OdbSubnet(), + odb_subnet.OdbSubnet(), + odb_subnet.OdbSubnet(), + ], + next_page_token="abc", + ), + odb_subnet.ListOdbSubnetsResponse( + odb_subnets=[], + next_page_token="def", + ), + odb_subnet.ListOdbSubnetsResponse( + odb_subnets=[ + odb_subnet.OdbSubnet(), + ], + next_page_token="ghi", + ), + odb_subnet.ListOdbSubnetsResponse( + odb_subnets=[ + odb_subnet.OdbSubnet(), + odb_subnet.OdbSubnet(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_odb_subnets( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, odb_subnet.OdbSubnet) for i in responses) + + +@pytest.mark.asyncio +async def test_list_odb_subnets_async_pages(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_odb_subnets), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + odb_subnet.ListOdbSubnetsResponse( + odb_subnets=[ + odb_subnet.OdbSubnet(), + odb_subnet.OdbSubnet(), + odb_subnet.OdbSubnet(), + ], + next_page_token="abc", + ), + odb_subnet.ListOdbSubnetsResponse( + odb_subnets=[], + next_page_token="def", + ), + odb_subnet.ListOdbSubnetsResponse( + odb_subnets=[ + odb_subnet.OdbSubnet(), + ], + next_page_token="ghi", + ), + odb_subnet.ListOdbSubnetsResponse( + odb_subnets=[ + odb_subnet.OdbSubnet(), + odb_subnet.OdbSubnet(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_odb_subnets(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + odb_subnet.GetOdbSubnetRequest, + dict, + ], +) +def test_get_odb_subnet(request_type, transport: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_odb_subnet), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = odb_subnet.OdbSubnet( + name="name_value", + cidr_range="cidr_range_value", + purpose=odb_subnet.OdbSubnet.Purpose.CLIENT_SUBNET, + state=odb_subnet.OdbSubnet.State.PROVISIONING, + ) + response = client.get_odb_subnet(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = odb_subnet.GetOdbSubnetRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, odb_subnet.OdbSubnet) + assert response.name == "name_value" + assert response.cidr_range == "cidr_range_value" + assert response.purpose == odb_subnet.OdbSubnet.Purpose.CLIENT_SUBNET + assert response.state == odb_subnet.OdbSubnet.State.PROVISIONING + + +def test_get_odb_subnet_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = odb_subnet.GetOdbSubnetRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_odb_subnet), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.get_odb_subnet(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == odb_subnet.GetOdbSubnetRequest( + name="name_value", + ) + + +def test_get_odb_subnet_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -14710,256 +16059,331 @@ def test_list_gi_versions_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.list_gi_versions in client._transport._wrapped_methods + assert client._transport.get_odb_subnet in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[ - client._transport.list_gi_versions - ] = mock_rpc - + client._transport._wrapped_methods[client._transport.get_odb_subnet] = mock_rpc request = {} - client.list_gi_versions(request) + client.get_odb_subnet(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.list_gi_versions(request) + client.get_odb_subnet(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_list_gi_versions_rest_required_fields( - request_type=oracledatabase.ListGiVersionsRequest, +@pytest.mark.asyncio +async def test_get_odb_subnet_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.OracleDatabaseRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # verify fields with default values are dropped + # Ensure method has been cached + assert ( + client._client._transport.get_odb_subnet + in client._client._transport._wrapped_methods + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_gi_versions._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_odb_subnet + ] = mock_rpc - # verify required fields with default values are now present + request = {} + await client.get_odb_subnet(request) - jsonified_request["parent"] = "parent_value" + # Establish that the underlying gRPC stub method was 
called. + assert mock_rpc.call_count == 1 - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_gi_versions._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "page_size", - "page_token", - ) - ) - jsonified_request.update(unset_fields) + await client.get_odb_subnet(request) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + +@pytest.mark.asyncio +async def test_get_odb_subnet_async( + transport: str = "grpc_asyncio", request_type=odb_subnet.GetOdbSubnetRequest +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = oracledatabase.ListGiVersionsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - response_value = Response() - response_value.status_code = 200 + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_odb_subnet), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + odb_subnet.OdbSubnet( + name="name_value", + cidr_range="cidr_range_value", + purpose=odb_subnet.OdbSubnet.Purpose.CLIENT_SUBNET, + state=odb_subnet.OdbSubnet.State.PROVISIONING, + ) + ) + response = await client.get_odb_subnet(request) - # Convert return value to protobuf type - return_value = oracledatabase.ListGiVersionsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = odb_subnet.GetOdbSubnetRequest() + assert args[0] == request - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Establish that the response is the type that we expect. 
+ assert isinstance(response, odb_subnet.OdbSubnet) + assert response.name == "name_value" + assert response.cidr_range == "cidr_range_value" + assert response.purpose == odb_subnet.OdbSubnet.Purpose.CLIENT_SUBNET + assert response.state == odb_subnet.OdbSubnet.State.PROVISIONING - response = client.list_gi_versions(request) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params +@pytest.mark.asyncio +async def test_get_odb_subnet_async_from_dict(): + await test_get_odb_subnet_async(request_type=dict) -def test_list_gi_versions_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials +def test_get_odb_subnet_field_headers(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), ) - unset_fields = transport.list_gi_versions._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "pageSize", - "pageToken", - ) - ) - & set(("parent",)) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = odb_subnet.GetOdbSubnetRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_odb_subnet), "__call__") as call: + call.return_value = odb_subnet.OdbSubnet() + client.get_odb_subnet(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_odb_subnet_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = odb_subnet.GetOdbSubnetRequest() -def test_list_gi_versions_rest_flattened(): + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_odb_subnet), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + odb_subnet.OdbSubnet() + ) + await client.get_odb_subnet(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_odb_subnet_flattened(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = oracledatabase.ListGiVersionsResponse() + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_odb_subnet), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = odb_subnet.OdbSubnet() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.get_odb_subnet( + name="name_value", + ) - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/locations/sample2"} + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", + +def test_get_odb_subnet_flattened_error(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_odb_subnet( + odb_subnet.GetOdbSubnetRequest(), + name="name_value", ) - mock_args.update(sample_request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = oracledatabase.ListGiVersionsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_gi_versions(**mock_args) +@pytest.mark.asyncio +async def test_get_odb_subnet_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_odb_subnet), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = odb_subnet.OdbSubnet() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + odb_subnet.OdbSubnet() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_odb_subnet( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{parent=projects/*/locations/*}/giVersions" % client.transport._host, - args[1], - ) + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val -def test_list_gi_versions_rest_flattened_error(transport: str = "rest"): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, +@pytest.mark.asyncio +async def test_get_odb_subnet_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.list_gi_versions( - oracledatabase.ListGiVersionsRequest(), - parent="parent_value", + await client.get_odb_subnet( + odb_subnet.GetOdbSubnetRequest(), + name="name_value", ) -def test_list_gi_versions_rest_pager(transport: str = "rest"): +@pytest.mark.parametrize( + "request_type", + [ + gco_odb_subnet.CreateOdbSubnetRequest, + dict, + ], +) +def test_create_odb_subnet(request_type, transport: str = "grpc"): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - oracledatabase.ListGiVersionsResponse( - gi_versions=[ - gi_version.GiVersion(), - gi_version.GiVersion(), - gi_version.GiVersion(), - ], - next_page_token="abc", - ), - oracledatabase.ListGiVersionsResponse( - gi_versions=[], - next_page_token="def", - ), - oracledatabase.ListGiVersionsResponse( - gi_versions=[ - gi_version.GiVersion(), - ], - next_page_token="ghi", - ), - oracledatabase.ListGiVersionsResponse( - gi_versions=[ - gi_version.GiVersion(), - gi_version.GiVersion(), - ], - ), - ) - # Two responses for two calls - response = response + response + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # Wrap the values into proper Response objs - response = tuple( - oracledatabase.ListGiVersionsResponse.to_json(x) for x in response - ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_odb_subnet), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_odb_subnet(request) - sample_request = {"parent": "projects/sample1/locations/sample2"} + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = gco_odb_subnet.CreateOdbSubnetRequest() + assert args[0] == request - pager = client.list_gi_versions(request=sample_request) + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, gi_version.GiVersion) for i in results) - pages = list(client.list_gi_versions(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token +def test_create_odb_subnet_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = gco_odb_subnet.CreateOdbSubnetRequest( + parent="parent_value", + odb_subnet_id="odb_subnet_id_value", + ) -def test_list_db_system_shapes_rest_use_cached_wrapped_rpc(): + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_odb_subnet), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.create_odb_subnet(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == gco_odb_subnet.CreateOdbSubnetRequest( + parent="parent_value", + odb_subnet_id="odb_subnet_id_value", + ) + + +def test_create_odb_subnet_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -14967,10 +16391,7 @@ def test_list_db_system_shapes_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert ( - client._transport.list_db_system_shapes - in client._transport._wrapped_methods - ) + assert client._transport.create_odb_subnet in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() @@ -14978,249 +16399,354 @@ def test_list_db_system_shapes_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.list_db_system_shapes + client._transport.create_odb_subnet ] = mock_rpc - request = {} - client.list_db_system_shapes(request) + client.create_odb_subnet(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.list_db_system_shapes(request) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_odb_subnet(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_list_db_system_shapes_rest_required_fields( - request_type=oracledatabase.ListDbSystemShapesRequest, +@pytest.mark.asyncio +async def test_create_odb_subnet_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.OracleDatabaseRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # verify fields with default values are dropped + # Ensure method has been cached + assert ( + client._client._transport.create_odb_subnet + in client._client._transport._wrapped_methods + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_db_system_shapes._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.create_odb_subnet + ] = mock_rpc - # verify required fields with default values are now present + request = {} + await client.create_odb_subnet(request) - jsonified_request["parent"] = "parent_value" + # Establish that 
the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_db_system_shapes._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "page_size", - "page_token", - ) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.create_odb_subnet(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_odb_subnet_async( + transport: str = "grpc_asyncio", request_type=gco_odb_subnet.CreateOdbSubnetRequest +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - jsonified_request.update(unset_fields) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_odb_subnet), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_odb_subnet(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = gco_odb_subnet.CreateOdbSubnetRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_odb_subnet_async_from_dict(): + await test_create_odb_subnet_async(request_type=dict) + +def test_create_odb_subnet_field_headers(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = oracledatabase.ListDbSystemShapesResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - response_value = Response() - response_value.status_code = 200 + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = gco_odb_subnet.CreateOdbSubnetRequest() - # Convert return value to protobuf type - return_value = oracledatabase.ListDbSystemShapesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + request.parent = "parent_value" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_odb_subnet), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_odb_subnet(request) - response = client.list_db_system_shapes(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] -def test_list_db_system_shapes_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_create_odb_subnet_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - unset_fields = transport.list_db_system_shapes._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "pageSize", - "pageToken", - ) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = gco_odb_subnet.CreateOdbSubnetRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_odb_subnet), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") ) - & set(("parent",)) - ) + await client.create_odb_subnet(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request -def test_list_db_system_shapes_rest_flattened(): + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_create_odb_subnet_flattened(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = oracledatabase.ListDbSystemShapesResponse() + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_odb_subnet), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_odb_subnet( + parent="parent_value", + odb_subnet=gco_odb_subnet.OdbSubnet(name="name_value"), + odb_subnet_id="odb_subnet_id_value", + ) - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/locations/sample2"} + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].odb_subnet + mock_val = gco_odb_subnet.OdbSubnet(name="name_value") + assert arg == mock_val + arg = args[0].odb_subnet_id + mock_val = "odb_subnet_id_value" + assert arg == mock_val - # get truthy value for each flattened field - mock_args = dict( + +def test_create_odb_subnet_flattened_error(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_odb_subnet( + gco_odb_subnet.CreateOdbSubnetRequest(), parent="parent_value", + odb_subnet=gco_odb_subnet.OdbSubnet(name="name_value"), + odb_subnet_id="odb_subnet_id_value", ) - mock_args.update(sample_request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = oracledatabase.ListDbSystemShapesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_db_system_shapes(**mock_args) +@pytest.mark.asyncio +async def test_create_odb_subnet_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_odb_subnet), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_odb_subnet( + parent="parent_value", + odb_subnet=gco_odb_subnet.OdbSubnet(name="name_value"), + odb_subnet_id="odb_subnet_id_value", + ) # Establish that the underlying call was made with the expected # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{parent=projects/*/locations/*}/dbSystemShapes" - % client.transport._host, - args[1], - ) + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].odb_subnet + mock_val = gco_odb_subnet.OdbSubnet(name="name_value") + assert arg == mock_val + arg = args[0].odb_subnet_id + mock_val = "odb_subnet_id_value" + assert arg == mock_val -def test_list_db_system_shapes_rest_flattened_error(transport: str = "rest"): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, +@pytest.mark.asyncio +async def test_create_odb_subnet_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.list_db_system_shapes( - oracledatabase.ListDbSystemShapesRequest(), + await client.create_odb_subnet( + gco_odb_subnet.CreateOdbSubnetRequest(), parent="parent_value", + odb_subnet=gco_odb_subnet.OdbSubnet(name="name_value"), + odb_subnet_id="odb_subnet_id_value", ) -def test_list_db_system_shapes_rest_pager(transport: str = "rest"): +@pytest.mark.parametrize( + "request_type", + [ + odb_subnet.DeleteOdbSubnetRequest, + dict, + ], +) +def test_delete_odb_subnet(request_type, transport: str = "grpc"): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. - # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - oracledatabase.ListDbSystemShapesResponse( - db_system_shapes=[ - db_system_shape.DbSystemShape(), - db_system_shape.DbSystemShape(), - db_system_shape.DbSystemShape(), - ], - next_page_token="abc", - ), - oracledatabase.ListDbSystemShapesResponse( - db_system_shapes=[], - next_page_token="def", - ), - oracledatabase.ListDbSystemShapesResponse( - db_system_shapes=[ - db_system_shape.DbSystemShape(), - ], - next_page_token="ghi", - ), - oracledatabase.ListDbSystemShapesResponse( - db_system_shapes=[ - db_system_shape.DbSystemShape(), - db_system_shape.DbSystemShape(), - ], - ), - ) - # Two responses for two calls - response = response + response + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() - # Wrap the values into proper Response objs - response = tuple( - oracledatabase.ListDbSystemShapesResponse.to_json(x) for x in response - ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_odb_subnet), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_odb_subnet(request) - sample_request = {"parent": "projects/sample1/locations/sample2"} + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = odb_subnet.DeleteOdbSubnetRequest() + assert args[0] == request - pager = client.list_db_system_shapes(request=sample_request) + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, db_system_shape.DbSystemShape) for i in results) - pages = list(client.list_db_system_shapes(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token +def test_delete_odb_subnet_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = odb_subnet.DeleteOdbSubnetRequest( + name="name_value", + ) -def test_list_autonomous_databases_rest_use_cached_wrapped_rpc(): + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_odb_subnet), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.delete_odb_subnet(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == odb_subnet.DeleteOdbSubnetRequest( + name="name_value", + ) + + +def test_delete_odb_subnet_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -15228,10 +16754,7 @@ def test_list_autonomous_databases_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert ( - client._transport.list_autonomous_databases - in client._transport._wrapped_methods - ) + assert client._transport.delete_odb_subnet in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() @@ -15239,257 +16762,343 @@ def test_list_autonomous_databases_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. 
) client._transport._wrapped_methods[ - client._transport.list_autonomous_databases + client._transport.delete_odb_subnet ] = mock_rpc - request = {} - client.list_autonomous_databases(request) + client.delete_odb_subnet(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.list_autonomous_databases(request) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.delete_odb_subnet(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_list_autonomous_databases_rest_required_fields( - request_type=oracledatabase.ListAutonomousDatabasesRequest, +@pytest.mark.asyncio +async def test_delete_odb_subnet_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.OracleDatabaseRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # verify fields with default values are dropped + # Ensure method has been cached + assert ( + client._client._transport.delete_odb_subnet + in client._client._transport._wrapped_methods + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - 
).list_autonomous_databases._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_odb_subnet + ] = mock_rpc - # verify required fields with default values are now present + request = {} + await client.delete_odb_subnet(request) - jsonified_request["parent"] = "parent_value" + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_autonomous_databases._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "filter", - "order_by", - "page_size", - "page_token", - ) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.delete_odb_subnet(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_odb_subnet_async( + transport: str = "grpc_asyncio", request_type=odb_subnet.DeleteOdbSubnetRequest +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - jsonified_request.update(unset_fields) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_odb_subnet), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_odb_subnet(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = odb_subnet.DeleteOdbSubnetRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + +@pytest.mark.asyncio +async def test_delete_odb_subnet_async_from_dict(): + await test_delete_odb_subnet_async(request_type=dict) + + +def test_delete_odb_subnet_field_headers(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = oracledatabase.ListAutonomousDatabasesResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - response_value = Response() - response_value.status_code = 200 + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = odb_subnet.DeleteOdbSubnetRequest() - # Convert return value to protobuf type - return_value = oracledatabase.ListAutonomousDatabasesResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) + request.name = "name_value" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_odb_subnet), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_odb_subnet(request) - response = client.list_autonomous_databases(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] -def test_list_autonomous_databases_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_delete_odb_subnet_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - unset_fields = transport.list_autonomous_databases._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "filter", - "orderBy", - "pageSize", - "pageToken", - ) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = odb_subnet.DeleteOdbSubnetRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_odb_subnet), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") ) - & set(("parent",)) - ) + await client.delete_odb_subnet(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request -def test_list_autonomous_databases_rest_flattened(): + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_odb_subnet_flattened(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. 
- return_value = oracledatabase.ListAutonomousDatabasesResponse() + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_odb_subnet), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_odb_subnet( + name="name_value", + ) - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/locations/sample2"} + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", + +def test_delete_odb_subnet_flattened_error(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_odb_subnet( + odb_subnet.DeleteOdbSubnetRequest(), + name="name_value", ) - mock_args.update(sample_request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = oracledatabase.ListAutonomousDatabasesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_autonomous_databases(**mock_args) +@pytest.mark.asyncio +async def test_delete_odb_subnet_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_odb_subnet), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_odb_subnet( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{parent=projects/*/locations/*}/autonomousDatabases" - % client.transport._host, - args[1], - ) + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val -def test_list_autonomous_databases_rest_flattened_error(transport: str = "rest"): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, +@pytest.mark.asyncio +async def test_delete_odb_subnet_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.list_autonomous_databases( - oracledatabase.ListAutonomousDatabasesRequest(), - parent="parent_value", + await client.delete_odb_subnet( + odb_subnet.DeleteOdbSubnetRequest(), + name="name_value", ) -def test_list_autonomous_databases_rest_pager(transport: str = "rest"): +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.ListExadbVmClustersRequest, + dict, + ], +) +def test_list_exadb_vm_clusters(request_type, transport: str = "grpc"): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - oracledatabase.ListAutonomousDatabasesResponse( - autonomous_databases=[ - autonomous_database.AutonomousDatabase(), - autonomous_database.AutonomousDatabase(), - autonomous_database.AutonomousDatabase(), - ], - next_page_token="abc", - ), - oracledatabase.ListAutonomousDatabasesResponse( - autonomous_databases=[], - next_page_token="def", - ), - oracledatabase.ListAutonomousDatabasesResponse( - autonomous_databases=[ - autonomous_database.AutonomousDatabase(), - ], - next_page_token="ghi", - ), - oracledatabase.ListAutonomousDatabasesResponse( - autonomous_databases=[ - autonomous_database.AutonomousDatabase(), - autonomous_database.AutonomousDatabase(), - ], - ), - ) - # Two responses for two calls - response = response + response + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # Wrap the values into proper Response objs - response = tuple( - oracledatabase.ListAutonomousDatabasesResponse.to_json(x) for x in response + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_exadb_vm_clusters), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = oracledatabase.ListExadbVmClustersResponse( + next_page_token="next_page_token_value", ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values + response = client.list_exadb_vm_clusters(request) - sample_request = {"parent": "projects/sample1/locations/sample2"} + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = oracledatabase.ListExadbVmClustersRequest() + assert args[0] == request - pager = client.list_autonomous_databases(request=sample_request) + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListExadbVmClustersPager) + assert response.next_page_token == "next_page_token_value" - results = list(pager) - assert len(results) == 6 - assert all( - isinstance(i, autonomous_database.AutonomousDatabase) for i in results - ) - pages = list(client.list_autonomous_databases(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token +def test_list_exadb_vm_clusters_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = oracledatabase.ListExadbVmClustersRequest( + parent="parent_value", + page_token="page_token_value", + filter="filter_value", + order_by="order_by_value", + ) -def test_get_autonomous_database_rest_use_cached_wrapped_rpc(): + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_exadb_vm_clusters), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.list_exadb_vm_clusters(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == oracledatabase.ListExadbVmClustersRequest( + parent="parent_value", + page_token="page_token_value", + filter="filter_value", + order_by="order_by_value", + ) + + +def test_list_exadb_vm_clusters_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -15498,7 +17107,7 @@ def test_get_autonomous_database_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.get_autonomous_database + client._transport.list_exadb_vm_clusters in client._transport._wrapped_methods ) @@ -15508,173 +17117,545 @@ def test_get_autonomous_database_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.get_autonomous_database + client._transport.list_exadb_vm_clusters ] = mock_rpc - request = {} - client.get_autonomous_database(request) + client.list_exadb_vm_clusters(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.get_autonomous_database(request) + client.list_exadb_vm_clusters(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_get_autonomous_database_rest_required_fields( - request_type=oracledatabase.GetAutonomousDatabaseRequest, +@pytest.mark.asyncio +async def test_list_exadb_vm_clusters_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.OracleDatabaseRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # verify fields with default values are dropped + # Ensure method has been cached + assert ( + client._client._transport.list_exadb_vm_clusters + in client._client._transport._wrapped_methods + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_autonomous_database._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_exadb_vm_clusters + ] = mock_rpc - # verify required fields with default values are now present + request = {} + await client.list_exadb_vm_clusters(request) - jsonified_request["name"] = 
"name_value" + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_autonomous_database._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + await client.list_exadb_vm_clusters(request) - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_exadb_vm_clusters_async( + transport: str = "grpc_asyncio", + request_type=oracledatabase.ListExadbVmClustersRequest, +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_exadb_vm_clusters), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + oracledatabase.ListExadbVmClustersResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_exadb_vm_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = oracledatabase.ListExadbVmClustersRequest() + assert args[0] == request + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListExadbVmClustersAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_exadb_vm_clusters_async_from_dict(): + await test_list_exadb_vm_clusters_async(request_type=dict) + + +def test_list_exadb_vm_clusters_field_headers(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = autonomous_database.AutonomousDatabase() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - response_value = Response() - response_value.status_code = 200 + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = oracledatabase.ListExadbVmClustersRequest() - # Convert return value to protobuf type - return_value = autonomous_database.AutonomousDatabase.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + request.parent = "parent_value" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_exadb_vm_clusters), "__call__" + ) as call: + call.return_value = oracledatabase.ListExadbVmClustersResponse() + client.list_exadb_vm_clusters(request) - response = client.get_autonomous_database(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] -def test_get_autonomous_database_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_list_exadb_vm_clusters_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - unset_fields = transport.get_autonomous_database._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = oracledatabase.ListExadbVmClustersRequest() + request.parent = "parent_value" -def test_get_autonomous_database_rest_flattened(): + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_exadb_vm_clusters), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + oracledatabase.ListExadbVmClustersResponse() + ) + await client.list_exadb_vm_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_exadb_vm_clusters_flattened(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = autonomous_database.AutonomousDatabase() + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_exadb_vm_clusters), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = oracledatabase.ListExadbVmClustersResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_exadb_vm_clusters( + parent="parent_value", + ) - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" - } + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val - # get truthy value for each flattened field - mock_args = dict( - name="name_value", + +def test_list_exadb_vm_clusters_flattened_error(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_exadb_vm_clusters( + oracledatabase.ListExadbVmClustersRequest(), + parent="parent_value", ) - mock_args.update(sample_request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = autonomous_database.AutonomousDatabase.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_autonomous_database(**mock_args) +@pytest.mark.asyncio +async def test_list_exadb_vm_clusters_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_exadb_vm_clusters), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = oracledatabase.ListExadbVmClustersResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + oracledatabase.ListExadbVmClustersResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_exadb_vm_clusters( + parent="parent_value", + ) # Establish that the underlying call was made with the expected # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{name=projects/*/locations/*/autonomousDatabases/*}" - % client.transport._host, - args[1], + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_exadb_vm_clusters_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_exadb_vm_clusters( + oracledatabase.ListExadbVmClustersRequest(), + parent="parent_value", ) -def test_get_autonomous_database_rest_flattened_error(transport: str = "rest"): +def test_list_exadb_vm_clusters_pager(transport_name: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_exadb_vm_clusters), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + oracledatabase.ListExadbVmClustersResponse( + exadb_vm_clusters=[ + exadb_vm_cluster.ExadbVmCluster(), + exadb_vm_cluster.ExadbVmCluster(), + exadb_vm_cluster.ExadbVmCluster(), + ], + next_page_token="abc", + ), + oracledatabase.ListExadbVmClustersResponse( + exadb_vm_clusters=[], + next_page_token="def", + ), + oracledatabase.ListExadbVmClustersResponse( + exadb_vm_clusters=[ + exadb_vm_cluster.ExadbVmCluster(), + ], + next_page_token="ghi", + ), + oracledatabase.ListExadbVmClustersResponse( + exadb_vm_clusters=[ + exadb_vm_cluster.ExadbVmCluster(), + exadb_vm_cluster.ExadbVmCluster(), + ], + ), + RuntimeError, + ) + + expected_metadata = () + retry = retries.Retry() + timeout = 5 + expected_metadata = tuple(expected_metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_exadb_vm_clusters(request={}, retry=retry, timeout=timeout) + + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, exadb_vm_cluster.ExadbVmCluster) for i in results) + + +def test_list_exadb_vm_clusters_pages(transport_name: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_exadb_vm_clusters), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + oracledatabase.ListExadbVmClustersResponse( + exadb_vm_clusters=[ + exadb_vm_cluster.ExadbVmCluster(), + exadb_vm_cluster.ExadbVmCluster(), + exadb_vm_cluster.ExadbVmCluster(), + ], + next_page_token="abc", + ), + oracledatabase.ListExadbVmClustersResponse( + exadb_vm_clusters=[], + next_page_token="def", + ), + oracledatabase.ListExadbVmClustersResponse( + exadb_vm_clusters=[ + exadb_vm_cluster.ExadbVmCluster(), + ], + next_page_token="ghi", + ), + oracledatabase.ListExadbVmClustersResponse( + exadb_vm_clusters=[ + exadb_vm_cluster.ExadbVmCluster(), + exadb_vm_cluster.ExadbVmCluster(), + ], + ), + RuntimeError, + ) + pages = list(client.list_exadb_vm_clusters(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_exadb_vm_clusters_async_pager(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_exadb_vm_clusters), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + oracledatabase.ListExadbVmClustersResponse( + exadb_vm_clusters=[ + exadb_vm_cluster.ExadbVmCluster(), + exadb_vm_cluster.ExadbVmCluster(), + exadb_vm_cluster.ExadbVmCluster(), + ], + next_page_token="abc", + ), + oracledatabase.ListExadbVmClustersResponse( + exadb_vm_clusters=[], + next_page_token="def", + ), + oracledatabase.ListExadbVmClustersResponse( + exadb_vm_clusters=[ + exadb_vm_cluster.ExadbVmCluster(), + ], + next_page_token="ghi", + ), + oracledatabase.ListExadbVmClustersResponse( + exadb_vm_clusters=[ + exadb_vm_cluster.ExadbVmCluster(), + exadb_vm_cluster.ExadbVmCluster(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_exadb_vm_clusters( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, exadb_vm_cluster.ExadbVmCluster) for i in responses) + + +@pytest.mark.asyncio +async def test_list_exadb_vm_clusters_async_pages(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_exadb_vm_clusters), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + oracledatabase.ListExadbVmClustersResponse( + exadb_vm_clusters=[ + exadb_vm_cluster.ExadbVmCluster(), + exadb_vm_cluster.ExadbVmCluster(), + exadb_vm_cluster.ExadbVmCluster(), + ], + next_page_token="abc", + ), + oracledatabase.ListExadbVmClustersResponse( + exadb_vm_clusters=[], + next_page_token="def", + ), + oracledatabase.ListExadbVmClustersResponse( + exadb_vm_clusters=[ + exadb_vm_cluster.ExadbVmCluster(), + ], + next_page_token="ghi", + ), + oracledatabase.ListExadbVmClustersResponse( + exadb_vm_clusters=[ + exadb_vm_cluster.ExadbVmCluster(), + exadb_vm_cluster.ExadbVmCluster(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_exadb_vm_clusters(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.GetExadbVmClusterRequest, + dict, + ], +) +def test_get_exadb_vm_cluster(request_type, transport: str = "grpc"): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_autonomous_database( - oracledatabase.GetAutonomousDatabaseRequest(), + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = exadb_vm_cluster.ExadbVmCluster( name="name_value", + gcp_oracle_zone="gcp_oracle_zone_value", + odb_network="odb_network_value", + odb_subnet="odb_subnet_value", + backup_odb_subnet="backup_odb_subnet_value", + display_name="display_name_value", + entitlement_id="entitlement_id_value", ) + response = client.get_exadb_vm_cluster(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = oracledatabase.GetExadbVmClusterRequest() + assert args[0] == request -def test_create_autonomous_database_rest_use_cached_wrapped_rpc(): + # Establish that the response is the type that we expect. + assert isinstance(response, exadb_vm_cluster.ExadbVmCluster) + assert response.name == "name_value" + assert response.gcp_oracle_zone == "gcp_oracle_zone_value" + assert response.odb_network == "odb_network_value" + assert response.odb_subnet == "odb_subnet_value" + assert response.backup_odb_subnet == "backup_odb_subnet_value" + assert response.display_name == "display_name_value" + assert response.entitlement_id == "entitlement_id_value" + + +def test_get_exadb_vm_cluster_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = oracledatabase.GetExadbVmClusterRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_exadb_vm_cluster), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_exadb_vm_cluster(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == oracledatabase.GetExadbVmClusterRequest( + name="name_value", + ) + + +def test_get_exadb_vm_cluster_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -15683,8 +17664,7 @@ def test_create_autonomous_database_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.create_autonomous_database - in client._transport._wrapped_methods + client._transport.get_exadb_vm_cluster in client._transport._wrapped_methods ) # Replace cached wrapped function with mock @@ -15693,216 +17673,342 @@ def test_create_autonomous_database_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.create_autonomous_database + client._transport.get_exadb_vm_cluster ] = mock_rpc - request = {} - client.create_autonomous_database(request) + client.get_exadb_vm_cluster(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.create_autonomous_database(request) + client.get_exadb_vm_cluster(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_create_autonomous_database_rest_required_fields( - request_type=oracledatabase.CreateAutonomousDatabaseRequest, +@pytest.mark.asyncio +async def test_get_exadb_vm_cluster_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.OracleDatabaseRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request_init["parent"] = "" - request_init["autonomous_database_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # verify fields with default values are dropped - assert "autonomousDatabaseId" not in jsonified_request + # Ensure method has been cached + assert ( + client._client._transport.get_exadb_vm_cluster + in client._client._transport._wrapped_methods + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_autonomous_database._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + 
client._client._transport._wrapped_methods[ + client._client._transport.get_exadb_vm_cluster + ] = mock_rpc - # verify required fields with default values are now present - assert "autonomousDatabaseId" in jsonified_request - assert ( - jsonified_request["autonomousDatabaseId"] - == request_init["autonomous_database_id"] - ) + request = {} + await client.get_exadb_vm_cluster(request) - jsonified_request["parent"] = "parent_value" - jsonified_request["autonomousDatabaseId"] = "autonomous_database_id_value" + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_autonomous_database._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "autonomous_database_id", - "request_id", - ) - ) - jsonified_request.update(unset_fields) + await client.get_exadb_vm_cluster(request) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "autonomousDatabaseId" in jsonified_request - assert jsonified_request["autonomousDatabaseId"] == "autonomous_database_id_value" + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + +@pytest.mark.asyncio +async def test_get_exadb_vm_cluster_async( + transport: str = "grpc_asyncio", + request_type=oracledatabase.GetExadbVmClusterRequest, +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + exadb_vm_cluster.ExadbVmCluster( + name="name_value", + gcp_oracle_zone="gcp_oracle_zone_value", + odb_network="odb_network_value", + odb_subnet="odb_subnet_value", + backup_odb_subnet="backup_odb_subnet_value", + display_name="display_name_value", + entitlement_id="entitlement_id_value", + ) + ) + response = await client.get_exadb_vm_cluster(request) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = oracledatabase.GetExadbVmClusterRequest() + assert args[0] == request - response = client.create_autonomous_database(request) + # Establish that the response is the type that we expect. + assert isinstance(response, exadb_vm_cluster.ExadbVmCluster) + assert response.name == "name_value" + assert response.gcp_oracle_zone == "gcp_oracle_zone_value" + assert response.odb_network == "odb_network_value" + assert response.odb_subnet == "odb_subnet_value" + assert response.backup_odb_subnet == "backup_odb_subnet_value" + assert response.display_name == "display_name_value" + assert response.entitlement_id == "entitlement_id_value" - expected_params = [ - ( - "autonomousDatabaseId", - "", - ), - ("$alt", "json;enum-encoding=int"), - ] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + +@pytest.mark.asyncio +async def test_get_exadb_vm_cluster_async_from_dict(): + await test_get_exadb_vm_cluster_async(request_type=dict) -def test_create_autonomous_database_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials +def test_get_exadb_vm_cluster_field_headers(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), ) - unset_fields = transport.create_autonomous_database._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "autonomousDatabaseId", - "requestId", - ) - ) - & set( - ( - "parent", - "autonomousDatabaseId", - "autonomousDatabase", - ) - ) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = oracledatabase.GetExadbVmClusterRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_exadb_vm_cluster), "__call__" + ) as call: + call.return_value = exadb_vm_cluster.ExadbVmCluster() + client.get_exadb_vm_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_exadb_vm_cluster_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = oracledatabase.GetExadbVmClusterRequest() + + request.name = "name_value" -def test_create_autonomous_database_rest_flattened(): + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_exadb_vm_cluster), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + exadb_vm_cluster.ExadbVmCluster() + ) + await client.get_exadb_vm_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_exadb_vm_cluster_flattened(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name="operations/spam") + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = exadb_vm_cluster.ExadbVmCluster() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_exadb_vm_cluster( + name="name_value", + ) - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/locations/sample2"} + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - autonomous_database=gco_autonomous_database.AutonomousDatabase( - name="name_value" - ), - autonomous_database_id="autonomous_database_id_value", + +def test_get_exadb_vm_cluster_flattened_error(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_exadb_vm_cluster( + oracledatabase.GetExadbVmClusterRequest(), + name="name_value", ) - mock_args.update(sample_request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.create_autonomous_database(**mock_args) +@pytest.mark.asyncio +async def test_get_exadb_vm_cluster_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = exadb_vm_cluster.ExadbVmCluster() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + exadb_vm_cluster.ExadbVmCluster() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_exadb_vm_cluster( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{parent=projects/*/locations/*}/autonomousDatabases" - % client.transport._host, - args[1], + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_exadb_vm_cluster_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_exadb_vm_cluster( + oracledatabase.GetExadbVmClusterRequest(), + name="name_value", ) -def test_create_autonomous_database_rest_flattened_error(transport: str = "rest"): +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.CreateExadbVmClusterRequest, + dict, + ], +) +def test_create_exadb_vm_cluster(request_type, transport: str = "grpc"): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_autonomous_database( - oracledatabase.CreateAutonomousDatabaseRequest(), + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_exadb_vm_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = oracledatabase.CreateExadbVmClusterRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_exadb_vm_cluster_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = oracledatabase.CreateExadbVmClusterRequest( + parent="parent_value", + exadb_vm_cluster_id="exadb_vm_cluster_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_exadb_vm_cluster), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.create_exadb_vm_cluster(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == oracledatabase.CreateExadbVmClusterRequest( parent="parent_value", - autonomous_database=gco_autonomous_database.AutonomousDatabase( - name="name_value" - ), - autonomous_database_id="autonomous_database_id_value", + exadb_vm_cluster_id="exadb_vm_cluster_id_value", ) -def test_delete_autonomous_database_rest_use_cached_wrapped_rpc(): +def test_create_exadb_vm_cluster_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -15911,7 +18017,7 @@ def test_delete_autonomous_database_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.delete_autonomous_database + client._transport.create_exadb_vm_cluster in client._transport._wrapped_methods ) @@ -15921,183 +18027,364 @@ def test_delete_autonomous_database_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute 
client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.delete_autonomous_database + client._transport.create_exadb_vm_cluster ] = mock_rpc - request = {} - client.delete_autonomous_database(request) + client.create_exadb_vm_cluster(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() - client.delete_autonomous_database(request) + client.create_exadb_vm_cluster(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_delete_autonomous_database_rest_required_fields( - request_type=oracledatabase.DeleteAutonomousDatabaseRequest, +@pytest.mark.asyncio +async def test_create_exadb_vm_cluster_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.OracleDatabaseRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.create_exadb_vm_cluster + in 
client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.create_exadb_vm_cluster + ] = mock_rpc + + request = {} + await client.create_exadb_vm_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.create_exadb_vm_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_exadb_vm_cluster_async( + transport: str = "grpc_asyncio", + request_type=oracledatabase.CreateExadbVmClusterRequest, +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - # verify fields with default values are dropped + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_autonomous_database._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_exadb_vm_cluster(request) - # verify required fields with default values are now present + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = oracledatabase.CreateExadbVmClusterRequest() + assert args[0] == request - jsonified_request["name"] = "name_value" + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_autonomous_database._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("request_id",)) - jsonified_request.update(unset_fields) - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" +@pytest.mark.asyncio +async def test_create_exadb_vm_cluster_async_from_dict(): + await test_create_exadb_vm_cluster_async(request_type=dict) + +def test_create_exadb_vm_cluster_field_headers(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. 
- with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "delete", - "query_params": pb_request, - } - transcode.return_value = transcode_result + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = oracledatabase.CreateExadbVmClusterRequest() - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + request.parent = "parent_value" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_exadb_vm_cluster), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_exadb_vm_cluster(request) - response = client.delete_autonomous_database(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] -def test_delete_autonomous_database_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_create_exadb_vm_cluster_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - unset_fields = transport.delete_autonomous_database._get_unset_required_fields({}) - assert set(unset_fields) == (set(("requestId",)) & set(("name",))) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = oracledatabase.CreateExadbVmClusterRequest() + request.parent = "parent_value" -def test_delete_autonomous_database_rest_flattened(): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_exadb_vm_cluster), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.create_exadb_vm_cluster(request) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" - } + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} +def test_create_exadb_vm_cluster_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) - client.delete_autonomous_database(**mock_args) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_exadb_vm_cluster( + parent="parent_value", + exadb_vm_cluster=gco_exadb_vm_cluster.ExadbVmCluster(name="name_value"), + exadb_vm_cluster_id="exadb_vm_cluster_id_value", + ) # Establish that the underlying call was made with the expected # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{name=projects/*/locations/*/autonomousDatabases/*}" - % client.transport._host, - args[1], - ) + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].exadb_vm_cluster + mock_val = gco_exadb_vm_cluster.ExadbVmCluster(name="name_value") + assert arg == mock_val + arg = args[0].exadb_vm_cluster_id + mock_val = "exadb_vm_cluster_id_value" + assert arg == mock_val -def test_delete_autonomous_database_rest_flattened_error(transport: str = "rest"): +def test_create_exadb_vm_cluster_flattened_error(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.delete_autonomous_database( - oracledatabase.DeleteAutonomousDatabaseRequest(), - name="name_value", + client.create_exadb_vm_cluster( + oracledatabase.CreateExadbVmClusterRequest(), + parent="parent_value", + exadb_vm_cluster=gco_exadb_vm_cluster.ExadbVmCluster(name="name_value"), + exadb_vm_cluster_id="exadb_vm_cluster_id_value", ) -def test_restore_autonomous_database_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) +@pytest.mark.asyncio +async def test_create_exadb_vm_cluster_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() + # Mock the actual call within the gRPC stub, and 
fake the request. + with mock.patch.object( + type(client.transport.create_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_exadb_vm_cluster( + parent="parent_value", + exadb_vm_cluster=gco_exadb_vm_cluster.ExadbVmCluster(name="name_value"), + exadb_vm_cluster_id="exadb_vm_cluster_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].exadb_vm_cluster + mock_val = gco_exadb_vm_cluster.ExadbVmCluster(name="name_value") + assert arg == mock_val + arg = args[0].exadb_vm_cluster_id + mock_val = "exadb_vm_cluster_id_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_create_exadb_vm_cluster_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_exadb_vm_cluster( + oracledatabase.CreateExadbVmClusterRequest(), + parent="parent_value", + exadb_vm_cluster=gco_exadb_vm_cluster.ExadbVmCluster(name="name_value"), + exadb_vm_cluster_id="exadb_vm_cluster_id_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.DeleteExadbVmClusterRequest, + dict, + ], +) +def test_delete_exadb_vm_cluster(request_type, transport: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_exadb_vm_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = oracledatabase.DeleteExadbVmClusterRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_exadb_vm_cluster_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. 
+ request = oracledatabase.DeleteExadbVmClusterRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_exadb_vm_cluster), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.delete_exadb_vm_cluster(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == oracledatabase.DeleteExadbVmClusterRequest( + name="name_value", + ) + + +def test_delete_exadb_vm_cluster_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() # Ensure method has been cached assert ( - client._transport.restore_autonomous_database + client._transport.delete_exadb_vm_cluster in client._transport._wrapped_methods ) @@ -16107,183 +18394,331 @@ def test_restore_autonomous_database_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.restore_autonomous_database + client._transport.delete_exadb_vm_cluster ] = mock_rpc - request = {} - client.restore_autonomous_database(request) + client.delete_exadb_vm_cluster(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() - client.restore_autonomous_database(request) + client.delete_exadb_vm_cluster(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_restore_autonomous_database_rest_required_fields( - request_type=oracledatabase.RestoreAutonomousDatabaseRequest, +@pytest.mark.asyncio +async def test_delete_exadb_vm_cluster_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.OracleDatabaseRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.delete_exadb_vm_cluster + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_exadb_vm_cluster + ] = mock_rpc + + request = {} + await client.delete_exadb_vm_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.delete_exadb_vm_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_exadb_vm_cluster_async( + transport: str = "grpc_asyncio", + request_type=oracledatabase.DeleteExadbVmClusterRequest, +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - # verify fields with default values are dropped + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).restore_autonomous_database._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_exadb_vm_cluster(request) - # verify required fields with default values are now present + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = oracledatabase.DeleteExadbVmClusterRequest() + assert args[0] == request - jsonified_request["name"] = "name_value" + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).restore_autonomous_database._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" +@pytest.mark.asyncio +async def test_delete_exadb_vm_cluster_async_from_dict(): + await test_delete_exadb_vm_cluster_async(request_type=dict) + +def test_delete_exadb_vm_cluster_field_headers(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = oracledatabase.DeleteExadbVmClusterRequest() - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + request.name = "name_value" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_exadb_vm_cluster), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_exadb_vm_cluster(request) - response = client.restore_autonomous_database(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] -def test_restore_autonomous_database_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_delete_exadb_vm_cluster_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - unset_fields = transport.restore_autonomous_database._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "name", - "restoreTime", - ) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = oracledatabase.DeleteExadbVmClusterRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_exadb_vm_cluster), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") ) - ) + await client.delete_exadb_vm_cluster(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request -def test_restore_autonomous_database_rest_flattened(): + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_exadb_vm_cluster_flattened(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_exadb_vm_cluster( + name="name_value", + ) - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" - } + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val - # get truthy value for each flattened field - mock_args = dict( + +def test_delete_exadb_vm_cluster_flattened_error(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_exadb_vm_cluster( + oracledatabase.DeleteExadbVmClusterRequest(), name="name_value", - restore_time=timestamp_pb2.Timestamp(seconds=751), ) - mock_args.update(sample_request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.restore_autonomous_database(**mock_args) +@pytest.mark.asyncio +async def test_delete_exadb_vm_cluster_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_exadb_vm_cluster( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{name=projects/*/locations/*/autonomousDatabases/*}:restore" - % client.transport._host, - args[1], - ) + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val -def test_restore_autonomous_database_rest_flattened_error(transport: str = "rest"): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, +@pytest.mark.asyncio +async def test_delete_exadb_vm_cluster_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.restore_autonomous_database( - oracledatabase.RestoreAutonomousDatabaseRequest(), + await client.delete_exadb_vm_cluster( + oracledatabase.DeleteExadbVmClusterRequest(), name="name_value", - restore_time=timestamp_pb2.Timestamp(seconds=751), ) -def test_generate_autonomous_database_wallet_rest_use_cached_wrapped_rpc(): +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.UpdateExadbVmClusterRequest, + dict, + ], +) +def test_update_exadb_vm_cluster(request_type, transport: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.update_exadb_vm_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = oracledatabase.UpdateExadbVmClusterRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_exadb_vm_cluster_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = oracledatabase.UpdateExadbVmClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_exadb_vm_cluster), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.update_exadb_vm_cluster(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == oracledatabase.UpdateExadbVmClusterRequest() + + +def test_update_exadb_vm_cluster_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -16292,7 +18727,7 @@ def test_generate_autonomous_database_wallet_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.generate_autonomous_database_wallet + client._transport.update_exadb_vm_cluster in client._transport._wrapped_methods ) @@ -16302,200 +18737,345 @@ def test_generate_autonomous_database_wallet_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.generate_autonomous_database_wallet + client._transport.update_exadb_vm_cluster ] = mock_rpc - request = {} - client.generate_autonomous_database_wallet(request) + client.update_exadb_vm_cluster(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.generate_autonomous_database_wallet(request) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_exadb_vm_cluster(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_generate_autonomous_database_wallet_rest_required_fields( - request_type=oracledatabase.GenerateAutonomousDatabaseWalletRequest, +@pytest.mark.asyncio +async def test_update_exadb_vm_cluster_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.OracleDatabaseRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request_init["name"] = "" - request_init["password"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # verify fields with default values are dropped + # Ensure method has been cached + assert ( + client._client._transport.update_exadb_vm_cluster + in client._client._transport._wrapped_methods + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).generate_autonomous_database_wallet._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.update_exadb_vm_cluster + ] = mock_rpc - # verify required fields with default values are now present + request = {} + await 
client.update_exadb_vm_cluster(request) - jsonified_request["name"] = "name_value" - jsonified_request["password"] = "password_value" + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).generate_autonomous_database_wallet._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - assert "password" in jsonified_request - assert jsonified_request["password"] == "password_value" + await client.update_exadb_vm_cluster(request) - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - # Designate an appropriate value for the returned response. - return_value = oracledatabase.GenerateAutonomousDatabaseWalletResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - response_value = Response() - response_value.status_code = 200 +@pytest.mark.asyncio +async def test_update_exadb_vm_cluster_async( + transport: str = "grpc_asyncio", + request_type=oracledatabase.UpdateExadbVmClusterRequest, +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - # Convert return value to protobuf type - return_value = oracledatabase.GenerateAutonomousDatabaseWalletResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.update_exadb_vm_cluster(request) - response = client.generate_autonomous_database_wallet(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = oracledatabase.UpdateExadbVmClusterRequest() + assert args[0] == request - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) -def test_generate_autonomous_database_wallet_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_update_exadb_vm_cluster_async_from_dict(): + await test_update_exadb_vm_cluster_async(request_type=dict) + + +def test_update_exadb_vm_cluster_field_headers(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), ) - unset_fields = ( - transport.generate_autonomous_database_wallet._get_unset_required_fields({}) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = oracledatabase.UpdateExadbVmClusterRequest() + + request.exadb_vm_cluster.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_exadb_vm_cluster), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_exadb_vm_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "exadb_vm_cluster.name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_exadb_vm_cluster_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - assert set(unset_fields) == ( - set(()) - & set( - ( - "name", - "password", - ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = oracledatabase.UpdateExadbVmClusterRequest() + + request.exadb_vm_cluster.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_exadb_vm_cluster), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") ) - ) + await client.update_exadb_vm_cluster(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request -def test_generate_autonomous_database_wallet_rest_flattened(): + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "exadb_vm_cluster.name=name_value", + ) in kw["metadata"] + + +def test_update_exadb_vm_cluster_flattened(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = oracledatabase.GenerateAutonomousDatabaseWalletResponse() + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_exadb_vm_cluster( + exadb_vm_cluster=gco_exadb_vm_cluster.ExadbVmCluster(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" - } + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].exadb_vm_cluster + mock_val = gco_exadb_vm_cluster.ExadbVmCluster(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - type_=autonomous_database.GenerateType.ALL, - is_regional=True, - password="password_value", - ) - mock_args.update(sample_request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = oracledatabase.GenerateAutonomousDatabaseWalletResponse.pb( - return_value +def test_update_exadb_vm_cluster_flattened_error(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_exadb_vm_cluster( + oracledatabase.UpdateExadbVmClusterRequest(), + exadb_vm_cluster=gco_exadb_vm_cluster.ExadbVmCluster(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.generate_autonomous_database_wallet(**mock_args) + +@pytest.mark.asyncio +async def test_update_exadb_vm_cluster_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_exadb_vm_cluster( + exadb_vm_cluster=gco_exadb_vm_cluster.ExadbVmCluster(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) # Establish that the underlying call was made with the expected # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{name=projects/*/locations/*/autonomousDatabases/*}:generateWallet" - % client.transport._host, - args[1], + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].exadb_vm_cluster + mock_val = gco_exadb_vm_cluster.ExadbVmCluster(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_update_exadb_vm_cluster_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_exadb_vm_cluster( + oracledatabase.UpdateExadbVmClusterRequest(), + exadb_vm_cluster=gco_exadb_vm_cluster.ExadbVmCluster(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) -def test_generate_autonomous_database_wallet_rest_flattened_error( - transport: str = "rest", -): +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.RemoveVirtualMachineExadbVmClusterRequest, + dict, + ], +) +def test_remove_virtual_machine_exadb_vm_cluster(request_type, transport: str = "grpc"): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.generate_autonomous_database_wallet( - oracledatabase.GenerateAutonomousDatabaseWalletRequest(), + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.remove_virtual_machine_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.remove_virtual_machine_exadb_vm_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = oracledatabase.RemoveVirtualMachineExadbVmClusterRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_remove_virtual_machine_exadb_vm_cluster_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = oracledatabase.RemoveVirtualMachineExadbVmClusterRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.remove_virtual_machine_exadb_vm_cluster), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.remove_virtual_machine_exadb_vm_cluster(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == oracledatabase.RemoveVirtualMachineExadbVmClusterRequest( name="name_value", - type_=autonomous_database.GenerateType.ALL, - is_regional=True, - password="password_value", ) -def test_list_autonomous_db_versions_rest_use_cached_wrapped_rpc(): +def test_remove_virtual_machine_exadb_vm_cluster_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -16504,7 +19084,7 @@ def test_list_autonomous_db_versions_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.list_autonomous_db_versions + client._transport.remove_virtual_machine_exadb_vm_cluster in client._transport._wrapped_methods ) @@ -16514,253 +19094,356 @@ def test_list_autonomous_db_versions_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.list_autonomous_db_versions + client._transport.remove_virtual_machine_exadb_vm_cluster ] = mock_rpc - request = {} - client.list_autonomous_db_versions(request) + client.remove_virtual_machine_exadb_vm_cluster(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.list_autonomous_db_versions(request) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.remove_virtual_machine_exadb_vm_cluster(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_list_autonomous_db_versions_rest_required_fields( - request_type=oracledatabase.ListAutonomousDbVersionsRequest, +@pytest.mark.asyncio +async def test_remove_virtual_machine_exadb_vm_cluster_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.OracleDatabaseRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # verify fields with default values are dropped + # Ensure method has been cached + assert ( + client._client._transport.remove_virtual_machine_exadb_vm_cluster + in client._client._transport._wrapped_methods + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_autonomous_db_versions._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.remove_virtual_machine_exadb_vm_cluster + ] = mock_rpc - # verify required fields with default values are now present + request = {} + 
await client.remove_virtual_machine_exadb_vm_cluster(request) - jsonified_request["parent"] = "parent_value" + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_autonomous_db_versions._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "page_size", - "page_token", - ) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.remove_virtual_machine_exadb_vm_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_remove_virtual_machine_exadb_vm_cluster_async( + transport: str = "grpc_asyncio", + request_type=oracledatabase.RemoveVirtualMachineExadbVmClusterRequest, +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - jsonified_request.update(unset_fields) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.remove_virtual_machine_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.remove_virtual_machine_exadb_vm_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = oracledatabase.RemoveVirtualMachineExadbVmClusterRequest() + assert args[0] == request + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_remove_virtual_machine_exadb_vm_cluster_async_from_dict(): + await test_remove_virtual_machine_exadb_vm_cluster_async(request_type=dict) + + +def test_remove_virtual_machine_exadb_vm_cluster_field_headers(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = oracledatabase.ListAutonomousDbVersionsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - response_value = Response() - response_value.status_code = 200 + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = oracledatabase.RemoveVirtualMachineExadbVmClusterRequest() - # Convert return value to protobuf type - return_value = oracledatabase.ListAutonomousDbVersionsResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) + request.name = "name_value" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.remove_virtual_machine_exadb_vm_cluster), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.remove_virtual_machine_exadb_vm_cluster(request) - response = client.list_autonomous_db_versions(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] -def test_list_autonomous_db_versions_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_remove_virtual_machine_exadb_vm_cluster_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - unset_fields = transport.list_autonomous_db_versions._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "pageSize", - "pageToken", - ) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = oracledatabase.RemoveVirtualMachineExadbVmClusterRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.remove_virtual_machine_exadb_vm_cluster), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") ) - & set(("parent",)) - ) + await client.remove_virtual_machine_exadb_vm_cluster(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request -def test_list_autonomous_db_versions_rest_flattened(): + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_remove_virtual_machine_exadb_vm_cluster_flattened(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = oracledatabase.ListAutonomousDbVersionsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/locations/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.remove_virtual_machine_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.remove_virtual_machine_exadb_vm_cluster( + name="name_value", + hostnames=["hostnames_value"], ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = oracledatabase.ListAutonomousDbVersionsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.list_autonomous_db_versions(**mock_args) # Establish that the underlying call was made with the expected # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{parent=projects/*/locations/*}/autonomousDbVersions" - % client.transport._host, - args[1], - ) + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + arg = args[0].hostnames + mock_val = ["hostnames_value"] + assert arg == mock_val -def test_list_autonomous_db_versions_rest_flattened_error(transport: str = "rest"): +def test_remove_virtual_machine_exadb_vm_cluster_flattened_error(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.list_autonomous_db_versions( - oracledatabase.ListAutonomousDbVersionsRequest(), - parent="parent_value", + client.remove_virtual_machine_exadb_vm_cluster( + oracledatabase.RemoveVirtualMachineExadbVmClusterRequest(), + name="name_value", + hostnames=["hostnames_value"], ) -def test_list_autonomous_db_versions_rest_pager(transport: str = "rest"): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, +@pytest.mark.asyncio +async def test_remove_virtual_machine_exadb_vm_cluster_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. - # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - oracledatabase.ListAutonomousDbVersionsResponse( - autonomous_db_versions=[ - autonomous_db_version.AutonomousDbVersion(), - autonomous_db_version.AutonomousDbVersion(), - autonomous_db_version.AutonomousDbVersion(), - ], - next_page_token="abc", - ), - oracledatabase.ListAutonomousDbVersionsResponse( - autonomous_db_versions=[], - next_page_token="def", - ), - oracledatabase.ListAutonomousDbVersionsResponse( - autonomous_db_versions=[ - autonomous_db_version.AutonomousDbVersion(), - ], - next_page_token="ghi", - ), - oracledatabase.ListAutonomousDbVersionsResponse( - autonomous_db_versions=[ - autonomous_db_version.AutonomousDbVersion(), - autonomous_db_version.AutonomousDbVersion(), - ], - ), + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.remove_virtual_machine_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.remove_virtual_machine_exadb_vm_cluster( + name="name_value", + hostnames=["hostnames_value"], ) - # Two responses for two calls - response = response + response - # Wrap the values into proper Response objs - response = tuple( - oracledatabase.ListAutonomousDbVersionsResponse.to_json(x) for x in response + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + arg = args[0].hostnames + mock_val = ["hostnames_value"] + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_remove_virtual_machine_exadb_vm_cluster_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.remove_virtual_machine_exadb_vm_cluster( + oracledatabase.RemoveVirtualMachineExadbVmClusterRequest(), + name="name_value", + hostnames=["hostnames_value"], ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - sample_request = {"parent": "projects/sample1/locations/sample2"} - pager = client.list_autonomous_db_versions(request=sample_request) +@pytest.mark.parametrize( + "request_type", + [ + exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest, + dict, + ], +) +def test_list_exascale_db_storage_vaults(request_type, transport: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) - results = list(pager) - assert len(results) == 6 - assert all( - isinstance(i, autonomous_db_version.AutonomousDbVersion) for i in results + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_exascale_db_storage_vaults), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse( + next_page_token="next_page_token_value", + ) ) + response = client.list_exascale_db_storage_vaults(request) - pages = list(client.list_autonomous_db_versions(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListExascaleDbStorageVaultsPager) + assert response.next_page_token == "next_page_token_value" -def test_list_autonomous_database_character_sets_rest_use_cached_wrapped_rpc(): +def test_list_exascale_db_storage_vaults_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest( + parent="parent_value", + page_token="page_token_value", + filter="filter_value", + order_by="order_by_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_exascale_db_storage_vaults), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.list_exascale_db_storage_vaults(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest( + parent="parent_value", + page_token="page_token_value", + filter="filter_value", + order_by="order_by_value", + ) + + +def test_list_exascale_db_storage_vaults_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -16769,7 +19452,7 @@ def test_list_autonomous_database_character_sets_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.list_autonomous_database_character_sets + client._transport.list_exascale_db_storage_vaults in client._transport._wrapped_methods ) @@ -16779,548 +19462,553 @@ def test_list_autonomous_database_character_sets_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.list_autonomous_database_character_sets + client._transport.list_exascale_db_storage_vaults ] = mock_rpc - request = {} - client.list_autonomous_database_character_sets(request) + client.list_exascale_db_storage_vaults(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.list_autonomous_database_character_sets(request) + client.list_exascale_db_storage_vaults(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_list_autonomous_database_character_sets_rest_required_fields( - request_type=oracledatabase.ListAutonomousDatabaseCharacterSetsRequest, +@pytest.mark.asyncio +async def test_list_exascale_db_storage_vaults_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.OracleDatabaseRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # verify fields with default values are dropped + # Ensure method has been cached + assert ( + client._client._transport.list_exascale_db_storage_vaults + in client._client._transport._wrapped_methods + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_autonomous_database_character_sets._get_unset_required_fields( - jsonified_request - ) - jsonified_request.update(unset_fields) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_exascale_db_storage_vaults + ] = mock_rpc - # verify required fields with default values are now 
present + request = {} + await client.list_exascale_db_storage_vaults(request) - jsonified_request["parent"] = "parent_value" + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_autonomous_database_character_sets._get_unset_required_fields( - jsonified_request + await client.list_exascale_db_storage_vaults(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_exascale_db_storage_vaults_async( + transport: str = "grpc_asyncio", + request_type=exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest, +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "filter", - "page_size", - "page_token", + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_exascale_db_storage_vaults), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse( + next_page_token="next_page_token_value", + ) ) - ) - jsonified_request.update(unset_fields) + response = await client.list_exascale_db_storage_vaults(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListExascaleDbStorageVaultsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_exascale_db_storage_vaults_async_from_dict(): + await test_list_exascale_db_storage_vaults_async(request_type=dict) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" +def test_list_exascale_db_storage_vaults_field_headers(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = oracledatabase.ListAutonomousDatabaseCharacterSetsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - response_value = Response() - response_value.status_code = 200 + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest() - # Convert return value to protobuf type - return_value = ( - oracledatabase.ListAutonomousDatabaseCharacterSetsResponse.pb( - return_value - ) - ) - json_return_value = json_format.MessageToJson(return_value) + request.parent = "parent_value" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_exascale_db_storage_vaults), "__call__" + ) as call: + call.return_value = ( + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse() + ) + client.list_exascale_db_storage_vaults(request) - response = client.list_autonomous_database_character_sets(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] -def test_list_autonomous_database_character_sets_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_list_exascale_db_storage_vaults_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - unset_fields = ( - transport.list_autonomous_database_character_sets._get_unset_required_fields({}) - ) - assert set(unset_fields) == ( - set( - ( - "filter", - "pageSize", - "pageToken", - ) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_exascale_db_storage_vaults), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse() ) - & set(("parent",)) - ) + await client.list_exascale_db_storage_vaults(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request -def test_list_autonomous_database_character_sets_rest_flattened(): + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_exascale_db_storage_vaults_flattened(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = oracledatabase.ListAutonomousDatabaseCharacterSetsResponse() + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_exascale_db_storage_vaults), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.list_exascale_db_storage_vaults( + parent="parent_value", + ) - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/locations/sample2"} + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val - # get truthy value for each flattened field - mock_args = dict( + +def test_list_exascale_db_storage_vaults_flattened_error(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_exascale_db_storage_vaults( + exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest(), parent="parent_value", ) - mock_args.update(sample_request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = oracledatabase.ListAutonomousDatabaseCharacterSetsResponse.pb( - return_value + +@pytest.mark.asyncio +async def test_list_exascale_db_storage_vaults_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_exascale_db_storage_vaults), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = ( + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse() ) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_autonomous_database_character_sets(**mock_args) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_exascale_db_storage_vaults( + parent="parent_value", + ) # Establish that the underlying call was made with the expected # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{parent=projects/*/locations/*}/autonomousDatabaseCharacterSets" - % client.transport._host, - args[1], - ) + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val -def test_list_autonomous_database_character_sets_rest_flattened_error( - transport: str = "rest", -): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, +@pytest.mark.asyncio +async def test_list_exascale_db_storage_vaults_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.list_autonomous_database_character_sets( - oracledatabase.ListAutonomousDatabaseCharacterSetsRequest(), + await client.list_exascale_db_storage_vaults( + exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest(), parent="parent_value", ) -def test_list_autonomous_database_character_sets_rest_pager(transport: str = "rest"): +def test_list_exascale_db_storage_vaults_pager(transport_name: str = "grpc"): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport=transport_name, ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. - # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - oracledatabase.ListAutonomousDatabaseCharacterSetsResponse( - autonomous_database_character_sets=[ - autonomous_database_character_set.AutonomousDatabaseCharacterSet(), - autonomous_database_character_set.AutonomousDatabaseCharacterSet(), - autonomous_database_character_set.AutonomousDatabaseCharacterSet(), + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_exascale_db_storage_vaults), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse( + exascale_db_storage_vaults=[ + exascale_db_storage_vault.ExascaleDbStorageVault(), + exascale_db_storage_vault.ExascaleDbStorageVault(), + exascale_db_storage_vault.ExascaleDbStorageVault(), ], next_page_token="abc", ), - oracledatabase.ListAutonomousDatabaseCharacterSetsResponse( - autonomous_database_character_sets=[], + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse( + exascale_db_storage_vaults=[], next_page_token="def", ), - oracledatabase.ListAutonomousDatabaseCharacterSetsResponse( - autonomous_database_character_sets=[ - autonomous_database_character_set.AutonomousDatabaseCharacterSet(), + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse( + exascale_db_storage_vaults=[ + exascale_db_storage_vault.ExascaleDbStorageVault(), ], next_page_token="ghi", ), - oracledatabase.ListAutonomousDatabaseCharacterSetsResponse( - autonomous_database_character_sets=[ - autonomous_database_character_set.AutonomousDatabaseCharacterSet(), - autonomous_database_character_set.AutonomousDatabaseCharacterSet(), + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse( + exascale_db_storage_vaults=[ + exascale_db_storage_vault.ExascaleDbStorageVault(), + exascale_db_storage_vault.ExascaleDbStorageVault(), ], ), + RuntimeError, ) - # Two responses for two calls - response = response + response - # Wrap the values into proper Response objs - response = tuple( - oracledatabase.ListAutonomousDatabaseCharacterSetsResponse.to_json(x) - for x in response + expected_metadata = () + retry = retries.Retry() + timeout = 5 + expected_metadata = tuple(expected_metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_exascale_db_storage_vaults( + request={}, retry=retry, timeout=timeout ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = 
response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {"parent": "projects/sample1/locations/sample2"} - pager = client.list_autonomous_database_character_sets(request=sample_request) + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout results = list(pager) assert len(results) == 6 assert all( - isinstance( - i, autonomous_database_character_set.AutonomousDatabaseCharacterSet - ) + isinstance(i, exascale_db_storage_vault.ExascaleDbStorageVault) for i in results ) - pages = list( - client.list_autonomous_database_character_sets(request=sample_request).pages - ) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token +def test_list_exascale_db_storage_vaults_pages(transport_name: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) -def test_list_autonomous_database_backups_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_exascale_db_storage_vaults), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse( + exascale_db_storage_vaults=[ + exascale_db_storage_vault.ExascaleDbStorageVault(), + exascale_db_storage_vault.ExascaleDbStorageVault(), + exascale_db_storage_vault.ExascaleDbStorageVault(), + ], + next_page_token="abc", + ), + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse( + exascale_db_storage_vaults=[], + next_page_token="def", + ), + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse( + exascale_db_storage_vaults=[ + exascale_db_storage_vault.ExascaleDbStorageVault(), + ], + next_page_token="ghi", + ), + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse( + exascale_db_storage_vaults=[ + exascale_db_storage_vault.ExascaleDbStorageVault(), + exascale_db_storage_vault.ExascaleDbStorageVault(), + ], + ), + RuntimeError, ) + pages = list(client.list_exascale_db_storage_vaults(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - # Ensure method has been cached - assert ( - client._transport.list_autonomous_database_backups - in client._transport._wrapped_methods - ) +@pytest.mark.asyncio +async def test_list_exascale_db_storage_vaults_async_pager(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.list_autonomous_database_backups - ] = mock_rpc - - request = {} - client.list_autonomous_database_backups(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - client.list_autonomous_database_backups(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_list_autonomous_database_backups_rest_required_fields( - request_type=oracledatabase.ListAutonomousDatabaseBackupsRequest, -): - transport_class = transports.OracleDatabaseRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_autonomous_database_backups._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_autonomous_database_backups._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "filter", - "page_size", - "page_token", + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_exascale_db_storage_vaults), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse( + exascale_db_storage_vaults=[ + exascale_db_storage_vault.ExascaleDbStorageVault(), + exascale_db_storage_vault.ExascaleDbStorageVault(), + exascale_db_storage_vault.ExascaleDbStorageVault(), + ], + next_page_token="abc", + ), + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse( + exascale_db_storage_vaults=[], + next_page_token="def", + ), + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse( + exascale_db_storage_vaults=[ + exascale_db_storage_vault.ExascaleDbStorageVault(), + ], + next_page_token="ghi", + ), + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse( + exascale_db_storage_vaults=[ + exascale_db_storage_vault.ExascaleDbStorageVault(), + exascale_db_storage_vault.ExascaleDbStorageVault(), + ], + ), + RuntimeError, ) - ) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = oracledatabase.ListAutonomousDatabaseBackupsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = oracledatabase.ListAutonomousDatabaseBackupsResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.list_autonomous_database_backups(request) + async_pager = await client.list_exascale_db_storage_vaults( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + assert len(responses) == 6 + assert all( + isinstance(i, exascale_db_storage_vault.ExascaleDbStorageVault) + for i in responses + ) -def test_list_autonomous_database_backups_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_list_exascale_db_storage_vaults_async_pages(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - unset_fields = ( - transport.list_autonomous_database_backups._get_unset_required_fields({}) - ) - assert set(unset_fields) == ( - set( - ( - "filter", - "pageSize", - "pageToken", - ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_exascale_db_storage_vaults), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse( + exascale_db_storage_vaults=[ + exascale_db_storage_vault.ExascaleDbStorageVault(), + exascale_db_storage_vault.ExascaleDbStorageVault(), + exascale_db_storage_vault.ExascaleDbStorageVault(), + ], + next_page_token="abc", + ), + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse( + exascale_db_storage_vaults=[], + next_page_token="def", + ), + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse( + exascale_db_storage_vaults=[ + exascale_db_storage_vault.ExascaleDbStorageVault(), + ], + next_page_token="ghi", + ), + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse( + exascale_db_storage_vaults=[ + exascale_db_storage_vault.ExascaleDbStorageVault(), + exascale_db_storage_vault.ExascaleDbStorageVault(), + ], + ), + RuntimeError, ) - & set(("parent",)) - ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_exascale_db_storage_vaults(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token -def test_list_autonomous_database_backups_rest_flattened(): +@pytest.mark.parametrize( + "request_type", + [ + exascale_db_storage_vault.GetExascaleDbStorageVaultRequest, + dict, + ], +) +def test_get_exascale_db_storage_vault(request_type, transport: str = "grpc"): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. 
- return_value = oracledatabase.ListAutonomousDatabaseBackupsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/locations/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = oracledatabase.ListAutonomousDatabaseBackupsResponse.pb( - return_value + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_exascale_db_storage_vault), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = exascale_db_storage_vault.ExascaleDbStorageVault( + name="name_value", + display_name="display_name_value", + gcp_oracle_zone="gcp_oracle_zone_value", + entitlement_id="entitlement_id_value", ) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_exascale_db_storage_vault(request) - client.list_autonomous_database_backups(**mock_args) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = exascale_db_storage_vault.GetExascaleDbStorageVaultRequest() + assert args[0] == request - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{parent=projects/*/locations/*}/autonomousDatabaseBackups" - % client.transport._host, - args[1], - ) + # Establish that the response is the type that we expect. + assert isinstance(response, exascale_db_storage_vault.ExascaleDbStorageVault) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.gcp_oracle_zone == "gcp_oracle_zone_value" + assert response.entitlement_id == "entitlement_id_value" -def test_list_autonomous_database_backups_rest_flattened_error(transport: str = "rest"): +def test_get_exascale_db_storage_vault_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="grpc", ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_autonomous_database_backups( - oracledatabase.ListAutonomousDatabaseBackupsRequest(), - parent="parent_value", - ) - - -def test_list_autonomous_database_backups_rest_pager(transport: str = "rest"): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = exascale_db_storage_vault.GetExascaleDbStorageVaultRequest( + name="name_value", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - oracledatabase.ListAutonomousDatabaseBackupsResponse( - autonomous_database_backups=[ - autonomous_db_backup.AutonomousDatabaseBackup(), - autonomous_db_backup.AutonomousDatabaseBackup(), - autonomous_db_backup.AutonomousDatabaseBackup(), - ], - next_page_token="abc", - ), - oracledatabase.ListAutonomousDatabaseBackupsResponse( - autonomous_database_backups=[], - next_page_token="def", - ), - oracledatabase.ListAutonomousDatabaseBackupsResponse( - autonomous_database_backups=[ - autonomous_db_backup.AutonomousDatabaseBackup(), - ], - next_page_token="ghi", - ), - oracledatabase.ListAutonomousDatabaseBackupsResponse( - autonomous_database_backups=[ - autonomous_db_backup.AutonomousDatabaseBackup(), - autonomous_db_backup.AutonomousDatabaseBackup(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple( - oracledatabase.ListAutonomousDatabaseBackupsResponse.to_json(x) - for x in response - ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {"parent": "projects/sample1/locations/sample2"} - - pager = client.list_autonomous_database_backups(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all( - isinstance(i, autonomous_db_backup.AutonomousDatabaseBackup) - for i in results + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_exascale_db_storage_vault), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
) - - pages = list( - client.list_autonomous_database_backups(request=sample_request).pages + client.get_exascale_db_storage_vault(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == exascale_db_storage_vault.GetExascaleDbStorageVaultRequest( + name="name_value", ) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token -def test_stop_autonomous_database_rest_use_cached_wrapped_rpc(): +def test_get_exascale_db_storage_vault_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -17329,7 +20017,7 @@ def test_stop_autonomous_database_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.stop_autonomous_database + client._transport.get_exascale_db_storage_vault in client._transport._wrapped_methods ) @@ -17339,358 +20027,338 @@ def test_stop_autonomous_database_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.stop_autonomous_database + client._transport.get_exascale_db_storage_vault ] = mock_rpc - request = {} - client.stop_autonomous_database(request) + client.get_exascale_db_storage_vault(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.stop_autonomous_database(request) + client.get_exascale_db_storage_vault(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_stop_autonomous_database_rest_required_fields( - request_type=oracledatabase.StopAutonomousDatabaseRequest, +@pytest.mark.asyncio +async def test_get_exascale_db_storage_vault_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.OracleDatabaseRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - # verify fields with default values are dropped + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).stop_autonomous_database._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Ensure method has been cached + assert ( + client._client._transport.get_exascale_db_storage_vault + in client._client._transport._wrapped_methods + ) - # verify required fields with default values are now present + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ 
+ client._client._transport.get_exascale_db_storage_vault + ] = mock_rpc - jsonified_request["name"] = "name_value" + request = {} + await client.get_exascale_db_storage_vault(request) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).stop_autonomous_database._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" + await client.get_exascale_db_storage_vault(request) - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) +@pytest.mark.asyncio +async def test_get_exascale_db_storage_vault_async( + transport: str = "grpc_asyncio", + request_type=exascale_db_storage_vault.GetExascaleDbStorageVaultRequest, +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - response = client.stop_autonomous_database(request) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_exascale_db_storage_vault), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + exascale_db_storage_vault.ExascaleDbStorageVault( + name="name_value", + display_name="display_name_value", + gcp_oracle_zone="gcp_oracle_zone_value", + entitlement_id="entitlement_id_value", + ) + ) + response = await client.get_exascale_db_storage_vault(request) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = exascale_db_storage_vault.GetExascaleDbStorageVaultRequest() + assert args[0] == request + # Establish that the response is the type that we expect. + assert isinstance(response, exascale_db_storage_vault.ExascaleDbStorageVault) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.gcp_oracle_zone == "gcp_oracle_zone_value" + assert response.entitlement_id == "entitlement_id_value" -def test_stop_autonomous_database_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - unset_fields = transport.stop_autonomous_database._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) +@pytest.mark.asyncio +async def test_get_exascale_db_storage_vault_async_from_dict(): + await test_get_exascale_db_storage_vault_async(request_type=dict) -def test_stop_autonomous_database_rest_flattened(): +def test_get_exascale_db_storage_vault_field_headers(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" - } + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = exascale_db_storage_vault.GetExascaleDbStorageVaultRequest() - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) + request.name = "name_value" - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_exascale_db_storage_vault), "__call__" + ) as call: + call.return_value = exascale_db_storage_vault.ExascaleDbStorageVault() + client.get_exascale_db_storage_vault(request) - client.stop_autonomous_database(**mock_args) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{name=projects/*/locations/*/autonomousDatabases/*}:stop" - % client.transport._host, - args[1], - ) + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] -def test_stop_autonomous_database_rest_flattened_error(transport: str = "rest"): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, +@pytest.mark.asyncio +async def test_get_exascale_db_storage_vault_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.stop_autonomous_database( - oracledatabase.StopAutonomousDatabaseRequest(), - name="name_value", - ) - - -def test_start_autonomous_database_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = exascale_db_storage_vault.GetExascaleDbStorageVaultRequest() - # Ensure method has been cached - assert ( - client._transport.start_autonomous_database - in client._transport._wrapped_methods - ) + request.name = "name_value" - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_exascale_db_storage_vault), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + exascale_db_storage_vault.ExascaleDbStorageVault() ) - client._transport._wrapped_methods[ - client._transport.start_autonomous_database - ] = mock_rpc - - request = {} - client.start_autonomous_database(request) + await client.get_exascale_db_storage_vault(request) # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.start_autonomous_database(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] -def test_start_autonomous_database_rest_required_fields( - request_type=oracledatabase.StartAutonomousDatabaseRequest, -): - transport_class = transports.OracleDatabaseRestTransport - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) +def test_get_exascale_db_storage_vault_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), ) - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).start_autonomous_database._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - 
jsonified_request["name"] = "name_value" + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_exascale_db_storage_vault), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = exascale_db_storage_vault.ExascaleDbStorageVault() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_exascale_db_storage_vault( + name="name_value", + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).start_autonomous_database._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" +def test_get_exascale_db_storage_vault_flattened_error(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_exascale_db_storage_vault( + exascale_db_storage_vault.GetExascaleDbStorageVaultRequest(), + name="name_value", + ) - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} +@pytest.mark.asyncio +async def test_get_exascale_db_storage_vault_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) - response = client.start_autonomous_database(request) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_exascale_db_storage_vault), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = exascale_db_storage_vault.ExascaleDbStorageVault() - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + exascale_db_storage_vault.ExascaleDbStorageVault() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_exascale_db_storage_vault( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val -def test_start_autonomous_database_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_get_exascale_db_storage_vault_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - unset_fields = transport.start_autonomous_database._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_exascale_db_storage_vault( + exascale_db_storage_vault.GetExascaleDbStorageVaultRequest(), + name="name_value", + ) -def test_start_autonomous_database_rest_flattened(): +@pytest.mark.parametrize( + "request_type", + [ + gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest, + dict, + ], +) +def test_create_exascale_db_storage_vault(request_type, transport: str = "grpc"): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_exascale_db_storage_vault), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_exascale_db_storage_vault(request) - client.start_autonomous_database(**mock_args) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest() + assert args[0] == request - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{name=projects/*/locations/*/autonomousDatabases/*}:start" - % client.transport._host, - args[1], - ) + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) -def test_start_autonomous_database_rest_flattened_error(transport: str = "rest"): +def test_create_exascale_db_storage_vault_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="grpc", ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.start_autonomous_database( - oracledatabase.StartAutonomousDatabaseRequest(), - name="name_value", + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest( + parent="parent_value", + exascale_db_storage_vault_id="exascale_db_storage_vault_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_exascale_db_storage_vault), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.create_exascale_db_storage_vault(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[ + 0 + ] == gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest( + parent="parent_value", + exascale_db_storage_vault_id="exascale_db_storage_vault_id_value", ) -def test_restart_autonomous_database_rest_use_cached_wrapped_rpc(): +def test_create_exascale_db_storage_vault_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -17699,7 +20367,7 @@ def test_restart_autonomous_database_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.restart_autonomous_database + client._transport.create_exascale_db_storage_vault in client._transport._wrapped_methods ) @@ -17709,1674 +20377,27688 @@ def test_restart_autonomous_database_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.restart_autonomous_database + client._transport.create_exascale_db_storage_vault ] = mock_rpc - request = {} - client.restart_autonomous_database(request) + client.create_exascale_db_storage_vault(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() - client.restart_autonomous_database(request) + client.create_exascale_db_storage_vault(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_restart_autonomous_database_rest_required_fields( - request_type=oracledatabase.RestartAutonomousDatabaseRequest, +@pytest.mark.asyncio +async def test_create_exascale_db_storage_vault_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.OracleDatabaseRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.create_exascale_db_storage_vault + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.create_exascale_db_storage_vault + ] = mock_rpc + + request = {} + await client.create_exascale_db_storage_vault(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.create_exascale_db_storage_vault(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_exascale_db_storage_vault_async( + transport: str = "grpc_asyncio", + request_type=gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest, +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - # verify fields with default values are dropped + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).restart_autonomous_database._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_exascale_db_storage_vault), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_exascale_db_storage_vault(request) - # verify required fields with default values are now present + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest() + assert args[0] == request - jsonified_request["name"] = "name_value" + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).restart_autonomous_database._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" +@pytest.mark.asyncio +async def test_create_exascale_db_storage_vault_async_from_dict(): + await test_create_exascale_db_storage_vault_async(request_type=dict) + +def test_create_exascale_db_storage_vault_field_headers(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest() - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + request.parent = "parent_value" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_exascale_db_storage_vault), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_exascale_db_storage_vault(request) - response = client.restart_autonomous_database(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] -def test_restart_autonomous_database_rest_unset_required_fields(): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_create_exascale_db_storage_vault_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - unset_fields = transport.restart_autonomous_database._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest() + request.parent = "parent_value" -def test_restart_autonomous_database_rest_flattened(): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_exascale_db_storage_vault), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.create_exascale_db_storage_vault(request) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" - } + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} +def test_create_exascale_db_storage_vault_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) - client.restart_autonomous_database(**mock_args) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_exascale_db_storage_vault), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_exascale_db_storage_vault( + parent="parent_value", + exascale_db_storage_vault=gco_exascale_db_storage_vault.ExascaleDbStorageVault( + name="name_value" + ), + exascale_db_storage_vault_id="exascale_db_storage_vault_id_value", + ) # Establish that the underlying call was made with the expected # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v1/{name=projects/*/locations/*/autonomousDatabases/*}:restart" - % client.transport._host, - args[1], + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].exascale_db_storage_vault + mock_val = gco_exascale_db_storage_vault.ExascaleDbStorageVault( + name="name_value" ) + assert arg == mock_val + arg = args[0].exascale_db_storage_vault_id + mock_val = "exascale_db_storage_vault_id_value" + assert arg == mock_val -def test_restart_autonomous_database_rest_flattened_error(transport: str = "rest"): +def test_create_exascale_db_storage_vault_flattened_error(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.restart_autonomous_database( - oracledatabase.RestartAutonomousDatabaseRequest(), - name="name_value", + client.create_exascale_db_storage_vault( + gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest(), + parent="parent_value", + exascale_db_storage_vault=gco_exascale_db_storage_vault.ExascaleDbStorageVault( + name="name_value" + ), + exascale_db_storage_vault_id="exascale_db_storage_vault_id_value", ) -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.OracleDatabaseGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. 
- transport = transports.OracleDatabaseGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), +@pytest.mark.asyncio +async def test_create_exascale_db_storage_vault_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - with pytest.raises(ValueError): - client = OracleDatabaseClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - # It is an error to provide an api_key and a transport instance. - transport = transports.OracleDatabaseGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = OracleDatabaseClient( - client_options=options, - transport=transport, - ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_exascale_db_storage_vault), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") - # It is an error to provide an api_key and a credential. - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = OracleDatabaseClient( - client_options=options, credentials=ga_credentials.AnonymousCredentials() + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") ) - - # It is an error to provide scopes and a transport instance. - transport = transports.OracleDatabaseGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = OracleDatabaseClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.create_exascale_db_storage_vault( + parent="parent_value", + exascale_db_storage_vault=gco_exascale_db_storage_vault.ExascaleDbStorageVault( + name="name_value" + ), + exascale_db_storage_vault_id="exascale_db_storage_vault_id_value", ) - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.OracleDatabaseGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = OracleDatabaseClient(transport=transport) - assert client.transport is transport + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].exascale_db_storage_vault + mock_val = gco_exascale_db_storage_vault.ExascaleDbStorageVault( + name="name_value" + ) + assert arg == mock_val + arg = args[0].exascale_db_storage_vault_id + mock_val = "exascale_db_storage_vault_id_value" + assert arg == mock_val -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.OracleDatabaseGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), +@pytest.mark.asyncio +async def test_create_exascale_db_storage_vault_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - channel = transport.grpc_channel - assert channel - transport = transports.OracleDatabaseGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_exascale_db_storage_vault( + gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest(), + parent="parent_value", + exascale_db_storage_vault=gco_exascale_db_storage_vault.ExascaleDbStorageVault( + name="name_value" + ), + exascale_db_storage_vault_id="exascale_db_storage_vault_id_value", + ) @pytest.mark.parametrize( - "transport_class", + "request_type", [ - transports.OracleDatabaseGrpcTransport, - transports.OracleDatabaseGrpcAsyncIOTransport, - transports.OracleDatabaseRestTransport, + exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest, + dict, ], ) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, "default") as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - - -def test_transport_kind_grpc(): - transport = OracleDatabaseClient.get_transport_class("grpc")( - credentials=ga_credentials.AnonymousCredentials() - ) - assert transport.kind == "grpc" - - -def test_initialize_client_w_grpc(): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc" - ) - assert client is not None - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_cloud_exadata_infrastructures_empty_call_grpc(): +def test_delete_exascale_db_storage_vault(request_type, transport: str = "grpc"): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport=transport, ) - # Mock the actual call, and fake the request. + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( - type(client.transport.list_cloud_exadata_infrastructures), "__call__" + type(client.transport.delete_exascale_db_storage_vault), "__call__" ) as call: - call.return_value = oracledatabase.ListCloudExadataInfrastructuresResponse() - client.list_cloud_exadata_infrastructures(request=None) + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_exascale_db_storage_vault(request) - # Establish that the underlying stub method was called. - call.assert_called() + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListCloudExadataInfrastructuresRequest() + request = exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest() + assert args[0] == request - assert args[0] == request_msg + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_cloud_exadata_infrastructure_empty_call_grpc(): +def test_delete_exascale_db_storage_vault_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) - # Mock the actual call, and fake the request. + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( - type(client.transport.get_cloud_exadata_infrastructure), "__call__" + type(client.transport.delete_exascale_db_storage_vault), "__call__" ) as call: - call.return_value = exadata_infra.CloudExadataInfrastructure() - client.get_cloud_exadata_infrastructure(request=None) - - # Establish that the underlying stub method was called. + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.delete_exascale_db_storage_vault(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.GetCloudExadataInfrastructureRequest() + assert args[0] == exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest( + name="name_value", + ) - assert args[0] == request_msg +def test_delete_exascale_db_storage_vault_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_create_cloud_exadata_infrastructure_empty_call_grpc(): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # Mock the actual call, and fake the request. 
- with mock.patch.object( - type(client.transport.create_cloud_exadata_infrastructure), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.create_cloud_exadata_infrastructure(request=None) + # Ensure method has been cached + assert ( + client._transport.delete_exascale_db_storage_vault + in client._transport._wrapped_methods + ) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.CreateCloudExadataInfrastructureRequest() + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_exascale_db_storage_vault + ] = mock_rpc + request = {} + client.delete_exascale_db_storage_vault(request) - assert args[0] == request_msg + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_delete_cloud_exadata_infrastructure_empty_call_grpc(): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + client.delete_exascale_db_storage_vault(request) - # Mock the actual call, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_cloud_exadata_infrastructure), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.delete_cloud_exadata_infrastructure(request=None) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.DeleteCloudExadataInfrastructureRequest() - assert args[0] == request_msg +@pytest.mark.asyncio +async def test_delete_exascale_db_storage_vault_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_cloud_vm_clusters_empty_call_grpc(): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + # Ensure method has been cached + assert ( + client._client._transport.delete_exascale_db_storage_vault + in client._client._transport._wrapped_methods + ) - # Mock the actual call, and fake the request. 
- with mock.patch.object( - type(client.transport.list_cloud_vm_clusters), "__call__" - ) as call: - call.return_value = oracledatabase.ListCloudVmClustersResponse() - client.list_cloud_vm_clusters(request=None) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_exascale_db_storage_vault + ] = mock_rpc - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListCloudVmClustersRequest() + request = {} + await client.delete_exascale_db_storage_vault(request) - assert args[0] == request_msg + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_cloud_vm_cluster_empty_call_grpc(): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + await client.delete_exascale_db_storage_vault(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_exascale_db_storage_vault_async( + transport: str = "grpc_asyncio", + request_type=exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest, +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - # Mock the actual call, and fake the request. 
+ # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.get_cloud_vm_cluster), "__call__" + type(client.transport.delete_exascale_db_storage_vault), "__call__" ) as call: - call.return_value = vm_cluster.CloudVmCluster() - client.get_cloud_vm_cluster(request=None) + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_exascale_db_storage_vault(request) - # Establish that the underlying stub method was called. - call.assert_called() + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.GetCloudVmClusterRequest() + request = exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest() + assert args[0] == request - assert args[0] == request_msg + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_create_cloud_vm_cluster_empty_call_grpc(): +@pytest.mark.asyncio +async def test_delete_exascale_db_storage_vault_async_from_dict(): + await test_delete_exascale_db_storage_vault_async(request_type=dict) + + +def test_delete_exascale_db_storage_vault_field_headers(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", ) - # Mock the actual call, and fake the request. + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.create_cloud_vm_cluster), "__call__" + type(client.transport.delete_exascale_db_storage_vault), "__call__" ) as call: call.return_value = operations_pb2.Operation(name="operations/op") - client.create_cloud_vm_cluster(request=None) + client.delete_exascale_db_storage_vault(request) - # Establish that the underlying stub method was called. - call.assert_called() + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.CreateCloudVmClusterRequest() + assert args[0] == request - assert args[0] == request_msg + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_delete_cloud_vm_cluster_empty_call_grpc(): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", +@pytest.mark.asyncio +async def test_delete_exascale_db_storage_vault_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - # Mock the actual call, and fake the request. + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( - type(client.transport.delete_cloud_vm_cluster), "__call__" + type(client.transport.delete_exascale_db_storage_vault), "__call__" ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.delete_cloud_vm_cluster(request=None) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_exascale_db_storage_vault(request) - # Establish that the underlying stub method was called. - call.assert_called() + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.DeleteCloudVmClusterRequest() + assert args[0] == request - assert args[0] == request_msg + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_entitlements_empty_call_grpc(): +def test_delete_exascale_db_storage_vault_flattened(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", ) - # Mock the actual call, and fake the request. + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.list_entitlements), "__call__" + type(client.transport.delete_exascale_db_storage_vault), "__call__" ) as call: - call.return_value = oracledatabase.ListEntitlementsResponse() - client.list_entitlements(request=None) + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.delete_exascale_db_storage_vault( + name="name_value", + ) - # Establish that the underlying stub method was called. - call.assert_called() + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListEntitlementsRequest() - - assert args[0] == request_msg + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_db_servers_empty_call_grpc(): +def test_delete_exascale_db_storage_vault_flattened_error(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_db_servers), "__call__") as call: - call.return_value = oracledatabase.ListDbServersResponse() - client.list_db_servers(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListDbServersRequest() - - assert args[0] == request_msg + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_exascale_db_storage_vault( + exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest(), + name="name_value", + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-def test_list_db_nodes_empty_call_grpc(): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", +@pytest.mark.asyncio +async def test_delete_exascale_db_storage_vault_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_db_nodes), "__call__") as call: - call.return_value = oracledatabase.ListDbNodesResponse() - client.list_db_nodes(request=None) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_exascale_db_storage_vault), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") - # Establish that the underlying stub method was called. - call.assert_called() + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_exascale_db_storage_vault( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListDbNodesRequest() + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val - assert args[0] == request_msg +@pytest.mark.asyncio +async def test_delete_exascale_db_storage_vault_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-def test_list_gi_versions_empty_call_grpc(): + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_exascale_db_storage_vault( + exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest, + dict, + ], +) +def test_list_db_system_initial_storage_sizes(request_type, transport: str = "grpc"): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport=transport, ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_gi_versions), "__call__") as call: - call.return_value = oracledatabase.ListGiVersionsResponse() - client.list_gi_versions(request=None) + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # Establish that the underlying stub method was called. - call.assert_called() + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_db_system_initial_storage_sizes), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse( + next_page_token="next_page_token_value", + ) + ) + response = client.list_db_system_initial_storage_sizes(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListGiVersionsRequest() + request = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest() + ) + assert args[0] == request - assert args[0] == request_msg + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDbSystemInitialStorageSizesPager) + assert response.next_page_token == "next_page_token_value" -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_db_system_shapes_empty_call_grpc(): +def test_list_db_system_initial_storage_sizes_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_db_system_shapes), "__call__" - ) as call: - call.return_value = oracledatabase.ListDbSystemShapesResponse() - client.list_db_system_shapes(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListDbSystemShapesRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_autonomous_databases_empty_call_grpc(): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. 
+ request = db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest( + parent="parent_value", + page_token="page_token_value", ) - # Mock the actual call, and fake the request. + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.list_autonomous_databases), "__call__" + type(client.transport.list_db_system_initial_storage_sizes), "__call__" ) as call: - call.return_value = oracledatabase.ListAutonomousDatabasesResponse() - client.list_autonomous_databases(request=None) - - # Establish that the underlying stub method was called. + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.list_db_system_initial_storage_sizes(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListAutonomousDatabasesRequest() + assert args[ + 0 + ] == db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest( + parent="parent_value", + page_token="page_token_value", + ) - assert args[0] == request_msg +def test_list_db_system_initial_storage_sizes_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_autonomous_database_empty_call_grpc(): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # Mock the actual call, and fake the request. 
- with mock.patch.object( - type(client.transport.get_autonomous_database), "__call__" - ) as call: - call.return_value = autonomous_database.AutonomousDatabase() - client.get_autonomous_database(request=None) + # Ensure method has been cached + assert ( + client._transport.list_db_system_initial_storage_sizes + in client._transport._wrapped_methods + ) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.GetAutonomousDatabaseRequest() + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_db_system_initial_storage_sizes + ] = mock_rpc + request = {} + client.list_db_system_initial_storage_sizes(request) - assert args[0] == request_msg + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + client.list_db_system_initial_storage_sizes(request) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_create_autonomous_database_empty_call_grpc(): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_autonomous_database), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.create_autonomous_database(request=None) - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.CreateAutonomousDatabaseRequest() +@pytest.mark.asyncio +async def test_list_db_system_initial_storage_sizes_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - assert args[0] == request_msg + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + # Ensure method has been cached + assert ( + client._client._transport.list_db_system_initial_storage_sizes + in client._client._transport._wrapped_methods + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_delete_autonomous_database_empty_call_grpc(): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_db_system_initial_storage_sizes + ] = mock_rpc - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.delete_autonomous_database), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.delete_autonomous_database(request=None) + request = {} + await client.list_db_system_initial_storage_sizes(request) - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.DeleteAutonomousDatabaseRequest() + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - assert args[0] == request_msg + await client.list_db_system_initial_storage_sizes(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_restore_autonomous_database_empty_call_grpc(): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", +@pytest.mark.asyncio +async def test_list_db_system_initial_storage_sizes_async( + transport: str = "grpc_asyncio", + request_type=db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest, +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - # Mock the actual call, and fake the request. + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.restore_autonomous_database), "__call__" + type(client.transport.list_db_system_initial_storage_sizes), "__call__" ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.restore_autonomous_database(request=None) + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_db_system_initial_storage_sizes(request) - # Establish that the underlying stub method was called. - call.assert_called() + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.RestoreAutonomousDatabaseRequest() + request = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest() + ) + assert args[0] == request - assert args[0] == request_msg + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDbSystemInitialStorageSizesAsyncPager) + assert response.next_page_token == "next_page_token_value" -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_generate_autonomous_database_wallet_empty_call_grpc(): +@pytest.mark.asyncio +async def test_list_db_system_initial_storage_sizes_async_from_dict(): + await test_list_db_system_initial_storage_sizes_async(request_type=dict) + + +def test_list_db_system_initial_storage_sizes_field_headers(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", ) - # Mock the actual call, and fake the request. + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( - type(client.transport.generate_autonomous_database_wallet), "__call__" + type(client.transport.list_db_system_initial_storage_sizes), "__call__" ) as call: - call.return_value = oracledatabase.GenerateAutonomousDatabaseWalletResponse() - client.generate_autonomous_database_wallet(request=None) + call.return_value = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse() + ) + client.list_db_system_initial_storage_sizes(request) - # Establish that the underlying stub method was called. - call.assert_called() + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.GenerateAutonomousDatabaseWalletRequest() + assert args[0] == request - assert args[0] == request_msg + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_autonomous_db_versions_empty_call_grpc(): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", +@pytest.mark.asyncio +async def test_list_db_system_initial_storage_sizes_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - # Mock the actual call, and fake the request. + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( - type(client.transport.list_autonomous_db_versions), "__call__" + type(client.transport.list_db_system_initial_storage_sizes), "__call__" ) as call: - call.return_value = oracledatabase.ListAutonomousDbVersionsResponse() - client.list_autonomous_db_versions(request=None) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse() + ) + await client.list_db_system_initial_storage_sizes(request) - # Establish that the underlying stub method was called. - call.assert_called() + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListAutonomousDbVersionsRequest() + assert args[0] == request - assert args[0] == request_msg + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_autonomous_database_character_sets_empty_call_grpc(): +def test_list_db_system_initial_storage_sizes_flattened(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", ) - # Mock the actual call, and fake the request. + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.list_autonomous_database_character_sets), "__call__" + type(client.transport.list_db_system_initial_storage_sizes), "__call__" ) as call: - call.return_value = oracledatabase.ListAutonomousDatabaseCharacterSetsResponse() - client.list_autonomous_database_character_sets(request=None) + # Designate an appropriate return value for the call. 
+ call.return_value = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_db_system_initial_storage_sizes( + parent="parent_value", + ) - # Establish that the underlying stub method was called. - call.assert_called() + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListAutonomousDatabaseCharacterSetsRequest() - - assert args[0] == request_msg + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_autonomous_database_backups_empty_call_grpc(): +def test_list_db_system_initial_storage_sizes_flattened_error(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_autonomous_database_backups), "__call__" - ) as call: - call.return_value = oracledatabase.ListAutonomousDatabaseBackupsResponse() - client.list_autonomous_database_backups(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListAutonomousDatabaseBackupsRequest() - - assert args[0] == request_msg + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_db_system_initial_storage_sizes( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest(), + parent="parent_value", + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. 
request == None and no flattened fields passed, work. -def test_stop_autonomous_database_empty_call_grpc(): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", +@pytest.mark.asyncio +async def test_list_db_system_initial_storage_sizes_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - # Mock the actual call, and fake the request. + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.stop_autonomous_database), "__call__" + type(client.transport.list_db_system_initial_storage_sizes), "__call__" ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.stop_autonomous_database(request=None) + # Designate an appropriate return value for the call. + call.return_value = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse() + ) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.StopAutonomousDatabaseRequest() + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_db_system_initial_storage_sizes( + parent="parent_value", + ) - assert args[0] == request_msg + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-def test_start_autonomous_database_empty_call_grpc(): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", +@pytest.mark.asyncio +async def test_list_db_system_initial_storage_sizes_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.start_autonomous_database), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.start_autonomous_database(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.StartAutonomousDatabaseRequest() - - assert args[0] == request_msg + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_db_system_initial_storage_sizes( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest(), + parent="parent_value", + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_restart_autonomous_database_empty_call_grpc(): +def test_list_db_system_initial_storage_sizes_pager(transport_name: str = "grpc"): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport=transport_name, ) - # Mock the actual call, and fake the request. + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.restart_autonomous_database), "__call__" + type(client.transport.list_db_system_initial_storage_sizes), "__call__" ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.restart_autonomous_database(request=None) - - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.RestartAutonomousDatabaseRequest() - - assert args[0] == request_msg - + # Set the response to a series of pages. + call.side_effect = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse( + db_system_initial_storage_sizes=[ + db_system_initial_storage_size.DbSystemInitialStorageSize(), + db_system_initial_storage_size.DbSystemInitialStorageSize(), + db_system_initial_storage_size.DbSystemInitialStorageSize(), + ], + next_page_token="abc", + ), + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse( + db_system_initial_storage_sizes=[], + next_page_token="def", + ), + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse( + db_system_initial_storage_sizes=[ + db_system_initial_storage_size.DbSystemInitialStorageSize(), + ], + next_page_token="ghi", + ), + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse( + db_system_initial_storage_sizes=[ + db_system_initial_storage_size.DbSystemInitialStorageSize(), + db_system_initial_storage_size.DbSystemInitialStorageSize(), + ], + ), + RuntimeError, + ) -def test_transport_kind_grpc_asyncio(): - transport = OracleDatabaseAsyncClient.get_transport_class("grpc_asyncio")( - credentials=async_anonymous_credentials() - ) - assert transport.kind == "grpc_asyncio" + expected_metadata = () + retry = retries.Retry() + timeout = 5 + expected_metadata = tuple(expected_metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_db_system_initial_storage_sizes( + request={}, retry=retry, timeout=timeout + ) + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout -def test_initialize_client_w_grpc_asyncio(): - client = OracleDatabaseAsyncClient( - credentials=async_anonymous_credentials(), transport="grpc_asyncio" - ) - assert client is not None + results = list(pager) + assert 
len(results) == 6 + assert all( + isinstance(i, db_system_initial_storage_size.DbSystemInitialStorageSize) + for i in results + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_list_cloud_exadata_infrastructures_empty_call_grpc_asyncio(): - client = OracleDatabaseAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", +def test_list_db_system_initial_storage_sizes_pages(transport_name: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, ) - # Mock the actual call, and fake the request. + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.list_cloud_exadata_infrastructures), "__call__" + type(client.transport.list_db_system_initial_storage_sizes), "__call__" ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - oracledatabase.ListCloudExadataInfrastructuresResponse( - next_page_token="next_page_token_value", - ) + # Set the response to a series of pages. 
+ call.side_effect = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse( + db_system_initial_storage_sizes=[ + db_system_initial_storage_size.DbSystemInitialStorageSize(), + db_system_initial_storage_size.DbSystemInitialStorageSize(), + db_system_initial_storage_size.DbSystemInitialStorageSize(), + ], + next_page_token="abc", + ), + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse( + db_system_initial_storage_sizes=[], + next_page_token="def", + ), + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse( + db_system_initial_storage_sizes=[ + db_system_initial_storage_size.DbSystemInitialStorageSize(), + ], + next_page_token="ghi", + ), + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse( + db_system_initial_storage_sizes=[ + db_system_initial_storage_size.DbSystemInitialStorageSize(), + db_system_initial_storage_size.DbSystemInitialStorageSize(), + ], + ), + RuntimeError, ) - await client.list_cloud_exadata_infrastructures(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListCloudExadataInfrastructuresRequest() - - assert args[0] == request_msg + pages = list(client.list_db_system_initial_storage_sizes(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio -async def test_get_cloud_exadata_infrastructure_empty_call_grpc_asyncio(): +async def test_list_db_system_initial_storage_sizes_async_pager(): client = OracleDatabaseAsyncClient( credentials=async_anonymous_credentials(), - transport="grpc_asyncio", ) - # Mock the actual call, and fake the request. + # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( - type(client.transport.get_cloud_exadata_infrastructure), "__call__" + type(client.transport.list_db_system_initial_storage_sizes), + "__call__", + new_callable=mock.AsyncMock, ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - exadata_infra.CloudExadataInfrastructure( - name="name_value", - display_name="display_name_value", - gcp_oracle_zone="gcp_oracle_zone_value", - entitlement_id="entitlement_id_value", - ) + # Set the response to a series of pages. + call.side_effect = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse( + db_system_initial_storage_sizes=[ + db_system_initial_storage_size.DbSystemInitialStorageSize(), + db_system_initial_storage_size.DbSystemInitialStorageSize(), + db_system_initial_storage_size.DbSystemInitialStorageSize(), + ], + next_page_token="abc", + ), + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse( + db_system_initial_storage_sizes=[], + next_page_token="def", + ), + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse( + db_system_initial_storage_sizes=[ + db_system_initial_storage_size.DbSystemInitialStorageSize(), + ], + next_page_token="ghi", + ), + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse( + db_system_initial_storage_sizes=[ + db_system_initial_storage_size.DbSystemInitialStorageSize(), + db_system_initial_storage_size.DbSystemInitialStorageSize(), + ], + ), + RuntimeError, ) - await client.get_cloud_exadata_infrastructure(request=None) - - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.GetCloudExadataInfrastructureRequest() + async_pager = await client.list_db_system_initial_storage_sizes( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) - assert args[0] == request_msg + assert len(responses) == 6 + assert all( + isinstance(i, db_system_initial_storage_size.DbSystemInitialStorageSize) + for i in responses + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio -async def test_create_cloud_exadata_infrastructure_empty_call_grpc_asyncio(): +async def test_list_db_system_initial_storage_sizes_async_pages(): client = OracleDatabaseAsyncClient( credentials=async_anonymous_credentials(), - transport="grpc_asyncio", ) - # Mock the actual call, and fake the request. + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.create_cloud_exadata_infrastructure), "__call__" + type(client.transport.list_db_system_initial_storage_sizes), + "__call__", + new_callable=mock.AsyncMock, ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") + # Set the response to a series of pages. 
+ call.side_effect = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse( + db_system_initial_storage_sizes=[ + db_system_initial_storage_size.DbSystemInitialStorageSize(), + db_system_initial_storage_size.DbSystemInitialStorageSize(), + db_system_initial_storage_size.DbSystemInitialStorageSize(), + ], + next_page_token="abc", + ), + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse( + db_system_initial_storage_sizes=[], + next_page_token="def", + ), + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse( + db_system_initial_storage_sizes=[ + db_system_initial_storage_size.DbSystemInitialStorageSize(), + ], + next_page_token="ghi", + ), + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse( + db_system_initial_storage_sizes=[ + db_system_initial_storage_size.DbSystemInitialStorageSize(), + db_system_initial_storage_size.DbSystemInitialStorageSize(), + ], + ), + RuntimeError, ) - await client.create_cloud_exadata_infrastructure(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.CreateCloudExadataInfrastructureRequest() - - assert args[0] == request_msg + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_db_system_initial_storage_sizes(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-@pytest.mark.asyncio -async def test_delete_cloud_exadata_infrastructure_empty_call_grpc_asyncio(): - client = OracleDatabaseAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", +@pytest.mark.parametrize( + "request_type", + [ + database.ListDatabasesRequest, + dict, + ], +) +def test_list_databases(request_type, transport: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.delete_cloud_exadata_infrastructure), "__call__" - ) as call: + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_databases), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") + call.return_value = database.ListDatabasesResponse( + next_page_token="next_page_token_value", ) - await client.delete_cloud_exadata_infrastructure(request=None) + response = client.list_databases(request) - # Establish that the underlying stub method was called. - call.assert_called() + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.DeleteCloudExadataInfrastructureRequest() + request = database.ListDatabasesRequest() + assert args[0] == request - assert args[0] == request_msg + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDatabasesPager) + assert response.next_page_token == "next_page_token_value" -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. 
request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_list_cloud_vm_clusters_empty_call_grpc_asyncio(): - client = OracleDatabaseAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", +def test_list_databases_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_cloud_vm_clusters), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - oracledatabase.ListCloudVmClustersResponse( - next_page_token="next_page_token_value", - ) - ) - await client.list_cloud_vm_clusters(request=None) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = database.ListDatabasesRequest( + parent="parent_value", + page_token="page_token_value", + filter="filter_value", + ) - # Establish that the underlying stub method was called. + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_databases), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.list_databases(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListCloudVmClustersRequest() + assert args[0] == database.ListDatabasesRequest( + parent="parent_value", + page_token="page_token_value", + filter="filter_value", + ) - assert args[0] == request_msg +def test_list_databases_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_get_cloud_vm_cluster_empty_call_grpc_asyncio(): - client = OracleDatabaseAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.get_cloud_vm_cluster), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - vm_cluster.CloudVmCluster( - name="name_value", - exadata_infrastructure="exadata_infrastructure_value", - display_name="display_name_value", - gcp_oracle_zone="gcp_oracle_zone_value", - cidr="cidr_value", - backup_subnet_cidr="backup_subnet_cidr_value", - network="network_value", - ) + # Ensure method has been cached + assert client._transport.list_databases in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
) - await client.get_cloud_vm_cluster(request=None) + client._transport._wrapped_methods[client._transport.list_databases] = mock_rpc + request = {} + client.list_databases(request) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.GetCloudVmClusterRequest() + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - assert args[0] == request_msg + client.list_databases(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio -async def test_create_cloud_vm_cluster_empty_call_grpc_asyncio(): - client = OracleDatabaseAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_cloud_vm_cluster), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") +async def test_list_databases_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - await client.create_cloud_vm_cluster(request=None) - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.CreateCloudVmClusterRequest() + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - assert args[0] == request_msg + # Ensure method has been cached + assert ( + client._client._transport.list_databases + in client._client._transport._wrapped_methods + ) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_databases + ] = mock_rpc -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_delete_cloud_vm_cluster_empty_call_grpc_asyncio(): - client = OracleDatabaseAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + request = {} + await client.list_databases(request) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.delete_cloud_vm_cluster), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.delete_cloud_vm_cluster(request=None) + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.DeleteCloudVmClusterRequest() + await client.list_databases(request) - assert args[0] == request_msg + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
@pytest.mark.asyncio -async def test_list_entitlements_empty_call_grpc_asyncio(): +async def test_list_databases_async( + transport: str = "grpc_asyncio", request_type=database.ListDatabasesRequest +): client = OracleDatabaseAsyncClient( credentials=async_anonymous_credentials(), - transport="grpc_asyncio", + transport=transport, ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_entitlements), "__call__" - ) as call: + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_databases), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - oracledatabase.ListEntitlementsResponse( + database.ListDatabasesResponse( next_page_token="next_page_token_value", ) ) - await client.list_entitlements(request=None) + response = await client.list_databases(request) - # Establish that the underlying stub method was called. - call.assert_called() + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListEntitlementsRequest() + request = database.ListDatabasesRequest() + assert args[0] == request - assert args[0] == request_msg + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDatabasesAsyncPager) + assert response.next_page_token == "next_page_token_value" -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
@pytest.mark.asyncio -async def test_list_db_servers_empty_call_grpc_asyncio(): - client = OracleDatabaseAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", +async def test_list_databases_async_from_dict(): + await test_list_databases_async(request_type=dict) + + +def test_list_databases_field_headers(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_db_servers), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - oracledatabase.ListDbServersResponse( - next_page_token="next_page_token_value", - ) - ) - await client.list_db_servers(request=None) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = database.ListDatabasesRequest() - # Establish that the underlying stub method was called. - call.assert_called() + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_databases), "__call__") as call: + call.return_value = database.ListDatabasesResponse() + client.list_databases(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListDbServersRequest() + assert args[0] == request - assert args[0] == request_msg + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
@pytest.mark.asyncio -async def test_list_db_nodes_empty_call_grpc_asyncio(): +async def test_list_databases_field_headers_async(): client = OracleDatabaseAsyncClient( credentials=async_anonymous_credentials(), - transport="grpc_asyncio", ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_db_nodes), "__call__") as call: - # Designate an appropriate return value for the call. + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = database.ListDatabasesRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_databases), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - oracledatabase.ListDbNodesResponse( - next_page_token="next_page_token_value", - ) + database.ListDatabasesResponse() ) - await client.list_db_nodes(request=None) + await client.list_databases(request) - # Establish that the underlying stub method was called. - call.assert_called() + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListDbNodesRequest() + assert args[0] == request - assert args[0] == request_msg + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-@pytest.mark.asyncio -async def test_list_gi_versions_empty_call_grpc_asyncio(): - client = OracleDatabaseAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", +def test_list_databases_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_gi_versions), "__call__") as call: + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_databases), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - oracledatabase.ListGiVersionsResponse( - next_page_token="next_page_token_value", - ) + call.return_value = database.ListDatabasesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_databases( + parent="parent_value", ) - await client.list_gi_versions(request=None) - # Establish that the underlying stub method was called. - call.assert_called() + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListGiVersionsRequest() + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val - assert args[0] == request_msg + +def test_list_databases_flattened_error(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_databases( + database.ListDatabasesRequest(), + parent="parent_value", + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
@pytest.mark.asyncio -async def test_list_db_system_shapes_empty_call_grpc_asyncio(): +async def test_list_databases_flattened_async(): client = OracleDatabaseAsyncClient( credentials=async_anonymous_credentials(), - transport="grpc_asyncio", ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_db_system_shapes), "__call__" - ) as call: + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_databases), "__call__") as call: # Designate an appropriate return value for the call. + call.return_value = database.ListDatabasesResponse() + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - oracledatabase.ListDbSystemShapesResponse( - next_page_token="next_page_token_value", - ) + database.ListDatabasesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_databases( + parent="parent_value", ) - await client.list_db_system_shapes(request=None) - # Establish that the underlying stub method was called. - call.assert_called() + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListDbSystemShapesRequest() - - assert args[0] == request_msg + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio -async def test_list_autonomous_databases_empty_call_grpc_asyncio(): +async def test_list_databases_flattened_error_async(): client = OracleDatabaseAsyncClient( credentials=async_anonymous_credentials(), - transport="grpc_asyncio", ) - # Mock the actual call, and fake the request. 
- with mock.patch.object( - type(client.transport.list_autonomous_databases), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - oracledatabase.ListAutonomousDatabasesResponse( - next_page_token="next_page_token_value", - ) + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_databases( + database.ListDatabasesRequest(), + parent="parent_value", ) - await client.list_autonomous_databases(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListAutonomousDatabasesRequest() - - assert args[0] == request_msg -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_get_autonomous_database_empty_call_grpc_asyncio(): - client = OracleDatabaseAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", +def test_list_databases_pager(transport_name: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.get_autonomous_database), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - autonomous_database.AutonomousDatabase( - name="name_value", - database="database_value", - display_name="display_name_value", - entitlement_id="entitlement_id_value", - admin_password="admin_password_value", - network="network_value", - cidr="cidr_value", - ) + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_databases), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + database.ListDatabasesResponse( + databases=[ + database.Database(), + database.Database(), + database.Database(), + ], + next_page_token="abc", + ), + database.ListDatabasesResponse( + databases=[], + next_page_token="def", + ), + database.ListDatabasesResponse( + databases=[ + database.Database(), + ], + next_page_token="ghi", + ), + database.ListDatabasesResponse( + databases=[ + database.Database(), + database.Database(), + ], + ), + RuntimeError, ) - await client.get_autonomous_database(request=None) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.GetAutonomousDatabaseRequest() + expected_metadata = () + retry = retries.Retry() + timeout = 5 + expected_metadata = tuple(expected_metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_databases(request={}, retry=retry, timeout=timeout) - assert args[0] == request_msg + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, database.Database) for i in results) + + +def test_list_databases_pages(transport_name: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_databases), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + database.ListDatabasesResponse( + databases=[ + database.Database(), + database.Database(), + database.Database(), + ], + next_page_token="abc", + ), + database.ListDatabasesResponse( + databases=[], + next_page_token="def", + ), + database.ListDatabasesResponse( + databases=[ + database.Database(), + ], + next_page_token="ghi", + ), + database.ListDatabasesResponse( + databases=[ + database.Database(), + database.Database(), + ], + ), + RuntimeError, + ) + pages = list(client.list_databases(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio -async def test_create_autonomous_database_empty_call_grpc_asyncio(): +async def test_list_databases_async_pager(): client = OracleDatabaseAsyncClient( credentials=async_anonymous_credentials(), - transport="grpc_asyncio", ) - # Mock the actual call, and fake the request. + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.create_autonomous_database), "__call__" + type(client.transport.list_databases), "__call__", new_callable=mock.AsyncMock ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.create_autonomous_database(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.CreateAutonomousDatabaseRequest() + # Set the response to a series of pages. 
+ call.side_effect = ( + database.ListDatabasesResponse( + databases=[ + database.Database(), + database.Database(), + database.Database(), + ], + next_page_token="abc", + ), + database.ListDatabasesResponse( + databases=[], + next_page_token="def", + ), + database.ListDatabasesResponse( + databases=[ + database.Database(), + ], + next_page_token="ghi", + ), + database.ListDatabasesResponse( + databases=[ + database.Database(), + database.Database(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_databases( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) - assert args[0] == request_msg + assert len(responses) == 6 + assert all(isinstance(i, database.Database) for i in responses) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio -async def test_delete_autonomous_database_empty_call_grpc_asyncio(): +async def test_list_databases_async_pages(): client = OracleDatabaseAsyncClient( credentials=async_anonymous_credentials(), - transport="grpc_asyncio", ) - # Mock the actual call, and fake the request. + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.delete_autonomous_database), "__call__" + type(client.transport.list_databases), "__call__", new_callable=mock.AsyncMock ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + database.ListDatabasesResponse( + databases=[ + database.Database(), + database.Database(), + database.Database(), + ], + next_page_token="abc", + ), + database.ListDatabasesResponse( + databases=[], + next_page_token="def", + ), + database.ListDatabasesResponse( + databases=[ + database.Database(), + ], + next_page_token="ghi", + ), + database.ListDatabasesResponse( + databases=[ + database.Database(), + database.Database(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_databases(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + database.GetDatabaseRequest, + dict, + ], +) +def test_get_database(request_type, transport: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_database), "__call__") as call: # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") + call.return_value = database.Database( + name="name_value", + db_name="db_name_value", + db_unique_name="db_unique_name_value", + admin_password="admin_password_value", + tde_wallet_password="tde_wallet_password_value", + character_set="character_set_value", + ncharacter_set="ncharacter_set_value", + oci_url="oci_url_value", + database_id="database_id_value", + db_home_name="db_home_name_value", + gcp_oracle_zone="gcp_oracle_zone_value", + ops_insights_status=database.Database.OperationsInsightsStatus.ENABLING, ) - await client.delete_autonomous_database(request=None) + response = client.get_database(request) - # Establish that the underlying stub method was called. - call.assert_called() + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.DeleteAutonomousDatabaseRequest() + request = database.GetDatabaseRequest() + assert args[0] == request - assert args[0] == request_msg + # Establish that the response is the type that we expect. 
+ assert isinstance(response, database.Database) + assert response.name == "name_value" + assert response.db_name == "db_name_value" + assert response.db_unique_name == "db_unique_name_value" + assert response.admin_password == "admin_password_value" + assert response.tde_wallet_password == "tde_wallet_password_value" + assert response.character_set == "character_set_value" + assert response.ncharacter_set == "ncharacter_set_value" + assert response.oci_url == "oci_url_value" + assert response.database_id == "database_id_value" + assert response.db_home_name == "db_home_name_value" + assert response.gcp_oracle_zone == "gcp_oracle_zone_value" + assert ( + response.ops_insights_status + == database.Database.OperationsInsightsStatus.ENABLING + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_restore_autonomous_database_empty_call_grpc_asyncio(): - client = OracleDatabaseAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", +def test_get_database_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.restore_autonomous_database), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.restore_autonomous_database(request=None) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. 
+ request = database.GetDatabaseRequest( + name="name_value", + ) - # Establish that the underlying stub method was called. + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_database), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_database(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.RestoreAutonomousDatabaseRequest() + assert args[0] == database.GetDatabaseRequest( + name="name_value", + ) - assert args[0] == request_msg +def test_get_database_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_generate_autonomous_database_wallet_empty_call_grpc_asyncio(): - client = OracleDatabaseAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.generate_autonomous_database_wallet), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - oracledatabase.GenerateAutonomousDatabaseWalletResponse( - archive_content=b"archive_content_blob", - ) + # Ensure method has been cached + assert client._transport.get_database in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. ) - await client.generate_autonomous_database_wallet(request=None) + client._transport._wrapped_methods[client._transport.get_database] = mock_rpc + request = {} + client.get_database(request) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.GenerateAutonomousDatabaseWalletRequest() + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - assert args[0] == request_msg + client.get_database(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio -async def test_list_autonomous_db_versions_empty_call_grpc_asyncio(): - client = OracleDatabaseAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) +async def test_get_database_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - # Mock the actual call, and fake the request. 
- with mock.patch.object( - type(client.transport.list_autonomous_db_versions), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - oracledatabase.ListAutonomousDbVersionsResponse( - next_page_token="next_page_token_value", - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_database + in client._client._transport._wrapped_methods ) - await client.list_autonomous_db_versions(request=None) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListAutonomousDbVersionsRequest() + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_database + ] = mock_rpc - assert args[0] == request_msg + request = {} + await client.get_database(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.get_database(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio -async def test_list_autonomous_database_character_sets_empty_call_grpc_asyncio(): +async def test_get_database_async( + transport: str = "grpc_asyncio", request_type=database.GetDatabaseRequest +): client = OracleDatabaseAsyncClient( credentials=async_anonymous_credentials(), - transport="grpc_asyncio", + transport=transport, ) - # Mock the actual call, and fake the request. 
- with mock.patch.object( - type(client.transport.list_autonomous_database_character_sets), "__call__" - ) as call: + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_database), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - oracledatabase.ListAutonomousDatabaseCharacterSetsResponse( - next_page_token="next_page_token_value", + database.Database( + name="name_value", + db_name="db_name_value", + db_unique_name="db_unique_name_value", + admin_password="admin_password_value", + tde_wallet_password="tde_wallet_password_value", + character_set="character_set_value", + ncharacter_set="ncharacter_set_value", + oci_url="oci_url_value", + database_id="database_id_value", + db_home_name="db_home_name_value", + gcp_oracle_zone="gcp_oracle_zone_value", + ops_insights_status=database.Database.OperationsInsightsStatus.ENABLING, ) ) - await client.list_autonomous_database_character_sets(request=None) + response = await client.get_database(request) - # Establish that the underlying stub method was called. - call.assert_called() + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListAutonomousDatabaseCharacterSetsRequest() + request = database.GetDatabaseRequest() + assert args[0] == request - assert args[0] == request_msg + # Establish that the response is the type that we expect. 
+ assert isinstance(response, database.Database) + assert response.name == "name_value" + assert response.db_name == "db_name_value" + assert response.db_unique_name == "db_unique_name_value" + assert response.admin_password == "admin_password_value" + assert response.tde_wallet_password == "tde_wallet_password_value" + assert response.character_set == "character_set_value" + assert response.ncharacter_set == "ncharacter_set_value" + assert response.oci_url == "oci_url_value" + assert response.database_id == "database_id_value" + assert response.db_home_name == "db_home_name_value" + assert response.gcp_oracle_zone == "gcp_oracle_zone_value" + assert ( + response.ops_insights_status + == database.Database.OperationsInsightsStatus.ENABLING + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio -async def test_list_autonomous_database_backups_empty_call_grpc_asyncio(): - client = OracleDatabaseAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", +async def test_get_database_async_from_dict(): + await test_get_database_async(request_type=dict) + + +def test_get_database_field_headers(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_autonomous_database_backups), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - oracledatabase.ListAutonomousDatabaseBackupsResponse( - next_page_token="next_page_token_value", - ) - ) - await client.list_autonomous_database_backups(request=None) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = database.GetDatabaseRequest() - # Establish that the underlying stub method was called. 
- call.assert_called() + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_database), "__call__") as call: + call.return_value = database.Database() + client.get_database(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListAutonomousDatabaseBackupsRequest() + assert args[0] == request - assert args[0] == request_msg + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio -async def test_stop_autonomous_database_empty_call_grpc_asyncio(): +async def test_get_database_field_headers_async(): client = OracleDatabaseAsyncClient( credentials=async_anonymous_credentials(), - transport="grpc_asyncio", ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.stop_autonomous_database), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.stop_autonomous_database(request=None) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = database.GetDatabaseRequest() - # Establish that the underlying stub method was called. - call.assert_called() + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_database), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(database.Database()) + await client.get_database(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.StopAutonomousDatabaseRequest() + assert args[0] == request - assert args[0] == request_msg + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_start_autonomous_database_empty_call_grpc_asyncio(): - client = OracleDatabaseAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", +def test_get_database_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.start_autonomous_database), "__call__" - ) as call: + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_database), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") + call.return_value = database.Database() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_database( + name="name_value", ) - await client.start_autonomous_database(request=None) - # Establish that the underlying stub method was called. - call.assert_called() + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.StartAutonomousDatabaseRequest() + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val - assert args[0] == request_msg + +def test_get_database_flattened_error(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_database( + database.GetDatabaseRequest(), + name="name_value", + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio -async def test_restart_autonomous_database_empty_call_grpc_asyncio(): +async def test_get_database_flattened_async(): client = OracleDatabaseAsyncClient( credentials=async_anonymous_credentials(), - transport="grpc_asyncio", ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.restart_autonomous_database), "__call__" - ) as call: + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_database), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") + call.return_value = database.Database() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(database.Database()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_database( + name="name_value", ) - await client.restart_autonomous_database(request=None) - # Establish that the underlying stub method was called. - call.assert_called() + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.RestartAutonomousDatabaseRequest() - - assert args[0] == request_msg - - -def test_transport_kind_rest(): - transport = OracleDatabaseClient.get_transport_class("rest")( - credentials=ga_credentials.AnonymousCredentials() - ) - assert transport.kind == "rest" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val -def test_list_cloud_exadata_infrastructures_rest_bad_request( - request_type=oracledatabase.ListCloudExadataInfrastructuresRequest, -): - client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" +@pytest.mark.asyncio +async def test_get_database_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/locations/sample2"} - request = request_type(**request_init) - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_cloud_exadata_infrastructures(request) + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_database( + database.GetDatabaseRequest(), + name="name_value", + ) @pytest.mark.parametrize( "request_type", [ - oracledatabase.ListCloudExadataInfrastructuresRequest, + pluggable_database.ListPluggableDatabasesRequest, dict, ], ) -def test_list_cloud_exadata_infrastructures_rest_call_success(request_type): +def test_list_pluggable_databases(request_type, transport: str = "grpc"): client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/locations/sample2"} - request = request_type(**request_init) + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = oracledatabase.ListCloudExadataInfrastructuresResponse( + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pluggable_databases), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = pluggable_database.ListPluggableDatabasesResponse( next_page_token="next_page_token_value", ) + response = client.list_pluggable_databases(request) - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = oracledatabase.ListCloudExadataInfrastructuresResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_cloud_exadata_infrastructures(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = pluggable_database.ListPluggableDatabasesRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListCloudExadataInfrastructuresPager) + assert isinstance(response, pagers.ListPluggableDatabasesPager) assert response.next_page_token == "next_page_token_value" -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_cloud_exadata_infrastructures_rest_interceptors(null_interceptor): - transport = transports.OracleDatabaseRestTransport( +def test_list_pluggable_databases_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.OracleDatabaseRestInterceptor(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = pluggable_database.ListPluggableDatabasesRequest( + parent="parent_value", + page_token="page_token_value", + filter="filter_value", ) - client = OracleDatabaseClient(transport=transport) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.OracleDatabaseRestInterceptor, - "post_list_cloud_exadata_infrastructures", - ) as post, mock.patch.object( - transports.OracleDatabaseRestInterceptor, - "post_list_cloud_exadata_infrastructures_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.OracleDatabaseRestInterceptor, - "pre_list_cloud_exadata_infrastructures", - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = oracledatabase.ListCloudExadataInfrastructuresRequest.pb( - oracledatabase.ListCloudExadataInfrastructuresRequest() - ) - transcode.return_value = { + type(client.transport.list_pluggable_databases), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.list_pluggable_databases(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pluggable_database.ListPluggableDatabasesRequest( + parent="parent_value", + page_token="page_token_value", + filter="filter_value", + ) + + +def test_list_pluggable_databases_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_pluggable_databases + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_pluggable_databases + ] = mock_rpc + request = {} + client.list_pluggable_databases(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_pluggable_databases(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_pluggable_databases_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.list_pluggable_databases + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_pluggable_databases + ] = mock_rpc + + request = {} + await client.list_pluggable_databases(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.list_pluggable_databases(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_pluggable_databases_async( + transport: str = "grpc_asyncio", + request_type=pluggable_database.ListPluggableDatabasesRequest, +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pluggable_databases), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + pluggable_database.ListPluggableDatabasesResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_pluggable_databases(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = pluggable_database.ListPluggableDatabasesRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPluggableDatabasesAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_pluggable_databases_async_from_dict(): + await test_list_pluggable_databases_async(request_type=dict) + + +def test_list_pluggable_databases_field_headers(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pluggable_database.ListPluggableDatabasesRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pluggable_databases), "__call__" + ) as call: + call.return_value = pluggable_database.ListPluggableDatabasesResponse() + client.list_pluggable_databases(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_pluggable_databases_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pluggable_database.ListPluggableDatabasesRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pluggable_databases), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + pluggable_database.ListPluggableDatabasesResponse() + ) + await client.list_pluggable_databases(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_pluggable_databases_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pluggable_databases), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = pluggable_database.ListPluggableDatabasesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_pluggable_databases( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_pluggable_databases_flattened_error(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_pluggable_databases( + pluggable_database.ListPluggableDatabasesRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_pluggable_databases_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pluggable_databases), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = pluggable_database.ListPluggableDatabasesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + pluggable_database.ListPluggableDatabasesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_pluggable_databases( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_pluggable_databases_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_pluggable_databases( + pluggable_database.ListPluggableDatabasesRequest(), + parent="parent_value", + ) + + +def test_list_pluggable_databases_pager(transport_name: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pluggable_databases), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + pluggable_database.ListPluggableDatabasesResponse( + pluggable_databases=[ + pluggable_database.PluggableDatabase(), + pluggable_database.PluggableDatabase(), + pluggable_database.PluggableDatabase(), + ], + next_page_token="abc", + ), + pluggable_database.ListPluggableDatabasesResponse( + pluggable_databases=[], + next_page_token="def", + ), + pluggable_database.ListPluggableDatabasesResponse( + pluggable_databases=[ + pluggable_database.PluggableDatabase(), + ], + next_page_token="ghi", + ), + pluggable_database.ListPluggableDatabasesResponse( + pluggable_databases=[ + pluggable_database.PluggableDatabase(), + pluggable_database.PluggableDatabase(), + ], + ), + RuntimeError, + ) + + expected_metadata = () + retry = retries.Retry() + timeout = 5 + expected_metadata = tuple(expected_metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_pluggable_databases( + request={}, retry=retry, timeout=timeout + ) + + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, pluggable_database.PluggableDatabase) for i in results) + + +def test_list_pluggable_databases_pages(transport_name: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + 
) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pluggable_databases), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + pluggable_database.ListPluggableDatabasesResponse( + pluggable_databases=[ + pluggable_database.PluggableDatabase(), + pluggable_database.PluggableDatabase(), + pluggable_database.PluggableDatabase(), + ], + next_page_token="abc", + ), + pluggable_database.ListPluggableDatabasesResponse( + pluggable_databases=[], + next_page_token="def", + ), + pluggable_database.ListPluggableDatabasesResponse( + pluggable_databases=[ + pluggable_database.PluggableDatabase(), + ], + next_page_token="ghi", + ), + pluggable_database.ListPluggableDatabasesResponse( + pluggable_databases=[ + pluggable_database.PluggableDatabase(), + pluggable_database.PluggableDatabase(), + ], + ), + RuntimeError, + ) + pages = list(client.list_pluggable_databases(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_pluggable_databases_async_pager(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pluggable_databases), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + pluggable_database.ListPluggableDatabasesResponse( + pluggable_databases=[ + pluggable_database.PluggableDatabase(), + pluggable_database.PluggableDatabase(), + pluggable_database.PluggableDatabase(), + ], + next_page_token="abc", + ), + pluggable_database.ListPluggableDatabasesResponse( + pluggable_databases=[], + next_page_token="def", + ), + pluggable_database.ListPluggableDatabasesResponse( + pluggable_databases=[ + pluggable_database.PluggableDatabase(), + ], + next_page_token="ghi", + ), + pluggable_database.ListPluggableDatabasesResponse( + pluggable_databases=[ + pluggable_database.PluggableDatabase(), + pluggable_database.PluggableDatabase(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_pluggable_databases( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all( + isinstance(i, pluggable_database.PluggableDatabase) for i in responses + ) + + +@pytest.mark.asyncio +async def test_list_pluggable_databases_async_pages(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pluggable_databases), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + pluggable_database.ListPluggableDatabasesResponse( + pluggable_databases=[ + pluggable_database.PluggableDatabase(), + pluggable_database.PluggableDatabase(), + pluggable_database.PluggableDatabase(), + ], + next_page_token="abc", + ), + pluggable_database.ListPluggableDatabasesResponse( + pluggable_databases=[], + next_page_token="def", + ), + pluggable_database.ListPluggableDatabasesResponse( + pluggable_databases=[ + pluggable_database.PluggableDatabase(), + ], + next_page_token="ghi", + ), + pluggable_database.ListPluggableDatabasesResponse( + pluggable_databases=[ + pluggable_database.PluggableDatabase(), + pluggable_database.PluggableDatabase(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_pluggable_databases(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + pluggable_database.GetPluggableDatabaseRequest, + dict, + ], +) +def test_get_pluggable_database(request_type, transport: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_pluggable_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = pluggable_database.PluggableDatabase( + name="name_value", + oci_url="oci_url_value", + ) + response = client.get_pluggable_database(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = pluggable_database.GetPluggableDatabaseRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pluggable_database.PluggableDatabase) + assert response.name == "name_value" + assert response.oci_url == "oci_url_value" + + +def test_get_pluggable_database_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = pluggable_database.GetPluggableDatabaseRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_pluggable_database), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.get_pluggable_database(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pluggable_database.GetPluggableDatabaseRequest( + name="name_value", + ) + + +def test_get_pluggable_database_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.get_pluggable_database + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.get_pluggable_database + ] = mock_rpc + request = {} + client.get_pluggable_database(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.get_pluggable_database(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_pluggable_database_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_pluggable_database + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_pluggable_database + ] = mock_rpc + + request = {} + await client.get_pluggable_database(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.get_pluggable_database(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_pluggable_database_async( + transport: str = "grpc_asyncio", + request_type=pluggable_database.GetPluggableDatabaseRequest, +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_pluggable_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + pluggable_database.PluggableDatabase( + name="name_value", + oci_url="oci_url_value", + ) + ) + response = await client.get_pluggable_database(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = pluggable_database.GetPluggableDatabaseRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pluggable_database.PluggableDatabase) + assert response.name == "name_value" + assert response.oci_url == "oci_url_value" + + +@pytest.mark.asyncio +async def test_get_pluggable_database_async_from_dict(): + await test_get_pluggable_database_async(request_type=dict) + + +def test_get_pluggable_database_field_headers(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pluggable_database.GetPluggableDatabaseRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_pluggable_database), "__call__" + ) as call: + call.return_value = pluggable_database.PluggableDatabase() + client.get_pluggable_database(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_pluggable_database_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pluggable_database.GetPluggableDatabaseRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_pluggable_database), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + pluggable_database.PluggableDatabase() + ) + await client.get_pluggable_database(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_pluggable_database_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_pluggable_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = pluggable_database.PluggableDatabase() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_pluggable_database( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_pluggable_database_flattened_error(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_pluggable_database( + pluggable_database.GetPluggableDatabaseRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_pluggable_database_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_pluggable_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = pluggable_database.PluggableDatabase() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + pluggable_database.PluggableDatabase() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_pluggable_database( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_pluggable_database_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_pluggable_database( + pluggable_database.GetPluggableDatabaseRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + db_system.ListDbSystemsRequest, + dict, + ], +) +def test_list_db_systems(request_type, transport: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_db_systems), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = db_system.ListDbSystemsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_db_systems(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = db_system.ListDbSystemsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDbSystemsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_db_systems_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. 
+ request = db_system.ListDbSystemsRequest( + parent="parent_value", + page_token="page_token_value", + filter="filter_value", + order_by="order_by_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_db_systems), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.list_db_systems(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == db_system.ListDbSystemsRequest( + parent="parent_value", + page_token="page_token_value", + filter="filter_value", + order_by="order_by_value", + ) + + +def test_list_db_systems_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_db_systems in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_db_systems] = mock_rpc + request = {} + client.list_db_systems(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_db_systems(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_db_systems_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.list_db_systems + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_db_systems + ] = mock_rpc + + request = {} + await client.list_db_systems(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.list_db_systems(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_db_systems_async( + transport: str = "grpc_asyncio", request_type=db_system.ListDbSystemsRequest +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_db_systems), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + db_system.ListDbSystemsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_db_systems(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = db_system.ListDbSystemsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDbSystemsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_db_systems_async_from_dict(): + await test_list_db_systems_async(request_type=dict) + + +def test_list_db_systems_field_headers(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = db_system.ListDbSystemsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_db_systems), "__call__") as call: + call.return_value = db_system.ListDbSystemsResponse() + client.list_db_systems(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_db_systems_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = db_system.ListDbSystemsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_db_systems), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + db_system.ListDbSystemsResponse() + ) + await client.list_db_systems(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_db_systems_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_db_systems), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = db_system.ListDbSystemsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_db_systems( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_db_systems_flattened_error(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_db_systems( + db_system.ListDbSystemsRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_db_systems_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_db_systems), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = db_system.ListDbSystemsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + db_system.ListDbSystemsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_db_systems( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_db_systems_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_db_systems( + db_system.ListDbSystemsRequest(), + parent="parent_value", + ) + + +def test_list_db_systems_pager(transport_name: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_db_systems), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + db_system.ListDbSystemsResponse( + db_systems=[ + db_system.DbSystem(), + db_system.DbSystem(), + db_system.DbSystem(), + ], + next_page_token="abc", + ), + db_system.ListDbSystemsResponse( + db_systems=[], + next_page_token="def", + ), + db_system.ListDbSystemsResponse( + db_systems=[ + db_system.DbSystem(), + ], + next_page_token="ghi", + ), + db_system.ListDbSystemsResponse( + db_systems=[ + db_system.DbSystem(), + db_system.DbSystem(), + ], + ), + RuntimeError, + ) + + expected_metadata = () + retry = retries.Retry() + timeout = 5 + expected_metadata = tuple(expected_metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_db_systems(request={}, retry=retry, timeout=timeout) + + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, db_system.DbSystem) for i in results) + + +def test_list_db_systems_pages(transport_name: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_db_systems), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + db_system.ListDbSystemsResponse( + db_systems=[ + db_system.DbSystem(), + db_system.DbSystem(), + db_system.DbSystem(), + ], + next_page_token="abc", + ), + db_system.ListDbSystemsResponse( + db_systems=[], + next_page_token="def", + ), + db_system.ListDbSystemsResponse( + db_systems=[ + db_system.DbSystem(), + ], + next_page_token="ghi", + ), + db_system.ListDbSystemsResponse( + db_systems=[ + db_system.DbSystem(), + db_system.DbSystem(), + ], + ), + RuntimeError, + ) + pages = list(client.list_db_systems(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_db_systems_async_pager(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_db_systems), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + db_system.ListDbSystemsResponse( + db_systems=[ + db_system.DbSystem(), + db_system.DbSystem(), + db_system.DbSystem(), + ], + next_page_token="abc", + ), + db_system.ListDbSystemsResponse( + db_systems=[], + next_page_token="def", + ), + db_system.ListDbSystemsResponse( + db_systems=[ + db_system.DbSystem(), + ], + next_page_token="ghi", + ), + db_system.ListDbSystemsResponse( + db_systems=[ + db_system.DbSystem(), + db_system.DbSystem(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_db_systems( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, db_system.DbSystem) for i in responses) + + +@pytest.mark.asyncio +async def test_list_db_systems_async_pages(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_db_systems), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + db_system.ListDbSystemsResponse( + db_systems=[ + db_system.DbSystem(), + db_system.DbSystem(), + db_system.DbSystem(), + ], + next_page_token="abc", + ), + db_system.ListDbSystemsResponse( + db_systems=[], + next_page_token="def", + ), + db_system.ListDbSystemsResponse( + db_systems=[ + db_system.DbSystem(), + ], + next_page_token="ghi", + ), + db_system.ListDbSystemsResponse( + db_systems=[ + db_system.DbSystem(), + db_system.DbSystem(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_db_systems(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + db_system.GetDbSystemRequest, + dict, + ], +) +def test_get_db_system(request_type, transport: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_db_system), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = db_system.DbSystem( + name="name_value", + gcp_oracle_zone="gcp_oracle_zone_value", + odb_network="odb_network_value", + odb_subnet="odb_subnet_value", + entitlement_id="entitlement_id_value", + display_name="display_name_value", + oci_url="oci_url_value", + ) + response = client.get_db_system(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = db_system.GetDbSystemRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, db_system.DbSystem) + assert response.name == "name_value" + assert response.gcp_oracle_zone == "gcp_oracle_zone_value" + assert response.odb_network == "odb_network_value" + assert response.odb_subnet == "odb_subnet_value" + assert response.entitlement_id == "entitlement_id_value" + assert response.display_name == "display_name_value" + assert response.oci_url == "oci_url_value" + + +def test_get_db_system_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = db_system.GetDbSystemRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_db_system), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.get_db_system(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == db_system.GetDbSystemRequest( + name="name_value", + ) + + +def test_get_db_system_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_db_system in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_db_system] = mock_rpc + request = {} + client.get_db_system(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.get_db_system(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_db_system_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_db_system + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_db_system + ] = mock_rpc + + request = {} + await client.get_db_system(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.get_db_system(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_db_system_async( + transport: str = "grpc_asyncio", request_type=db_system.GetDbSystemRequest +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_db_system), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + db_system.DbSystem( + name="name_value", + gcp_oracle_zone="gcp_oracle_zone_value", + odb_network="odb_network_value", + odb_subnet="odb_subnet_value", + entitlement_id="entitlement_id_value", + display_name="display_name_value", + oci_url="oci_url_value", + ) + ) + response = await client.get_db_system(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = db_system.GetDbSystemRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, db_system.DbSystem) + assert response.name == "name_value" + assert response.gcp_oracle_zone == "gcp_oracle_zone_value" + assert response.odb_network == "odb_network_value" + assert response.odb_subnet == "odb_subnet_value" + assert response.entitlement_id == "entitlement_id_value" + assert response.display_name == "display_name_value" + assert response.oci_url == "oci_url_value" + + +@pytest.mark.asyncio +async def test_get_db_system_async_from_dict(): + await test_get_db_system_async(request_type=dict) + + +def test_get_db_system_field_headers(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = db_system.GetDbSystemRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_db_system), "__call__") as call: + call.return_value = db_system.DbSystem() + client.get_db_system(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_db_system_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = db_system.GetDbSystemRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_db_system), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(db_system.DbSystem()) + await client.get_db_system(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_db_system_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_db_system), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = db_system.DbSystem() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_db_system( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_db_system_flattened_error(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_db_system( + db_system.GetDbSystemRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_db_system_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_db_system), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = db_system.DbSystem() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(db_system.DbSystem()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_db_system( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_db_system_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_db_system( + db_system.GetDbSystemRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + gco_db_system.CreateDbSystemRequest, + dict, + ], +) +def test_create_db_system(request_type, transport: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_db_system), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_db_system(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = gco_db_system.CreateDbSystemRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_db_system_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = gco_db_system.CreateDbSystemRequest( + parent="parent_value", + db_system_id="db_system_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.create_db_system), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.create_db_system(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == gco_db_system.CreateDbSystemRequest( + parent="parent_value", + db_system_id="db_system_id_value", + ) + + +def test_create_db_system_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.create_db_system in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_db_system + ] = mock_rpc + request = {} + client.create_db_system(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_db_system(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_db_system_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.create_db_system + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.create_db_system + ] = mock_rpc + + request = {} + await client.create_db_system(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.create_db_system(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_db_system_async( + transport: str = "grpc_asyncio", request_type=gco_db_system.CreateDbSystemRequest +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_db_system), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_db_system(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = gco_db_system.CreateDbSystemRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_db_system_async_from_dict(): + await test_create_db_system_async(request_type=dict) + + +def test_create_db_system_field_headers(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = gco_db_system.CreateDbSystemRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.create_db_system), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_db_system(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_db_system_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = gco_db_system.CreateDbSystemRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_db_system), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.create_db_system(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_create_db_system_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_db_system), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_db_system( + parent="parent_value", + db_system=gco_db_system.DbSystem(name="name_value"), + db_system_id="db_system_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].db_system + mock_val = gco_db_system.DbSystem(name="name_value") + assert arg == mock_val + arg = args[0].db_system_id + mock_val = "db_system_id_value" + assert arg == mock_val + + +def test_create_db_system_flattened_error(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_db_system( + gco_db_system.CreateDbSystemRequest(), + parent="parent_value", + db_system=gco_db_system.DbSystem(name="name_value"), + db_system_id="db_system_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_db_system_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_db_system), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.create_db_system( + parent="parent_value", + db_system=gco_db_system.DbSystem(name="name_value"), + db_system_id="db_system_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].db_system + mock_val = gco_db_system.DbSystem(name="name_value") + assert arg == mock_val + arg = args[0].db_system_id + mock_val = "db_system_id_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_create_db_system_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_db_system( + gco_db_system.CreateDbSystemRequest(), + parent="parent_value", + db_system=gco_db_system.DbSystem(name="name_value"), + db_system_id="db_system_id_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + db_system.DeleteDbSystemRequest, + dict, + ], +) +def test_delete_db_system(request_type, transport: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_db_system), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_db_system(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = db_system.DeleteDbSystemRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_db_system_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = db_system.DeleteDbSystemRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_db_system), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.delete_db_system(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == db_system.DeleteDbSystemRequest( + name="name_value", + ) + + +def test_delete_db_system_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_db_system in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_db_system + ] = mock_rpc + request = {} + client.delete_db_system(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.delete_db_system(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_db_system_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.delete_db_system + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_db_system + ] = mock_rpc + + request = {} + await client.delete_db_system(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.delete_db_system(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_db_system_async( + transport: str = "grpc_asyncio", request_type=db_system.DeleteDbSystemRequest +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_db_system), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_db_system(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = db_system.DeleteDbSystemRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_db_system_async_from_dict(): + await test_delete_db_system_async(request_type=dict) + + +def test_delete_db_system_field_headers(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = db_system.DeleteDbSystemRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_db_system), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_db_system(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_db_system_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = db_system.DeleteDbSystemRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_db_system), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_db_system(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_db_system_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_db_system), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_db_system( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_delete_db_system_flattened_error(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_db_system( + db_system.DeleteDbSystemRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_db_system_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_db_system), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_db_system( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_delete_db_system_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_db_system( + db_system.DeleteDbSystemRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + db_version.ListDbVersionsRequest, + dict, + ], +) +def test_list_db_versions(request_type, transport: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_db_versions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = db_version.ListDbVersionsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_db_versions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = db_version.ListDbVersionsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDbVersionsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_db_versions_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = db_version.ListDbVersionsRequest( + parent="parent_value", + page_token="page_token_value", + filter="filter_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_db_versions), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.list_db_versions(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == db_version.ListDbVersionsRequest( + parent="parent_value", + page_token="page_token_value", + filter="filter_value", + ) + + +def test_list_db_versions_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_db_versions in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_db_versions + ] = mock_rpc + request = {} + client.list_db_versions(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_db_versions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_db_versions_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.list_db_versions + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_db_versions + ] = mock_rpc + + request = {} + await client.list_db_versions(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.list_db_versions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_db_versions_async( + transport: str = "grpc_asyncio", request_type=db_version.ListDbVersionsRequest +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_db_versions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + db_version.ListDbVersionsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_db_versions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = db_version.ListDbVersionsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDbVersionsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_db_versions_async_from_dict(): + await test_list_db_versions_async(request_type=dict) + + +def test_list_db_versions_field_headers(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = db_version.ListDbVersionsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_db_versions), "__call__") as call: + call.return_value = db_version.ListDbVersionsResponse() + client.list_db_versions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_db_versions_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = db_version.ListDbVersionsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_db_versions), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + db_version.ListDbVersionsResponse() + ) + await client.list_db_versions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_db_versions_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_db_versions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = db_version.ListDbVersionsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_db_versions( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_db_versions_flattened_error(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_db_versions( + db_version.ListDbVersionsRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_db_versions_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_db_versions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = db_version.ListDbVersionsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + db_version.ListDbVersionsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_db_versions( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_db_versions_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_db_versions( + db_version.ListDbVersionsRequest(), + parent="parent_value", + ) + + +def test_list_db_versions_pager(transport_name: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_db_versions), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + db_version.ListDbVersionsResponse( + db_versions=[ + db_version.DbVersion(), + db_version.DbVersion(), + db_version.DbVersion(), + ], + next_page_token="abc", + ), + db_version.ListDbVersionsResponse( + db_versions=[], + next_page_token="def", + ), + db_version.ListDbVersionsResponse( + db_versions=[ + db_version.DbVersion(), + ], + next_page_token="ghi", + ), + db_version.ListDbVersionsResponse( + db_versions=[ + db_version.DbVersion(), + db_version.DbVersion(), + ], + ), + RuntimeError, + ) + + expected_metadata = () + retry = retries.Retry() + timeout = 5 + expected_metadata = tuple(expected_metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_db_versions(request={}, retry=retry, timeout=timeout) + + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, db_version.DbVersion) for i in results) + + +def test_list_db_versions_pages(transport_name: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_db_versions), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + db_version.ListDbVersionsResponse( + db_versions=[ + db_version.DbVersion(), + db_version.DbVersion(), + db_version.DbVersion(), + ], + next_page_token="abc", + ), + db_version.ListDbVersionsResponse( + db_versions=[], + next_page_token="def", + ), + db_version.ListDbVersionsResponse( + db_versions=[ + db_version.DbVersion(), + ], + next_page_token="ghi", + ), + db_version.ListDbVersionsResponse( + db_versions=[ + db_version.DbVersion(), + db_version.DbVersion(), + ], + ), + RuntimeError, + ) + pages = list(client.list_db_versions(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_db_versions_async_pager(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_db_versions), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + db_version.ListDbVersionsResponse( + db_versions=[ + db_version.DbVersion(), + db_version.DbVersion(), + db_version.DbVersion(), + ], + next_page_token="abc", + ), + db_version.ListDbVersionsResponse( + db_versions=[], + next_page_token="def", + ), + db_version.ListDbVersionsResponse( + db_versions=[ + db_version.DbVersion(), + ], + next_page_token="ghi", + ), + db_version.ListDbVersionsResponse( + db_versions=[ + db_version.DbVersion(), + db_version.DbVersion(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_db_versions( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, db_version.DbVersion) for i in responses) + + +@pytest.mark.asyncio +async def test_list_db_versions_async_pages(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_db_versions), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + db_version.ListDbVersionsResponse( + db_versions=[ + db_version.DbVersion(), + db_version.DbVersion(), + db_version.DbVersion(), + ], + next_page_token="abc", + ), + db_version.ListDbVersionsResponse( + db_versions=[], + next_page_token="def", + ), + db_version.ListDbVersionsResponse( + db_versions=[ + db_version.DbVersion(), + ], + next_page_token="ghi", + ), + db_version.ListDbVersionsResponse( + db_versions=[ + db_version.DbVersion(), + db_version.DbVersion(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_db_versions(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + database_character_set.ListDatabaseCharacterSetsRequest, + dict, + ], +) +def test_list_database_character_sets(request_type, transport: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_database_character_sets), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = database_character_set.ListDatabaseCharacterSetsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_database_character_sets(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = database_character_set.ListDatabaseCharacterSetsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDatabaseCharacterSetsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_database_character_sets_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = database_character_set.ListDatabaseCharacterSetsRequest( + parent="parent_value", + page_token="page_token_value", + filter="filter_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_database_character_sets), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.list_database_character_sets(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == database_character_set.ListDatabaseCharacterSetsRequest( + parent="parent_value", + page_token="page_token_value", + filter="filter_value", + ) + + +def test_list_database_character_sets_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_database_character_sets + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_database_character_sets + ] = mock_rpc + request = {} + client.list_database_character_sets(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_database_character_sets(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_database_character_sets_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.list_database_character_sets + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_database_character_sets + ] = mock_rpc + + request = {} + await client.list_database_character_sets(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.list_database_character_sets(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_database_character_sets_async( + transport: str = "grpc_asyncio", + request_type=database_character_set.ListDatabaseCharacterSetsRequest, +): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_database_character_sets), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + database_character_set.ListDatabaseCharacterSetsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_database_character_sets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = database_character_set.ListDatabaseCharacterSetsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDatabaseCharacterSetsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_database_character_sets_async_from_dict(): + await test_list_database_character_sets_async(request_type=dict) + + +def test_list_database_character_sets_field_headers(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = database_character_set.ListDatabaseCharacterSetsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_database_character_sets), "__call__" + ) as call: + call.return_value = database_character_set.ListDatabaseCharacterSetsResponse() + client.list_database_character_sets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_database_character_sets_field_headers_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = database_character_set.ListDatabaseCharacterSetsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_database_character_sets), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + database_character_set.ListDatabaseCharacterSetsResponse() + ) + await client.list_database_character_sets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_database_character_sets_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_database_character_sets), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = database_character_set.ListDatabaseCharacterSetsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_database_character_sets( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_database_character_sets_flattened_error(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_database_character_sets( + database_character_set.ListDatabaseCharacterSetsRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_database_character_sets_flattened_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_database_character_sets), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = database_character_set.ListDatabaseCharacterSetsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + database_character_set.ListDatabaseCharacterSetsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_database_character_sets( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_database_character_sets_flattened_error_async(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_database_character_sets( + database_character_set.ListDatabaseCharacterSetsRequest(), + parent="parent_value", + ) + + +def test_list_database_character_sets_pager(transport_name: str = "grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_database_character_sets), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + database_character_set.ListDatabaseCharacterSetsResponse( + database_character_sets=[ + database_character_set.DatabaseCharacterSet(), + database_character_set.DatabaseCharacterSet(), + database_character_set.DatabaseCharacterSet(), + ], + next_page_token="abc", + ), + database_character_set.ListDatabaseCharacterSetsResponse( + database_character_sets=[], + next_page_token="def", + ), + database_character_set.ListDatabaseCharacterSetsResponse( + database_character_sets=[ + database_character_set.DatabaseCharacterSet(), + ], + next_page_token="ghi", + ), + database_character_set.ListDatabaseCharacterSetsResponse( + database_character_sets=[ + database_character_set.DatabaseCharacterSet(), + database_character_set.DatabaseCharacterSet(), + ], + ), + RuntimeError, + ) + + expected_metadata = () + retry = retries.Retry() + timeout = 5 + expected_metadata = tuple(expected_metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_database_character_sets( + request={}, retry=retry, timeout=timeout + ) + + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, database_character_set.DatabaseCharacterSet) for i in results + ) + + +def test_list_database_character_sets_pages(transport_name: str = 
"grpc"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_database_character_sets), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + database_character_set.ListDatabaseCharacterSetsResponse( + database_character_sets=[ + database_character_set.DatabaseCharacterSet(), + database_character_set.DatabaseCharacterSet(), + database_character_set.DatabaseCharacterSet(), + ], + next_page_token="abc", + ), + database_character_set.ListDatabaseCharacterSetsResponse( + database_character_sets=[], + next_page_token="def", + ), + database_character_set.ListDatabaseCharacterSetsResponse( + database_character_sets=[ + database_character_set.DatabaseCharacterSet(), + ], + next_page_token="ghi", + ), + database_character_set.ListDatabaseCharacterSetsResponse( + database_character_sets=[ + database_character_set.DatabaseCharacterSet(), + database_character_set.DatabaseCharacterSet(), + ], + ), + RuntimeError, + ) + pages = list(client.list_database_character_sets(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_database_character_sets_async_pager(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_database_character_sets), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + database_character_set.ListDatabaseCharacterSetsResponse( + database_character_sets=[ + database_character_set.DatabaseCharacterSet(), + database_character_set.DatabaseCharacterSet(), + database_character_set.DatabaseCharacterSet(), + ], + next_page_token="abc", + ), + database_character_set.ListDatabaseCharacterSetsResponse( + database_character_sets=[], + next_page_token="def", + ), + database_character_set.ListDatabaseCharacterSetsResponse( + database_character_sets=[ + database_character_set.DatabaseCharacterSet(), + ], + next_page_token="ghi", + ), + database_character_set.ListDatabaseCharacterSetsResponse( + database_character_sets=[ + database_character_set.DatabaseCharacterSet(), + database_character_set.DatabaseCharacterSet(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_database_character_sets( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all( + isinstance(i, database_character_set.DatabaseCharacterSet) + for i in responses + ) + + +@pytest.mark.asyncio +async def test_list_database_character_sets_async_pages(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_database_character_sets), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + database_character_set.ListDatabaseCharacterSetsResponse( + database_character_sets=[ + database_character_set.DatabaseCharacterSet(), + database_character_set.DatabaseCharacterSet(), + database_character_set.DatabaseCharacterSet(), + ], + next_page_token="abc", + ), + database_character_set.ListDatabaseCharacterSetsResponse( + database_character_sets=[], + next_page_token="def", + ), + database_character_set.ListDatabaseCharacterSetsResponse( + database_character_sets=[ + database_character_set.DatabaseCharacterSet(), + ], + next_page_token="ghi", + ), + database_character_set.ListDatabaseCharacterSetsResponse( + database_character_sets=[ + database_character_set.DatabaseCharacterSet(), + database_character_set.DatabaseCharacterSet(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_database_character_sets(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_list_cloud_exadata_infrastructures_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_cloud_exadata_infrastructures + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + 
"foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_cloud_exadata_infrastructures + ] = mock_rpc + + request = {} + client.list_cloud_exadata_infrastructures(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_cloud_exadata_infrastructures(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_cloud_exadata_infrastructures_rest_required_fields( + request_type=oracledatabase.ListCloudExadataInfrastructuresRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_cloud_exadata_infrastructures._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_cloud_exadata_infrastructures._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "filter", + "order_by", + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = oracledatabase.ListCloudExadataInfrastructuresResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.ListCloudExadataInfrastructuresResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_cloud_exadata_infrastructures(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_cloud_exadata_infrastructures_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = ( + transport.list_cloud_exadata_infrastructures._get_unset_required_fields({}) + ) + assert set(unset_fields) == ( + set( + ( + "filter", + "orderBy", + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_cloud_exadata_infrastructures_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = oracledatabase.ListCloudExadataInfrastructuresResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = oracledatabase.ListCloudExadataInfrastructuresResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_cloud_exadata_infrastructures(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/cloudExadataInfrastructures" + % client.transport._host, + args[1], + ) + + +def test_list_cloud_exadata_infrastructures_rest_flattened_error( + transport: str = "rest", +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_cloud_exadata_infrastructures( + oracledatabase.ListCloudExadataInfrastructuresRequest(), + parent="parent_value", + ) + + +def test_list_cloud_exadata_infrastructures_rest_pager(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + oracledatabase.ListCloudExadataInfrastructuresResponse( + cloud_exadata_infrastructures=[ + exadata_infra.CloudExadataInfrastructure(), + exadata_infra.CloudExadataInfrastructure(), + exadata_infra.CloudExadataInfrastructure(), + ], + next_page_token="abc", + ), + oracledatabase.ListCloudExadataInfrastructuresResponse( + cloud_exadata_infrastructures=[], + next_page_token="def", + ), + oracledatabase.ListCloudExadataInfrastructuresResponse( + cloud_exadata_infrastructures=[ + exadata_infra.CloudExadataInfrastructure(), + ], + next_page_token="ghi", + ), + oracledatabase.ListCloudExadataInfrastructuresResponse( + cloud_exadata_infrastructures=[ + exadata_infra.CloudExadataInfrastructure(), + exadata_infra.CloudExadataInfrastructure(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + oracledatabase.ListCloudExadataInfrastructuresResponse.to_json(x) + for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_cloud_exadata_infrastructures(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, exadata_infra.CloudExadataInfrastructure) for i in results + ) + + pages = list( + client.list_cloud_exadata_infrastructures(request=sample_request).pages + ) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def 
test_get_cloud_exadata_infrastructure_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.get_cloud_exadata_infrastructure + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.get_cloud_exadata_infrastructure + ] = mock_rpc + + request = {} + client.get_cloud_exadata_infrastructure(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.get_cloud_exadata_infrastructure(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_cloud_exadata_infrastructure_rest_required_fields( + request_type=oracledatabase.GetCloudExadataInfrastructureRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_cloud_exadata_infrastructure._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_cloud_exadata_infrastructure._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = exadata_infra.CloudExadataInfrastructure() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. 
+ with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = exadata_infra.CloudExadataInfrastructure.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_cloud_exadata_infrastructure(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_cloud_exadata_infrastructure_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = ( + transport.get_cloud_exadata_infrastructure._get_unset_required_fields({}) + ) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_get_cloud_exadata_infrastructure_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = exadata_infra.CloudExadataInfrastructure() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/cloudExadataInfrastructures/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = exadata_infra.CloudExadataInfrastructure.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_cloud_exadata_infrastructure(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/cloudExadataInfrastructures/*}" + % client.transport._host, + args[1], + ) + + +def test_get_cloud_exadata_infrastructure_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_cloud_exadata_infrastructure( + oracledatabase.GetCloudExadataInfrastructureRequest(), + name="name_value", + ) + + +def test_create_cloud_exadata_infrastructure_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_cloud_exadata_infrastructure + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_cloud_exadata_infrastructure + ] = mock_rpc + + request = {} + client.create_cloud_exadata_infrastructure(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_cloud_exadata_infrastructure(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_cloud_exadata_infrastructure_rest_required_fields( + request_type=oracledatabase.CreateCloudExadataInfrastructureRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["cloud_exadata_infrastructure_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + assert "cloudExadataInfrastructureId" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_cloud_exadata_infrastructure._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "cloudExadataInfrastructureId" in jsonified_request + assert ( + jsonified_request["cloudExadataInfrastructureId"] + == request_init["cloud_exadata_infrastructure_id"] + ) + + jsonified_request["parent"] = "parent_value" + jsonified_request[ + "cloudExadataInfrastructureId" + ] = "cloud_exadata_infrastructure_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_cloud_exadata_infrastructure._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "cloud_exadata_infrastructure_id", + "request_id", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "cloudExadataInfrastructureId" in jsonified_request + assert ( + jsonified_request["cloudExadataInfrastructureId"] + == "cloud_exadata_infrastructure_id_value" + ) + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.create_cloud_exadata_infrastructure(request) + + expected_params = [ + ( + "cloudExadataInfrastructureId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_cloud_exadata_infrastructure_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = ( + transport.create_cloud_exadata_infrastructure._get_unset_required_fields({}) + ) + assert set(unset_fields) == ( + set( + ( + "cloudExadataInfrastructureId", + "requestId", + ) + ) + & set( + ( + "parent", + "cloudExadataInfrastructureId", + "cloudExadataInfrastructure", + ) + ) + ) + + +def test_create_cloud_exadata_infrastructure_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + cloud_exadata_infrastructure=exadata_infra.CloudExadataInfrastructure( + name="name_value" + ), + cloud_exadata_infrastructure_id="cloud_exadata_infrastructure_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.create_cloud_exadata_infrastructure(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/cloudExadataInfrastructures" + % client.transport._host, + args[1], + ) + + +def test_create_cloud_exadata_infrastructure_rest_flattened_error( + transport: str = "rest", +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_cloud_exadata_infrastructure( + oracledatabase.CreateCloudExadataInfrastructureRequest(), + parent="parent_value", + cloud_exadata_infrastructure=exadata_infra.CloudExadataInfrastructure( + name="name_value" + ), + cloud_exadata_infrastructure_id="cloud_exadata_infrastructure_id_value", + ) + + +def test_delete_cloud_exadata_infrastructure_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_cloud_exadata_infrastructure + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_cloud_exadata_infrastructure + ] = mock_rpc + + request = {} + client.delete_cloud_exadata_infrastructure(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.delete_cloud_exadata_infrastructure(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_cloud_exadata_infrastructure_rest_required_fields( + request_type=oracledatabase.DeleteCloudExadataInfrastructureRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_cloud_exadata_infrastructure._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_cloud_exadata_infrastructure._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "force", + "request_id", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.delete_cloud_exadata_infrastructure(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_cloud_exadata_infrastructure_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = ( + transport.delete_cloud_exadata_infrastructure._get_unset_required_fields({}) + ) + assert set(unset_fields) == ( + set( + ( + "force", + "requestId", + ) + ) + & set(("name",)) + ) + + +def test_delete_cloud_exadata_infrastructure_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/cloudExadataInfrastructures/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.delete_cloud_exadata_infrastructure(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/cloudExadataInfrastructures/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_cloud_exadata_infrastructure_rest_flattened_error( + transport: str = "rest", +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_cloud_exadata_infrastructure( + oracledatabase.DeleteCloudExadataInfrastructureRequest(), + name="name_value", + ) + + +def test_list_cloud_vm_clusters_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_cloud_vm_clusters + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_cloud_vm_clusters + ] = mock_rpc + + request = {} + client.list_cloud_vm_clusters(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_cloud_vm_clusters(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_cloud_vm_clusters_rest_required_fields( + request_type=oracledatabase.ListCloudVmClustersRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_cloud_vm_clusters._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_cloud_vm_clusters._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "filter", + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = oracledatabase.ListCloudVmClustersResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.ListCloudVmClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_cloud_vm_clusters(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_cloud_vm_clusters_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_cloud_vm_clusters._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_cloud_vm_clusters_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = oracledatabase.ListCloudVmClustersResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = oracledatabase.ListCloudVmClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_cloud_vm_clusters(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/cloudVmClusters" + % client.transport._host, + args[1], + ) + + +def test_list_cloud_vm_clusters_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_cloud_vm_clusters( + oracledatabase.ListCloudVmClustersRequest(), + parent="parent_value", + ) + + +def test_list_cloud_vm_clusters_rest_pager(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + oracledatabase.ListCloudVmClustersResponse( + cloud_vm_clusters=[ + vm_cluster.CloudVmCluster(), + vm_cluster.CloudVmCluster(), + vm_cluster.CloudVmCluster(), + ], + next_page_token="abc", + ), + oracledatabase.ListCloudVmClustersResponse( + cloud_vm_clusters=[], + next_page_token="def", + ), + oracledatabase.ListCloudVmClustersResponse( + cloud_vm_clusters=[ + vm_cluster.CloudVmCluster(), + ], + next_page_token="ghi", + ), + oracledatabase.ListCloudVmClustersResponse( + cloud_vm_clusters=[ + vm_cluster.CloudVmCluster(), + vm_cluster.CloudVmCluster(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + oracledatabase.ListCloudVmClustersResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_cloud_vm_clusters(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, vm_cluster.CloudVmCluster) for i in results) + + pages = list(client.list_cloud_vm_clusters(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_cloud_vm_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all 
calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.get_cloud_vm_cluster in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.get_cloud_vm_cluster + ] = mock_rpc + + request = {} + client.get_cloud_vm_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_cloud_vm_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_cloud_vm_cluster_rest_required_fields( + request_type=oracledatabase.GetCloudVmClusterRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_cloud_vm_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_cloud_vm_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = OracleDatabaseClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = vm_cluster.CloudVmCluster() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = vm_cluster.CloudVmCluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_cloud_vm_cluster(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_cloud_vm_cluster_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_cloud_vm_cluster._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_get_cloud_vm_cluster_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the 
http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = vm_cluster.CloudVmCluster() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/cloudVmClusters/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = vm_cluster.CloudVmCluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_cloud_vm_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/cloudVmClusters/*}" + % client.transport._host, + args[1], + ) + + +def test_get_cloud_vm_cluster_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_cloud_vm_cluster( + oracledatabase.GetCloudVmClusterRequest(), + name="name_value", + ) + + +def test_create_cloud_vm_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_cloud_vm_cluster + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_cloud_vm_cluster + ] = mock_rpc + + request = {} + client.create_cloud_vm_cluster(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_cloud_vm_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_cloud_vm_cluster_rest_required_fields( + request_type=oracledatabase.CreateCloudVmClusterRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["cloud_vm_cluster_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + assert "cloudVmClusterId" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_cloud_vm_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "cloudVmClusterId" in jsonified_request + assert jsonified_request["cloudVmClusterId"] == request_init["cloud_vm_cluster_id"] + + jsonified_request["parent"] = "parent_value" + jsonified_request["cloudVmClusterId"] = "cloud_vm_cluster_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_cloud_vm_cluster._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "cloud_vm_cluster_id", + "request_id", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "cloudVmClusterId" in jsonified_request + assert jsonified_request["cloudVmClusterId"] == "cloud_vm_cluster_id_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.create_cloud_vm_cluster(request) + + expected_params = [ + ( + "cloudVmClusterId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_cloud_vm_cluster_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_cloud_vm_cluster._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "cloudVmClusterId", + "requestId", + ) + ) + & set( + ( + "parent", + "cloudVmClusterId", + "cloudVmCluster", + ) + ) + ) + + +def test_create_cloud_vm_cluster_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + cloud_vm_cluster=vm_cluster.CloudVmCluster(name="name_value"), + cloud_vm_cluster_id="cloud_vm_cluster_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.create_cloud_vm_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/cloudVmClusters" + % client.transport._host, + args[1], + ) + + +def test_create_cloud_vm_cluster_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_cloud_vm_cluster( + oracledatabase.CreateCloudVmClusterRequest(), + parent="parent_value", + cloud_vm_cluster=vm_cluster.CloudVmCluster(name="name_value"), + cloud_vm_cluster_id="cloud_vm_cluster_id_value", + ) + + +def test_delete_cloud_vm_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_cloud_vm_cluster + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_cloud_vm_cluster + ] = mock_rpc + + request = {} + client.delete_cloud_vm_cluster(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.delete_cloud_vm_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_cloud_vm_cluster_rest_required_fields( + request_type=oracledatabase.DeleteCloudVmClusterRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_cloud_vm_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_cloud_vm_cluster._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "force", + "request_id", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.delete_cloud_vm_cluster(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_cloud_vm_cluster_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_cloud_vm_cluster._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "force", + "requestId", + ) + ) + & set(("name",)) + ) + + +def test_delete_cloud_vm_cluster_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/cloudVmClusters/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.delete_cloud_vm_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/cloudVmClusters/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_cloud_vm_cluster_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_cloud_vm_cluster( + oracledatabase.DeleteCloudVmClusterRequest(), + name="name_value", + ) + + +def test_list_entitlements_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_entitlements in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_entitlements + ] = mock_rpc + + request = {} + client.list_entitlements(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_entitlements(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_entitlements_rest_required_fields( + request_type=oracledatabase.ListEntitlementsRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_entitlements._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_entitlements._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = oracledatabase.ListEntitlementsResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.ListEntitlementsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_entitlements(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_entitlements_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_entitlements._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_entitlements_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = oracledatabase.ListEntitlementsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = oracledatabase.ListEntitlementsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_entitlements(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/entitlements" + % client.transport._host, + args[1], + ) + + +def test_list_entitlements_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_entitlements( + oracledatabase.ListEntitlementsRequest(), + parent="parent_value", + ) + + +def test_list_entitlements_rest_pager(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + oracledatabase.ListEntitlementsResponse( + entitlements=[ + entitlement.Entitlement(), + entitlement.Entitlement(), + entitlement.Entitlement(), + ], + next_page_token="abc", + ), + oracledatabase.ListEntitlementsResponse( + entitlements=[], + next_page_token="def", + ), + oracledatabase.ListEntitlementsResponse( + entitlements=[ + entitlement.Entitlement(), + ], + next_page_token="ghi", + ), + oracledatabase.ListEntitlementsResponse( + entitlements=[ + entitlement.Entitlement(), + entitlement.Entitlement(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + oracledatabase.ListEntitlementsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_entitlements(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, entitlement.Entitlement) for i in results) + + pages = list(client.list_entitlements(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_list_db_servers_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + 
wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_db_servers in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_db_servers] = mock_rpc + + request = {} + client.list_db_servers(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_db_servers(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_db_servers_rest_required_fields( + request_type=oracledatabase.ListDbServersRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_db_servers._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_db_servers._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = oracledatabase.ListDbServersResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.ListDbServersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_db_servers(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_db_servers_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_db_servers._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_db_servers_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = oracledatabase.ListDbServersResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/locations/sample2/cloudExadataInfrastructures/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = oracledatabase.ListDbServersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_db_servers(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*/cloudExadataInfrastructures/*}/dbServers" + % client.transport._host, + args[1], + ) + + +def test_list_db_servers_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_db_servers( + oracledatabase.ListDbServersRequest(), + parent="parent_value", + ) + + +def test_list_db_servers_rest_pager(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + oracledatabase.ListDbServersResponse( + db_servers=[ + db_server.DbServer(), + db_server.DbServer(), + db_server.DbServer(), + ], + next_page_token="abc", + ), + oracledatabase.ListDbServersResponse( + db_servers=[], + next_page_token="def", + ), + oracledatabase.ListDbServersResponse( + db_servers=[ + db_server.DbServer(), + ], + next_page_token="ghi", + ), + oracledatabase.ListDbServersResponse( + db_servers=[ + db_server.DbServer(), + db_server.DbServer(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + oracledatabase.ListDbServersResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = { + "parent": "projects/sample1/locations/sample2/cloudExadataInfrastructures/sample3" + } + + pager = client.list_db_servers(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, db_server.DbServer) for i in results) + + pages = list(client.list_db_servers(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_list_db_nodes_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + 
+ # Ensure method has been cached + assert client._transport.list_db_nodes in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_db_nodes] = mock_rpc + + request = {} + client.list_db_nodes(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_db_nodes(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_db_nodes_rest_required_fields( + request_type=oracledatabase.ListDbNodesRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_db_nodes._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_db_nodes._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = oracledatabase.ListDbNodesResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.ListDbNodesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_db_nodes(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_db_nodes_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_db_nodes._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_db_nodes_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = oracledatabase.ListDbNodesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/locations/sample2/cloudVmClusters/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = oracledatabase.ListDbNodesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_db_nodes(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*/cloudVmClusters/*}/dbNodes" + % client.transport._host, + args[1], + ) + + +def test_list_db_nodes_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_db_nodes( + oracledatabase.ListDbNodesRequest(), + parent="parent_value", + ) + + +def test_list_db_nodes_rest_pager(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + oracledatabase.ListDbNodesResponse( + db_nodes=[ + db_node.DbNode(), + db_node.DbNode(), + db_node.DbNode(), + ], + next_page_token="abc", + ), + oracledatabase.ListDbNodesResponse( + db_nodes=[], + next_page_token="def", + ), + oracledatabase.ListDbNodesResponse( + db_nodes=[ + db_node.DbNode(), + ], + next_page_token="ghi", + ), + oracledatabase.ListDbNodesResponse( + db_nodes=[ + db_node.DbNode(), + db_node.DbNode(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + oracledatabase.ListDbNodesResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = { + "parent": "projects/sample1/locations/sample2/cloudVmClusters/sample3" + } + + pager = client.list_db_nodes(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, db_node.DbNode) for i in results) + + pages = list(client.list_db_nodes(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_list_gi_versions_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert 
client._transport.list_gi_versions in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_gi_versions + ] = mock_rpc + + request = {} + client.list_gi_versions(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_gi_versions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_gi_versions_rest_required_fields( + request_type=oracledatabase.ListGiVersionsRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_gi_versions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_gi_versions._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "filter", + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = oracledatabase.ListGiVersionsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.ListGiVersionsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_gi_versions(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_gi_versions_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_gi_versions._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_gi_versions_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = oracledatabase.ListGiVersionsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = oracledatabase.ListGiVersionsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_gi_versions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/giVersions" % client.transport._host, + args[1], + ) + + +def test_list_gi_versions_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_gi_versions( + oracledatabase.ListGiVersionsRequest(), + parent="parent_value", + ) + + +def test_list_gi_versions_rest_pager(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + oracledatabase.ListGiVersionsResponse( + gi_versions=[ + gi_version.GiVersion(), + gi_version.GiVersion(), + gi_version.GiVersion(), + ], + next_page_token="abc", + ), + oracledatabase.ListGiVersionsResponse( + gi_versions=[], + next_page_token="def", + ), + oracledatabase.ListGiVersionsResponse( + gi_versions=[ + gi_version.GiVersion(), + ], + next_page_token="ghi", + ), + oracledatabase.ListGiVersionsResponse( + gi_versions=[ + gi_version.GiVersion(), + gi_version.GiVersion(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + oracledatabase.ListGiVersionsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_gi_versions(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, gi_version.GiVersion) for i in results) + + pages = list(client.list_gi_versions(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_list_minor_versions_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure 
method has been cached + assert ( + client._transport.list_minor_versions in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_minor_versions + ] = mock_rpc + + request = {} + client.list_minor_versions(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_minor_versions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_minor_versions_rest_required_fields( + request_type=minor_version.ListMinorVersionsRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_minor_versions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_minor_versions._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "filter", + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = minor_version.ListMinorVersionsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = minor_version.ListMinorVersionsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_minor_versions(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_minor_versions_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_minor_versions._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_minor_versions_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = minor_version.ListMinorVersionsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/locations/sample2/giVersions/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = minor_version.ListMinorVersionsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_minor_versions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*/giVersions/*}/minorVersions" + % client.transport._host, + args[1], + ) + + +def test_list_minor_versions_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_minor_versions( + minor_version.ListMinorVersionsRequest(), + parent="parent_value", + ) + + +def test_list_minor_versions_rest_pager(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + minor_version.ListMinorVersionsResponse( + minor_versions=[ + minor_version.MinorVersion(), + minor_version.MinorVersion(), + minor_version.MinorVersion(), + ], + next_page_token="abc", + ), + minor_version.ListMinorVersionsResponse( + minor_versions=[], + next_page_token="def", + ), + minor_version.ListMinorVersionsResponse( + minor_versions=[ + minor_version.MinorVersion(), + ], + next_page_token="ghi", + ), + minor_version.ListMinorVersionsResponse( + minor_versions=[ + minor_version.MinorVersion(), + minor_version.MinorVersion(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + minor_version.ListMinorVersionsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = { + "parent": "projects/sample1/locations/sample2/giVersions/sample3" + } + + pager = client.list_minor_versions(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, minor_version.MinorVersion) for i in results) + + pages = list(client.list_minor_versions(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_list_db_system_shapes_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all 
calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_db_system_shapes + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_db_system_shapes + ] = mock_rpc + + request = {} + client.list_db_system_shapes(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_db_system_shapes(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_db_system_shapes_rest_required_fields( + request_type=oracledatabase.ListDbSystemShapesRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_db_system_shapes._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_db_system_shapes._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "filter", + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = oracledatabase.ListDbSystemShapesResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.ListDbSystemShapesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_db_system_shapes(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_db_system_shapes_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_db_system_shapes._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_db_system_shapes_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = oracledatabase.ListDbSystemShapesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = oracledatabase.ListDbSystemShapesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_db_system_shapes(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/dbSystemShapes" + % client.transport._host, + args[1], + ) + + +def test_list_db_system_shapes_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_db_system_shapes( + oracledatabase.ListDbSystemShapesRequest(), + parent="parent_value", + ) + + +def test_list_db_system_shapes_rest_pager(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + oracledatabase.ListDbSystemShapesResponse( + db_system_shapes=[ + db_system_shape.DbSystemShape(), + db_system_shape.DbSystemShape(), + db_system_shape.DbSystemShape(), + ], + next_page_token="abc", + ), + oracledatabase.ListDbSystemShapesResponse( + db_system_shapes=[], + next_page_token="def", + ), + oracledatabase.ListDbSystemShapesResponse( + db_system_shapes=[ + db_system_shape.DbSystemShape(), + ], + next_page_token="ghi", + ), + oracledatabase.ListDbSystemShapesResponse( + db_system_shapes=[ + db_system_shape.DbSystemShape(), + db_system_shape.DbSystemShape(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + oracledatabase.ListDbSystemShapesResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_db_system_shapes(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, db_system_shape.DbSystemShape) for i in results) + + pages = list(client.list_db_system_shapes(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_list_autonomous_databases_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + 
+ # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_autonomous_databases + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_autonomous_databases + ] = mock_rpc + + request = {} + client.list_autonomous_databases(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_autonomous_databases(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_autonomous_databases_rest_required_fields( + request_type=oracledatabase.ListAutonomousDatabasesRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_autonomous_databases._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_autonomous_databases._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "filter", + "order_by", + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = oracledatabase.ListAutonomousDatabasesResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.ListAutonomousDatabasesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_autonomous_databases(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_autonomous_databases_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_autonomous_databases._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "orderBy", + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_autonomous_databases_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = oracledatabase.ListAutonomousDatabasesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = oracledatabase.ListAutonomousDatabasesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_autonomous_databases(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/autonomousDatabases" + % client.transport._host, + args[1], + ) + + +def test_list_autonomous_databases_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_autonomous_databases( + oracledatabase.ListAutonomousDatabasesRequest(), + parent="parent_value", + ) + + +def test_list_autonomous_databases_rest_pager(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + oracledatabase.ListAutonomousDatabasesResponse( + autonomous_databases=[ + autonomous_database.AutonomousDatabase(), + autonomous_database.AutonomousDatabase(), + autonomous_database.AutonomousDatabase(), + ], + next_page_token="abc", + ), + oracledatabase.ListAutonomousDatabasesResponse( + autonomous_databases=[], + next_page_token="def", + ), + oracledatabase.ListAutonomousDatabasesResponse( + autonomous_databases=[ + autonomous_database.AutonomousDatabase(), + ], + next_page_token="ghi", + ), + oracledatabase.ListAutonomousDatabasesResponse( + autonomous_databases=[ + autonomous_database.AutonomousDatabase(), + autonomous_database.AutonomousDatabase(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + oracledatabase.ListAutonomousDatabasesResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_autonomous_databases(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, autonomous_database.AutonomousDatabase) for i in results + ) + + pages = list(client.list_autonomous_databases(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_autonomous_database_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as 
wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.get_autonomous_database + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.get_autonomous_database + ] = mock_rpc + + request = {} + client.get_autonomous_database(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_autonomous_database(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_autonomous_database_rest_required_fields( + request_type=oracledatabase.GetAutonomousDatabaseRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_autonomous_database._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_autonomous_database._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required 
fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = autonomous_database.AutonomousDatabase() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = autonomous_database.AutonomousDatabase.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_autonomous_database(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_autonomous_database_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_autonomous_database._get_unset_required_fields({}) + assert set(unset_fields) == 
(set(()) & set(("name",))) + + +def test_get_autonomous_database_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = autonomous_database.AutonomousDatabase() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = autonomous_database.AutonomousDatabase.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_autonomous_database(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/autonomousDatabases/*}" + % client.transport._host, + args[1], + ) + + +def test_get_autonomous_database_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_autonomous_database( + oracledatabase.GetAutonomousDatabaseRequest(), + name="name_value", + ) + + +def test_create_autonomous_database_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_autonomous_database + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_autonomous_database + ] = mock_rpc + + request = {} + client.create_autonomous_database(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_autonomous_database(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_autonomous_database_rest_required_fields( + request_type=oracledatabase.CreateAutonomousDatabaseRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["autonomous_database_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + assert "autonomousDatabaseId" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_autonomous_database._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "autonomousDatabaseId" in jsonified_request + assert ( + jsonified_request["autonomousDatabaseId"] + == request_init["autonomous_database_id"] + ) + + jsonified_request["parent"] = "parent_value" + jsonified_request["autonomousDatabaseId"] = "autonomous_database_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_autonomous_database._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "autonomous_database_id", + "request_id", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "autonomousDatabaseId" in jsonified_request + assert jsonified_request["autonomousDatabaseId"] == "autonomous_database_id_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.create_autonomous_database(request) + + expected_params = [ + ( + "autonomousDatabaseId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_autonomous_database_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_autonomous_database._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "autonomousDatabaseId", + "requestId", + ) + ) + & set( + ( + "parent", + "autonomousDatabaseId", + "autonomousDatabase", + ) + ) + ) + + +def test_create_autonomous_database_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + autonomous_database=gco_autonomous_database.AutonomousDatabase( + name="name_value" + ), + autonomous_database_id="autonomous_database_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.create_autonomous_database(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/autonomousDatabases" + % client.transport._host, + args[1], + ) + + +def test_create_autonomous_database_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_autonomous_database( + oracledatabase.CreateAutonomousDatabaseRequest(), + parent="parent_value", + autonomous_database=gco_autonomous_database.AutonomousDatabase( + name="name_value" + ), + autonomous_database_id="autonomous_database_id_value", + ) + + +def test_update_autonomous_database_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.update_autonomous_database + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.update_autonomous_database + ] = mock_rpc + + request = {} + client.update_autonomous_database(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_autonomous_database(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_update_autonomous_database_rest_required_fields( + request_type=oracledatabase.UpdateAutonomousDatabaseRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_autonomous_database._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_autonomous_database._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "request_id", + "update_mask", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.update_autonomous_database(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_autonomous_database_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_autonomous_database._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "requestId", + "updateMask", + ) + ) + & set(("autonomousDatabase",)) + ) + + +def test_update_autonomous_database_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "autonomous_database": { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + } + + # get truthy value for each flattened field + mock_args = dict( + autonomous_database=gco_autonomous_database.AutonomousDatabase( + name="name_value" + ), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.update_autonomous_database(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{autonomous_database.name=projects/*/locations/*/autonomousDatabases/*}" + % client.transport._host, + args[1], + ) + + +def test_update_autonomous_database_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_autonomous_database( + oracledatabase.UpdateAutonomousDatabaseRequest(), + autonomous_database=gco_autonomous_database.AutonomousDatabase( + name="name_value" + ), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_autonomous_database_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_autonomous_database + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_autonomous_database + ] = mock_rpc + + request = {} + client.delete_autonomous_database(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.delete_autonomous_database(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_autonomous_database_rest_required_fields( + request_type=oracledatabase.DeleteAutonomousDatabaseRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_autonomous_database._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_autonomous_database._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("request_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.delete_autonomous_database(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_autonomous_database_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_autonomous_database._get_unset_required_fields({}) + assert set(unset_fields) == (set(("requestId",)) & set(("name",))) + + +def test_delete_autonomous_database_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.delete_autonomous_database(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/autonomousDatabases/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_autonomous_database_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_autonomous_database( + oracledatabase.DeleteAutonomousDatabaseRequest(), + name="name_value", + ) + + +def test_restore_autonomous_database_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.restore_autonomous_database + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.restore_autonomous_database + ] = mock_rpc + + request = {} + client.restore_autonomous_database(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.restore_autonomous_database(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_restore_autonomous_database_rest_required_fields( + request_type=oracledatabase.RestoreAutonomousDatabaseRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).restore_autonomous_database._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).restore_autonomous_database._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.restore_autonomous_database(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_restore_autonomous_database_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.restore_autonomous_database._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "name", + "restoreTime", + ) + ) + ) + + +def test_restore_autonomous_database_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + restore_time=timestamp_pb2.Timestamp(seconds=751), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.restore_autonomous_database(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/autonomousDatabases/*}:restore" + % client.transport._host, + args[1], + ) + + +def test_restore_autonomous_database_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.restore_autonomous_database( + oracledatabase.RestoreAutonomousDatabaseRequest(), + name="name_value", + restore_time=timestamp_pb2.Timestamp(seconds=751), + ) + + +def test_generate_autonomous_database_wallet_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.generate_autonomous_database_wallet + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.generate_autonomous_database_wallet + ] = mock_rpc + + request = {} + client.generate_autonomous_database_wallet(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.generate_autonomous_database_wallet(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_generate_autonomous_database_wallet_rest_required_fields( + request_type=oracledatabase.GenerateAutonomousDatabaseWalletRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request_init["password"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).generate_autonomous_database_wallet._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + jsonified_request["password"] = "password_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).generate_autonomous_database_wallet._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + assert "password" in jsonified_request + assert jsonified_request["password"] == "password_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = oracledatabase.GenerateAutonomousDatabaseWalletResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.GenerateAutonomousDatabaseWalletResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.generate_autonomous_database_wallet(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_generate_autonomous_database_wallet_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = ( + transport.generate_autonomous_database_wallet._get_unset_required_fields({}) + ) + assert set(unset_fields) == ( + set(()) + & set( + ( + "name", + "password", + ) + ) + ) + + +def test_generate_autonomous_database_wallet_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = oracledatabase.GenerateAutonomousDatabaseWalletResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + type_=autonomous_database.GenerateType.ALL, + is_regional=True, + password="password_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = oracledatabase.GenerateAutonomousDatabaseWalletResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.generate_autonomous_database_wallet(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/autonomousDatabases/*}:generateWallet" + % client.transport._host, + args[1], + ) + + +def test_generate_autonomous_database_wallet_rest_flattened_error( + transport: str = "rest", +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.generate_autonomous_database_wallet( + oracledatabase.GenerateAutonomousDatabaseWalletRequest(), + name="name_value", + type_=autonomous_database.GenerateType.ALL, + is_regional=True, + password="password_value", + ) + + +def test_list_autonomous_db_versions_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_autonomous_db_versions + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_autonomous_db_versions + ] = mock_rpc + + request = {} + client.list_autonomous_db_versions(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_autonomous_db_versions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_autonomous_db_versions_rest_required_fields( + request_type=oracledatabase.ListAutonomousDbVersionsRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_autonomous_db_versions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_autonomous_db_versions._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = oracledatabase.ListAutonomousDbVersionsResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.ListAutonomousDbVersionsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_autonomous_db_versions(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_autonomous_db_versions_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_autonomous_db_versions._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_autonomous_db_versions_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = oracledatabase.ListAutonomousDbVersionsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = oracledatabase.ListAutonomousDbVersionsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_autonomous_db_versions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/autonomousDbVersions" + % client.transport._host, + args[1], + ) + + +def test_list_autonomous_db_versions_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_autonomous_db_versions( + oracledatabase.ListAutonomousDbVersionsRequest(), + parent="parent_value", + ) + + +def test_list_autonomous_db_versions_rest_pager(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + oracledatabase.ListAutonomousDbVersionsResponse( + autonomous_db_versions=[ + autonomous_db_version.AutonomousDbVersion(), + autonomous_db_version.AutonomousDbVersion(), + autonomous_db_version.AutonomousDbVersion(), + ], + next_page_token="abc", + ), + oracledatabase.ListAutonomousDbVersionsResponse( + autonomous_db_versions=[], + next_page_token="def", + ), + oracledatabase.ListAutonomousDbVersionsResponse( + autonomous_db_versions=[ + autonomous_db_version.AutonomousDbVersion(), + ], + next_page_token="ghi", + ), + oracledatabase.ListAutonomousDbVersionsResponse( + autonomous_db_versions=[ + autonomous_db_version.AutonomousDbVersion(), + autonomous_db_version.AutonomousDbVersion(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + oracledatabase.ListAutonomousDbVersionsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_autonomous_db_versions(request=sample_request) + + results = list(pager) + 
assert len(results) == 6 + assert all( + isinstance(i, autonomous_db_version.AutonomousDbVersion) for i in results + ) + + pages = list(client.list_autonomous_db_versions(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_list_autonomous_database_character_sets_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_autonomous_database_character_sets + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_autonomous_database_character_sets + ] = mock_rpc + + request = {} + client.list_autonomous_database_character_sets(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_autonomous_database_character_sets(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_autonomous_database_character_sets_rest_required_fields( + request_type=oracledatabase.ListAutonomousDatabaseCharacterSetsRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_autonomous_database_character_sets._get_unset_required_fields( + jsonified_request + ) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_autonomous_database_character_sets._get_unset_required_fields( + jsonified_request + ) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "filter", + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = oracledatabase.ListAutonomousDatabaseCharacterSetsResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = ( + oracledatabase.ListAutonomousDatabaseCharacterSetsResponse.pb( + return_value + ) + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_autonomous_database_character_sets(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_autonomous_database_character_sets_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = ( + transport.list_autonomous_database_character_sets._get_unset_required_fields({}) + ) + assert set(unset_fields) == ( + set( + ( + "filter", + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_autonomous_database_character_sets_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = oracledatabase.ListAutonomousDatabaseCharacterSetsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = oracledatabase.ListAutonomousDatabaseCharacterSetsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_autonomous_database_character_sets(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/autonomousDatabaseCharacterSets" + % client.transport._host, + args[1], + ) + + +def test_list_autonomous_database_character_sets_rest_flattened_error( + transport: str = "rest", +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_autonomous_database_character_sets( + oracledatabase.ListAutonomousDatabaseCharacterSetsRequest(), + parent="parent_value", + ) + + +def test_list_autonomous_database_character_sets_rest_pager(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + oracledatabase.ListAutonomousDatabaseCharacterSetsResponse( + autonomous_database_character_sets=[ + autonomous_database_character_set.AutonomousDatabaseCharacterSet(), + autonomous_database_character_set.AutonomousDatabaseCharacterSet(), + autonomous_database_character_set.AutonomousDatabaseCharacterSet(), + ], + next_page_token="abc", + ), + oracledatabase.ListAutonomousDatabaseCharacterSetsResponse( + autonomous_database_character_sets=[], + next_page_token="def", + ), + oracledatabase.ListAutonomousDatabaseCharacterSetsResponse( + autonomous_database_character_sets=[ + autonomous_database_character_set.AutonomousDatabaseCharacterSet(), + ], + next_page_token="ghi", + ), + oracledatabase.ListAutonomousDatabaseCharacterSetsResponse( + autonomous_database_character_sets=[ + autonomous_database_character_set.AutonomousDatabaseCharacterSet(), + autonomous_database_character_set.AutonomousDatabaseCharacterSet(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + oracledatabase.ListAutonomousDatabaseCharacterSetsResponse.to_json(x) + for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + 
return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_autonomous_database_character_sets(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance( + i, autonomous_database_character_set.AutonomousDatabaseCharacterSet + ) + for i in results + ) + + pages = list( + client.list_autonomous_database_character_sets(request=sample_request).pages + ) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_list_autonomous_database_backups_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_autonomous_database_backups + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_autonomous_database_backups + ] = mock_rpc + + request = {} + client.list_autonomous_database_backups(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_autonomous_database_backups(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_autonomous_database_backups_rest_required_fields( + request_type=oracledatabase.ListAutonomousDatabaseBackupsRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_autonomous_database_backups._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_autonomous_database_backups._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "filter", + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = oracledatabase.ListAutonomousDatabaseBackupsResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.ListAutonomousDatabaseBackupsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_autonomous_database_backups(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_autonomous_database_backups_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = ( + transport.list_autonomous_database_backups._get_unset_required_fields({}) + ) + assert set(unset_fields) == ( + set( + ( + "filter", + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_autonomous_database_backups_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = oracledatabase.ListAutonomousDatabaseBackupsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = oracledatabase.ListAutonomousDatabaseBackupsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_autonomous_database_backups(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/autonomousDatabaseBackups" + % client.transport._host, + args[1], + ) + + +def test_list_autonomous_database_backups_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_autonomous_database_backups( + oracledatabase.ListAutonomousDatabaseBackupsRequest(), + parent="parent_value", + ) + + +def test_list_autonomous_database_backups_rest_pager(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + oracledatabase.ListAutonomousDatabaseBackupsResponse( + autonomous_database_backups=[ + autonomous_db_backup.AutonomousDatabaseBackup(), + autonomous_db_backup.AutonomousDatabaseBackup(), + autonomous_db_backup.AutonomousDatabaseBackup(), + ], + next_page_token="abc", + ), + oracledatabase.ListAutonomousDatabaseBackupsResponse( + autonomous_database_backups=[], + next_page_token="def", + ), + oracledatabase.ListAutonomousDatabaseBackupsResponse( + autonomous_database_backups=[ + autonomous_db_backup.AutonomousDatabaseBackup(), + ], + next_page_token="ghi", + ), + oracledatabase.ListAutonomousDatabaseBackupsResponse( + autonomous_database_backups=[ + autonomous_db_backup.AutonomousDatabaseBackup(), + autonomous_db_backup.AutonomousDatabaseBackup(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + oracledatabase.ListAutonomousDatabaseBackupsResponse.to_json(x) + for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = 
client.list_autonomous_database_backups(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, autonomous_db_backup.AutonomousDatabaseBackup) + for i in results + ) + + pages = list( + client.list_autonomous_database_backups(request=sample_request).pages + ) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_stop_autonomous_database_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.stop_autonomous_database + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.stop_autonomous_database + ] = mock_rpc + + request = {} + client.stop_autonomous_database(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.stop_autonomous_database(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_stop_autonomous_database_rest_required_fields( + request_type=oracledatabase.StopAutonomousDatabaseRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).stop_autonomous_database._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).stop_autonomous_database._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.stop_autonomous_database(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_stop_autonomous_database_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.stop_autonomous_database._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_stop_autonomous_database_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.stop_autonomous_database(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/autonomousDatabases/*}:stop" + % client.transport._host, + args[1], + ) + + +def test_stop_autonomous_database_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.stop_autonomous_database( + oracledatabase.StopAutonomousDatabaseRequest(), + name="name_value", + ) + + +def test_start_autonomous_database_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.start_autonomous_database + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.start_autonomous_database + ] = mock_rpc + + request = {} + client.start_autonomous_database(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.start_autonomous_database(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_start_autonomous_database_rest_required_fields( + request_type=oracledatabase.StartAutonomousDatabaseRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).start_autonomous_database._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).start_autonomous_database._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.start_autonomous_database(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_start_autonomous_database_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.start_autonomous_database._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_start_autonomous_database_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.start_autonomous_database(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/autonomousDatabases/*}:start" + % client.transport._host, + args[1], + ) + + +def test_start_autonomous_database_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.start_autonomous_database( + oracledatabase.StartAutonomousDatabaseRequest(), + name="name_value", + ) + + +def test_restart_autonomous_database_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.restart_autonomous_database + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.restart_autonomous_database + ] = mock_rpc + + request = {} + client.restart_autonomous_database(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.restart_autonomous_database(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_restart_autonomous_database_rest_required_fields( + request_type=oracledatabase.RestartAutonomousDatabaseRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).restart_autonomous_database._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).restart_autonomous_database._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.restart_autonomous_database(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_restart_autonomous_database_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.restart_autonomous_database._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_restart_autonomous_database_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.restart_autonomous_database(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/autonomousDatabases/*}:restart" + % client.transport._host, + args[1], + ) + + +def test_restart_autonomous_database_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.restart_autonomous_database( + oracledatabase.RestartAutonomousDatabaseRequest(), + name="name_value", + ) + + +def test_switchover_autonomous_database_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.switchover_autonomous_database + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.switchover_autonomous_database + ] = mock_rpc + + request = {} + client.switchover_autonomous_database(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.switchover_autonomous_database(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_switchover_autonomous_database_rest_required_fields( + request_type=oracledatabase.SwitchoverAutonomousDatabaseRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request_init["peer_autonomous_database"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).switchover_autonomous_database._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + jsonified_request["peerAutonomousDatabase"] = "peer_autonomous_database_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).switchover_autonomous_database._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + assert "peerAutonomousDatabase" in jsonified_request + assert ( + jsonified_request["peerAutonomousDatabase"] == "peer_autonomous_database_value" + ) + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an 
appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.switchover_autonomous_database(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_switchover_autonomous_database_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.switchover_autonomous_database._get_unset_required_fields( + {} + ) + assert set(unset_fields) == ( + set(()) + & set( + ( + "name", + "peerAutonomousDatabase", + ) + ) + ) + + +def test_switchover_autonomous_database_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + peer_autonomous_database="peer_autonomous_database_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.switchover_autonomous_database(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/autonomousDatabases/*}:switchover" + % client.transport._host, + args[1], + ) + + +def test_switchover_autonomous_database_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.switchover_autonomous_database( + oracledatabase.SwitchoverAutonomousDatabaseRequest(), + name="name_value", + peer_autonomous_database="peer_autonomous_database_value", + ) + + +def test_failover_autonomous_database_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.failover_autonomous_database + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.failover_autonomous_database + ] = mock_rpc + + request = {} + client.failover_autonomous_database(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.failover_autonomous_database(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_failover_autonomous_database_rest_required_fields( + request_type=oracledatabase.FailoverAutonomousDatabaseRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request_init["peer_autonomous_database"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).failover_autonomous_database._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + jsonified_request["peerAutonomousDatabase"] = "peer_autonomous_database_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).failover_autonomous_database._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + assert "peerAutonomousDatabase" in jsonified_request + assert ( + jsonified_request["peerAutonomousDatabase"] == "peer_autonomous_database_value" + ) + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value 
for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.failover_autonomous_database(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_failover_autonomous_database_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.failover_autonomous_database._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "name", + "peerAutonomousDatabase", + ) + ) + ) + + +def test_failover_autonomous_database_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + peer_autonomous_database="peer_autonomous_database_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.failover_autonomous_database(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/autonomousDatabases/*}:failover" + % client.transport._host, + args[1], + ) + + +def test_failover_autonomous_database_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.failover_autonomous_database( + oracledatabase.FailoverAutonomousDatabaseRequest(), + name="name_value", + peer_autonomous_database="peer_autonomous_database_value", + ) + + +def test_list_odb_networks_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_odb_networks in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_odb_networks + ] = mock_rpc + + request = {} + client.list_odb_networks(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_odb_networks(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_odb_networks_rest_required_fields( + request_type=odb_network.ListOdbNetworksRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_odb_networks._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_odb_networks._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "filter", + "order_by", + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = odb_network.ListOdbNetworksResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = odb_network.ListOdbNetworksResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_odb_networks(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_odb_networks_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_odb_networks._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "orderBy", + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_odb_networks_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = odb_network.ListOdbNetworksResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = odb_network.ListOdbNetworksResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_odb_networks(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/odbNetworks" + % client.transport._host, + args[1], + ) + + +def test_list_odb_networks_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_odb_networks( + odb_network.ListOdbNetworksRequest(), + parent="parent_value", + ) + + +def test_list_odb_networks_rest_pager(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + odb_network.ListOdbNetworksResponse( + odb_networks=[ + odb_network.OdbNetwork(), + odb_network.OdbNetwork(), + odb_network.OdbNetwork(), + ], + next_page_token="abc", + ), + odb_network.ListOdbNetworksResponse( + odb_networks=[], + next_page_token="def", + ), + odb_network.ListOdbNetworksResponse( + odb_networks=[ + odb_network.OdbNetwork(), + ], + next_page_token="ghi", + ), + odb_network.ListOdbNetworksResponse( + odb_networks=[ + odb_network.OdbNetwork(), + odb_network.OdbNetwork(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + odb_network.ListOdbNetworksResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_odb_networks(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, odb_network.OdbNetwork) for i in results) + + pages = list(client.list_odb_networks(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_odb_network_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # 
Ensure method has been cached + assert client._transport.get_odb_network in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_odb_network] = mock_rpc + + request = {} + client.get_odb_network(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_odb_network(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_odb_network_rest_required_fields( + request_type=odb_network.GetOdbNetworkRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_odb_network._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_odb_network._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate 
value for the returned response. + return_value = odb_network.OdbNetwork() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = odb_network.OdbNetwork.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_odb_network(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_odb_network_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_odb_network._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_get_odb_network_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = odb_network.OdbNetwork() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/odbNetworks/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = odb_network.OdbNetwork.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_odb_network(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/odbNetworks/*}" + % client.transport._host, + args[1], + ) + + +def test_get_odb_network_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_odb_network( + odb_network.GetOdbNetworkRequest(), + name="name_value", + ) + + +def test_create_odb_network_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_odb_network in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_odb_network + ] = mock_rpc + + request = {} + client.create_odb_network(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_odb_network(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_odb_network_rest_required_fields( + request_type=gco_odb_network.CreateOdbNetworkRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["odb_network_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + assert "odbNetworkId" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_odb_network._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "odbNetworkId" in jsonified_request + assert jsonified_request["odbNetworkId"] == request_init["odb_network_id"] + + jsonified_request["parent"] = "parent_value" + jsonified_request["odbNetworkId"] = "odb_network_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_odb_network._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "odb_network_id", + "request_id", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "odbNetworkId" in jsonified_request + assert jsonified_request["odbNetworkId"] == "odb_network_id_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.create_odb_network(request) + + expected_params = [ + ( + "odbNetworkId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_odb_network_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_odb_network._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "odbNetworkId", + "requestId", + ) + ) + & set( + ( + "parent", + "odbNetworkId", + "odbNetwork", + ) + ) + ) + + +def test_create_odb_network_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + odb_network=gco_odb_network.OdbNetwork(name="name_value"), + odb_network_id="odb_network_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.create_odb_network(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/odbNetworks" + % client.transport._host, + args[1], + ) + + +def test_create_odb_network_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_odb_network( + gco_odb_network.CreateOdbNetworkRequest(), + parent="parent_value", + odb_network=gco_odb_network.OdbNetwork(name="name_value"), + odb_network_id="odb_network_id_value", + ) + + +def test_delete_odb_network_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_odb_network in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_odb_network + ] = mock_rpc + + request = {} + client.delete_odb_network(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.delete_odb_network(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_odb_network_rest_required_fields( + request_type=odb_network.DeleteOdbNetworkRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_odb_network._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_odb_network._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("request_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.delete_odb_network(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_odb_network_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_odb_network._get_unset_required_fields({}) + assert set(unset_fields) == (set(("requestId",)) & set(("name",))) + + +def test_delete_odb_network_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/odbNetworks/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.delete_odb_network(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/odbNetworks/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_odb_network_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_odb_network( + odb_network.DeleteOdbNetworkRequest(), + name="name_value", + ) + + +def test_list_odb_subnets_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_odb_subnets in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_odb_subnets + ] = mock_rpc + + request = {} + client.list_odb_subnets(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_odb_subnets(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_odb_subnets_rest_required_fields( + request_type=odb_subnet.ListOdbSubnetsRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_odb_subnets._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_odb_subnets._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "filter", + "order_by", + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = odb_subnet.ListOdbSubnetsResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = odb_subnet.ListOdbSubnetsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_odb_subnets(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_odb_subnets_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_odb_subnets._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "orderBy", + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_odb_subnets_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = odb_subnet.ListOdbSubnetsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/locations/sample2/odbNetworks/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = odb_subnet.ListOdbSubnetsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_odb_subnets(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*/odbNetworks/*}/odbSubnets" + % client.transport._host, + args[1], + ) + + +def test_list_odb_subnets_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_odb_subnets( + odb_subnet.ListOdbSubnetsRequest(), + parent="parent_value", + ) + + +def test_list_odb_subnets_rest_pager(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + odb_subnet.ListOdbSubnetsResponse( + odb_subnets=[ + odb_subnet.OdbSubnet(), + odb_subnet.OdbSubnet(), + odb_subnet.OdbSubnet(), + ], + next_page_token="abc", + ), + odb_subnet.ListOdbSubnetsResponse( + odb_subnets=[], + next_page_token="def", + ), + odb_subnet.ListOdbSubnetsResponse( + odb_subnets=[ + odb_subnet.OdbSubnet(), + ], + next_page_token="ghi", + ), + odb_subnet.ListOdbSubnetsResponse( + odb_subnets=[ + odb_subnet.OdbSubnet(), + odb_subnet.OdbSubnet(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(odb_subnet.ListOdbSubnetsResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = { + "parent": "projects/sample1/locations/sample2/odbNetworks/sample3" + } + + pager = client.list_odb_subnets(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, odb_subnet.OdbSubnet) for i in results) + + pages = list(client.list_odb_subnets(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_odb_subnet_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure 
method has been cached + assert client._transport.get_odb_subnet in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_odb_subnet] = mock_rpc + + request = {} + client.get_odb_subnet(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_odb_subnet(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_odb_subnet_rest_required_fields( + request_type=odb_subnet.GetOdbSubnetRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_odb_subnet._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_odb_subnet._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the 
returned response. + return_value = odb_subnet.OdbSubnet() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = odb_subnet.OdbSubnet.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_odb_subnet(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_odb_subnet_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_odb_subnet._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_get_odb_subnet_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = odb_subnet.OdbSubnet() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/odbNetworks/sample3/odbSubnets/sample4" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = odb_subnet.OdbSubnet.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_odb_subnet(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/odbNetworks/*/odbSubnets/*}" + % client.transport._host, + args[1], + ) + + +def test_get_odb_subnet_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_odb_subnet( + odb_subnet.GetOdbSubnetRequest(), + name="name_value", + ) + + +def test_create_odb_subnet_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.create_odb_subnet in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_odb_subnet + ] = mock_rpc + + request = {} + client.create_odb_subnet(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_odb_subnet(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_odb_subnet_rest_required_fields( + request_type=gco_odb_subnet.CreateOdbSubnetRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["odb_subnet_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + assert "odbSubnetId" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_odb_subnet._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "odbSubnetId" in jsonified_request + assert jsonified_request["odbSubnetId"] == request_init["odb_subnet_id"] + + jsonified_request["parent"] = "parent_value" + jsonified_request["odbSubnetId"] = "odb_subnet_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_odb_subnet._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "odb_subnet_id", + "request_id", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "odbSubnetId" in jsonified_request + assert jsonified_request["odbSubnetId"] == "odb_subnet_id_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.create_odb_subnet(request) + + expected_params = [ + ( + "odbSubnetId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_odb_subnet_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_odb_subnet._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "odbSubnetId", + "requestId", + ) + ) + & set( + ( + "parent", + "odbSubnetId", + "odbSubnet", + ) + ) + ) + + +def test_create_odb_subnet_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/locations/sample2/odbNetworks/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + odb_subnet=gco_odb_subnet.OdbSubnet(name="name_value"), + odb_subnet_id="odb_subnet_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.create_odb_subnet(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*/odbNetworks/*}/odbSubnets" + % client.transport._host, + args[1], + ) + + +def test_create_odb_subnet_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_odb_subnet( + gco_odb_subnet.CreateOdbSubnetRequest(), + parent="parent_value", + odb_subnet=gco_odb_subnet.OdbSubnet(name="name_value"), + odb_subnet_id="odb_subnet_id_value", + ) + + +def test_delete_odb_subnet_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_odb_subnet in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_odb_subnet + ] = mock_rpc + + request = {} + client.delete_odb_subnet(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.delete_odb_subnet(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_odb_subnet_rest_required_fields( + request_type=odb_subnet.DeleteOdbSubnetRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_odb_subnet._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_odb_subnet._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("request_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.delete_odb_subnet(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_odb_subnet_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_odb_subnet._get_unset_required_fields({}) + assert set(unset_fields) == (set(("requestId",)) & set(("name",))) + + +def test_delete_odb_subnet_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/odbNetworks/sample3/odbSubnets/sample4" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.delete_odb_subnet(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/odbNetworks/*/odbSubnets/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_odb_subnet_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_odb_subnet( + odb_subnet.DeleteOdbSubnetRequest(), + name="name_value", + ) + + +def test_list_exadb_vm_clusters_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_exadb_vm_clusters + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_exadb_vm_clusters + ] = mock_rpc + + request = {} + client.list_exadb_vm_clusters(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_exadb_vm_clusters(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_exadb_vm_clusters_rest_required_fields( + request_type=oracledatabase.ListExadbVmClustersRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_exadb_vm_clusters._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_exadb_vm_clusters._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "filter", + "order_by", + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = oracledatabase.ListExadbVmClustersResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.ListExadbVmClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_exadb_vm_clusters(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_exadb_vm_clusters_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_exadb_vm_clusters._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "orderBy", + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_exadb_vm_clusters_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = oracledatabase.ListExadbVmClustersResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = oracledatabase.ListExadbVmClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_exadb_vm_clusters(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/exadbVmClusters" + % client.transport._host, + args[1], + ) + + +def test_list_exadb_vm_clusters_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_exadb_vm_clusters( + oracledatabase.ListExadbVmClustersRequest(), + parent="parent_value", + ) + + +def test_list_exadb_vm_clusters_rest_pager(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + oracledatabase.ListExadbVmClustersResponse( + exadb_vm_clusters=[ + exadb_vm_cluster.ExadbVmCluster(), + exadb_vm_cluster.ExadbVmCluster(), + exadb_vm_cluster.ExadbVmCluster(), + ], + next_page_token="abc", + ), + oracledatabase.ListExadbVmClustersResponse( + exadb_vm_clusters=[], + next_page_token="def", + ), + oracledatabase.ListExadbVmClustersResponse( + exadb_vm_clusters=[ + exadb_vm_cluster.ExadbVmCluster(), + ], + next_page_token="ghi", + ), + oracledatabase.ListExadbVmClustersResponse( + exadb_vm_clusters=[ + exadb_vm_cluster.ExadbVmCluster(), + exadb_vm_cluster.ExadbVmCluster(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + oracledatabase.ListExadbVmClustersResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_exadb_vm_clusters(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, exadb_vm_cluster.ExadbVmCluster) for i in results) + + pages = list(client.list_exadb_vm_clusters(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_exadb_vm_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + 
transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.get_exadb_vm_cluster in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.get_exadb_vm_cluster + ] = mock_rpc + + request = {} + client.get_exadb_vm_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_exadb_vm_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_exadb_vm_cluster_rest_required_fields( + request_type=oracledatabase.GetExadbVmClusterRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_exadb_vm_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_exadb_vm_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 
"name_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = exadb_vm_cluster.ExadbVmCluster() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = exadb_vm_cluster.ExadbVmCluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_exadb_vm_cluster(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_exadb_vm_cluster_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_exadb_vm_cluster._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_get_exadb_vm_cluster_rest_flattened(): + client = OracleDatabaseClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = exadb_vm_cluster.ExadbVmCluster() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/exadbVmClusters/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = exadb_vm_cluster.ExadbVmCluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_exadb_vm_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/exadbVmClusters/*}" + % client.transport._host, + args[1], + ) + + +def test_get_exadb_vm_cluster_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_exadb_vm_cluster( + oracledatabase.GetExadbVmClusterRequest(), + name="name_value", + ) + + +def test_create_exadb_vm_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_exadb_vm_cluster + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_exadb_vm_cluster + ] = mock_rpc + + request = {} + client.create_exadb_vm_cluster(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_exadb_vm_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_exadb_vm_cluster_rest_required_fields( + request_type=oracledatabase.CreateExadbVmClusterRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["exadb_vm_cluster_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + assert "exadbVmClusterId" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_exadb_vm_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "exadbVmClusterId" in jsonified_request + assert jsonified_request["exadbVmClusterId"] == request_init["exadb_vm_cluster_id"] + + jsonified_request["parent"] = "parent_value" + jsonified_request["exadbVmClusterId"] = "exadb_vm_cluster_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_exadb_vm_cluster._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "exadb_vm_cluster_id", + "request_id", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "exadbVmClusterId" in jsonified_request + assert jsonified_request["exadbVmClusterId"] == "exadb_vm_cluster_id_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.create_exadb_vm_cluster(request) + + expected_params = [ + ( + "exadbVmClusterId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_exadb_vm_cluster_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_exadb_vm_cluster._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "exadbVmClusterId", + "requestId", + ) + ) + & set( + ( + "parent", + "exadbVmClusterId", + "exadbVmCluster", + ) + ) + ) + + +def test_create_exadb_vm_cluster_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + exadb_vm_cluster=gco_exadb_vm_cluster.ExadbVmCluster(name="name_value"), + exadb_vm_cluster_id="exadb_vm_cluster_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.create_exadb_vm_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/exadbVmClusters" + % client.transport._host, + args[1], + ) + + +def test_create_exadb_vm_cluster_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_exadb_vm_cluster( + oracledatabase.CreateExadbVmClusterRequest(), + parent="parent_value", + exadb_vm_cluster=gco_exadb_vm_cluster.ExadbVmCluster(name="name_value"), + exadb_vm_cluster_id="exadb_vm_cluster_id_value", + ) + + +def test_delete_exadb_vm_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_exadb_vm_cluster + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_exadb_vm_cluster + ] = mock_rpc + + request = {} + client.delete_exadb_vm_cluster(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.delete_exadb_vm_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_exadb_vm_cluster_rest_required_fields( + request_type=oracledatabase.DeleteExadbVmClusterRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_exadb_vm_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_exadb_vm_cluster._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("request_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.delete_exadb_vm_cluster(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_exadb_vm_cluster_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_exadb_vm_cluster._get_unset_required_fields({}) + assert set(unset_fields) == (set(("requestId",)) & set(("name",))) + + +def test_delete_exadb_vm_cluster_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/exadbVmClusters/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.delete_exadb_vm_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/exadbVmClusters/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_exadb_vm_cluster_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_exadb_vm_cluster( + oracledatabase.DeleteExadbVmClusterRequest(), + name="name_value", + ) + + +def test_update_exadb_vm_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.update_exadb_vm_cluster + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.update_exadb_vm_cluster + ] = mock_rpc + + request = {} + client.update_exadb_vm_cluster(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_exadb_vm_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_update_exadb_vm_cluster_rest_required_fields( + request_type=oracledatabase.UpdateExadbVmClusterRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_exadb_vm_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_exadb_vm_cluster._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "request_id", + "update_mask", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.update_exadb_vm_cluster(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_exadb_vm_cluster_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_exadb_vm_cluster._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "requestId", + "updateMask", + ) + ) + & set(("exadbVmCluster",)) + ) + + +def test_update_exadb_vm_cluster_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "exadb_vm_cluster": { + "name": "projects/sample1/locations/sample2/exadbVmClusters/sample3" + } + } + + # get truthy value for each flattened field + mock_args = dict( + exadb_vm_cluster=gco_exadb_vm_cluster.ExadbVmCluster(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.update_exadb_vm_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{exadb_vm_cluster.name=projects/*/locations/*/exadbVmClusters/*}" + % client.transport._host, + args[1], + ) + + +def test_update_exadb_vm_cluster_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_exadb_vm_cluster( + oracledatabase.UpdateExadbVmClusterRequest(), + exadb_vm_cluster=gco_exadb_vm_cluster.ExadbVmCluster(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_remove_virtual_machine_exadb_vm_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.remove_virtual_machine_exadb_vm_cluster + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.remove_virtual_machine_exadb_vm_cluster + ] = mock_rpc + + request = {} + client.remove_virtual_machine_exadb_vm_cluster(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.remove_virtual_machine_exadb_vm_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_remove_virtual_machine_exadb_vm_cluster_rest_required_fields( + request_type=oracledatabase.RemoveVirtualMachineExadbVmClusterRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request_init["hostnames"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).remove_virtual_machine_exadb_vm_cluster._get_unset_required_fields( + jsonified_request + ) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + jsonified_request["hostnames"] = "hostnames_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).remove_virtual_machine_exadb_vm_cluster._get_unset_required_fields( + jsonified_request + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + assert "hostnames" in jsonified_request + assert jsonified_request["hostnames"] == "hostnames_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.remove_virtual_machine_exadb_vm_cluster(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_remove_virtual_machine_exadb_vm_cluster_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = ( + transport.remove_virtual_machine_exadb_vm_cluster._get_unset_required_fields({}) + ) + assert set(unset_fields) == ( + set(()) + & set( + ( + "name", + "hostnames", + ) + ) + ) + + +def test_remove_virtual_machine_exadb_vm_cluster_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/exadbVmClusters/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + hostnames=["hostnames_value"], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.remove_virtual_machine_exadb_vm_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/exadbVmClusters/*}:removeVirtualMachine" + % client.transport._host, + args[1], + ) + + +def test_remove_virtual_machine_exadb_vm_cluster_rest_flattened_error( + transport: str = "rest", +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.remove_virtual_machine_exadb_vm_cluster( + oracledatabase.RemoveVirtualMachineExadbVmClusterRequest(), + name="name_value", + hostnames=["hostnames_value"], + ) + + +def test_list_exascale_db_storage_vaults_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_exascale_db_storage_vaults + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_exascale_db_storage_vaults + ] = mock_rpc + + request = {} + client.list_exascale_db_storage_vaults(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_exascale_db_storage_vaults(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_exascale_db_storage_vaults_rest_required_fields( + request_type=exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_exascale_db_storage_vaults._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_exascale_db_storage_vaults._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "filter", + "order_by", + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = ( + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse.pb( + return_value + ) + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_exascale_db_storage_vaults(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_exascale_db_storage_vaults_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_exascale_db_storage_vaults._get_unset_required_fields( + {} + ) + assert set(unset_fields) == ( + set( + ( + "filter", + "orderBy", + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_exascale_db_storage_vaults_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_exascale_db_storage_vaults(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/exascaleDbStorageVaults" + % client.transport._host, + args[1], + ) + + +def test_list_exascale_db_storage_vaults_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_exascale_db_storage_vaults( + exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest(), + parent="parent_value", + ) + + +def test_list_exascale_db_storage_vaults_rest_pager(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse( + exascale_db_storage_vaults=[ + exascale_db_storage_vault.ExascaleDbStorageVault(), + exascale_db_storage_vault.ExascaleDbStorageVault(), + exascale_db_storage_vault.ExascaleDbStorageVault(), + ], + next_page_token="abc", + ), + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse( + exascale_db_storage_vaults=[], + next_page_token="def", + ), + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse( + exascale_db_storage_vaults=[ + exascale_db_storage_vault.ExascaleDbStorageVault(), + ], + next_page_token="ghi", + ), + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse( + exascale_db_storage_vaults=[ + exascale_db_storage_vault.ExascaleDbStorageVault(), + exascale_db_storage_vault.ExascaleDbStorageVault(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse.to_json(x) + for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + 
sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_exascale_db_storage_vaults(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, exascale_db_storage_vault.ExascaleDbStorageVault) + for i in results + ) + + pages = list( + client.list_exascale_db_storage_vaults(request=sample_request).pages + ) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_exascale_db_storage_vault_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.get_exascale_db_storage_vault + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.get_exascale_db_storage_vault + ] = mock_rpc + + request = {} + client.get_exascale_db_storage_vault(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.get_exascale_db_storage_vault(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_exascale_db_storage_vault_rest_required_fields( + request_type=exascale_db_storage_vault.GetExascaleDbStorageVaultRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_exascale_db_storage_vault._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_exascale_db_storage_vault._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = exascale_db_storage_vault.ExascaleDbStorageVault() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. 
+ with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = exascale_db_storage_vault.ExascaleDbStorageVault.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_exascale_db_storage_vault(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_exascale_db_storage_vault_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_exascale_db_storage_vault._get_unset_required_fields( + {} + ) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_get_exascale_db_storage_vault_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = exascale_db_storage_vault.ExascaleDbStorageVault() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/exascaleDbStorageVaults/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = exascale_db_storage_vault.ExascaleDbStorageVault.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_exascale_db_storage_vault(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/exascaleDbStorageVaults/*}" + % client.transport._host, + args[1], + ) + + +def test_get_exascale_db_storage_vault_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_exascale_db_storage_vault( + exascale_db_storage_vault.GetExascaleDbStorageVaultRequest(), + name="name_value", + ) + + +def test_create_exascale_db_storage_vault_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_exascale_db_storage_vault + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_exascale_db_storage_vault + ] = mock_rpc + + request = {} + client.create_exascale_db_storage_vault(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_exascale_db_storage_vault(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_exascale_db_storage_vault_rest_required_fields( + request_type=gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["exascale_db_storage_vault_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + assert "exascaleDbStorageVaultId" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_exascale_db_storage_vault._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "exascaleDbStorageVaultId" in jsonified_request + assert ( + jsonified_request["exascaleDbStorageVaultId"] + == request_init["exascale_db_storage_vault_id"] + ) + + jsonified_request["parent"] = "parent_value" + jsonified_request["exascaleDbStorageVaultId"] = "exascale_db_storage_vault_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_exascale_db_storage_vault._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "exascale_db_storage_vault_id", + "request_id", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "exascaleDbStorageVaultId" in jsonified_request + assert ( + jsonified_request["exascaleDbStorageVaultId"] + == "exascale_db_storage_vault_id_value" + ) + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.create_exascale_db_storage_vault(request) + + expected_params = [ + ( + "exascaleDbStorageVaultId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_exascale_db_storage_vault_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = ( + transport.create_exascale_db_storage_vault._get_unset_required_fields({}) + ) + assert set(unset_fields) == ( + set( + ( + "exascaleDbStorageVaultId", + "requestId", + ) + ) + & set( + ( + "parent", + "exascaleDbStorageVaultId", + "exascaleDbStorageVault", + ) + ) + ) + + +def test_create_exascale_db_storage_vault_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + exascale_db_storage_vault=gco_exascale_db_storage_vault.ExascaleDbStorageVault( + name="name_value" + ), + exascale_db_storage_vault_id="exascale_db_storage_vault_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.create_exascale_db_storage_vault(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/exascaleDbStorageVaults" + % client.transport._host, + args[1], + ) + + +def test_create_exascale_db_storage_vault_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_exascale_db_storage_vault( + gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest(), + parent="parent_value", + exascale_db_storage_vault=gco_exascale_db_storage_vault.ExascaleDbStorageVault( + name="name_value" + ), + exascale_db_storage_vault_id="exascale_db_storage_vault_id_value", + ) + + +def test_delete_exascale_db_storage_vault_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_exascale_db_storage_vault + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_exascale_db_storage_vault + ] = mock_rpc + + request = {} + client.delete_exascale_db_storage_vault(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.delete_exascale_db_storage_vault(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_exascale_db_storage_vault_rest_required_fields( + request_type=exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_exascale_db_storage_vault._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_exascale_db_storage_vault._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("request_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.delete_exascale_db_storage_vault(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_exascale_db_storage_vault_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = ( + transport.delete_exascale_db_storage_vault._get_unset_required_fields({}) + ) + assert set(unset_fields) == (set(("requestId",)) & set(("name",))) + + +def test_delete_exascale_db_storage_vault_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/exascaleDbStorageVaults/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.delete_exascale_db_storage_vault(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/exascaleDbStorageVaults/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_exascale_db_storage_vault_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_exascale_db_storage_vault( + exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest(), + name="name_value", + ) + + +def test_list_db_system_initial_storage_sizes_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_db_system_initial_storage_sizes + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_db_system_initial_storage_sizes + ] = mock_rpc + + request = {} + client.list_db_system_initial_storage_sizes(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_db_system_initial_storage_sizes(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_db_system_initial_storage_sizes_rest_required_fields( + request_type=db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_db_system_initial_storage_sizes._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_db_system_initial_storage_sizes._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse() + ) + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_db_system_initial_storage_sizes(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_db_system_initial_storage_sizes_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = ( + transport.list_db_system_initial_storage_sizes._get_unset_required_fields({}) + ) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_db_system_initial_storage_sizes_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse() + ) + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse.pb( + return_value + ) + ) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_db_system_initial_storage_sizes(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/dbSystemInitialStorageSizes" + % client.transport._host, + args[1], + ) + + +def test_list_db_system_initial_storage_sizes_rest_flattened_error( + transport: str = "rest", +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_db_system_initial_storage_sizes( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest(), + parent="parent_value", + ) + + +def test_list_db_system_initial_storage_sizes_rest_pager(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse( + db_system_initial_storage_sizes=[ + db_system_initial_storage_size.DbSystemInitialStorageSize(), + db_system_initial_storage_size.DbSystemInitialStorageSize(), + db_system_initial_storage_size.DbSystemInitialStorageSize(), + ], + next_page_token="abc", + ), + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse( + db_system_initial_storage_sizes=[], + next_page_token="def", + ), + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse( + db_system_initial_storage_sizes=[ + db_system_initial_storage_size.DbSystemInitialStorageSize(), + ], + next_page_token="ghi", + ), + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse( + db_system_initial_storage_sizes=[ + db_system_initial_storage_size.DbSystemInitialStorageSize(), + db_system_initial_storage_size.DbSystemInitialStorageSize(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse.to_json( + x + ) + for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, 
response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_db_system_initial_storage_sizes(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, db_system_initial_storage_size.DbSystemInitialStorageSize) + for i in results + ) + + pages = list( + client.list_db_system_initial_storage_sizes(request=sample_request).pages + ) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_list_databases_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_databases in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_databases] = mock_rpc + + request = {} + client.list_databases(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_databases(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_databases_rest_required_fields( + request_type=database.ListDatabasesRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_databases._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_databases._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "filter", + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = database.ListDatabasesResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = database.ListDatabasesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_databases(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_databases_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_databases._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_databases_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = database.ListDatabasesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = database.ListDatabasesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_databases(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/databases" % client.transport._host, + args[1], + ) + + +def test_list_databases_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_databases( + database.ListDatabasesRequest(), + parent="parent_value", + ) + + +def test_list_databases_rest_pager(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + database.ListDatabasesResponse( + databases=[ + database.Database(), + database.Database(), + database.Database(), + ], + next_page_token="abc", + ), + database.ListDatabasesResponse( + databases=[], + next_page_token="def", + ), + database.ListDatabasesResponse( + databases=[ + database.Database(), + ], + next_page_token="ghi", + ), + database.ListDatabasesResponse( + databases=[ + database.Database(), + database.Database(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(database.ListDatabasesResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_databases(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, database.Database) for i in results) + + pages = list(client.list_databases(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_database_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_database in 
client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_database] = mock_rpc + + request = {} + client.get_database(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_database(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_database_rest_required_fields(request_type=database.GetDatabaseRequest): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_database._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_database._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. 
+ return_value = database.Database() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = database.Database.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_database(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_database_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_database._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_get_database_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = database.Database() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/databases/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = database.Database.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_database(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/databases/*}" % client.transport._host, + args[1], + ) + + +def test_get_database_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_database( + database.GetDatabaseRequest(), + name="name_value", + ) + + +def test_list_pluggable_databases_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_pluggable_databases + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_pluggable_databases + ] = mock_rpc + + request = {} + client.list_pluggable_databases(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_pluggable_databases(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_pluggable_databases_rest_required_fields( + request_type=pluggable_database.ListPluggableDatabasesRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_pluggable_databases._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_pluggable_databases._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "filter", + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = pluggable_database.ListPluggableDatabasesResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = pluggable_database.ListPluggableDatabasesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_pluggable_databases(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_pluggable_databases_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_pluggable_databases._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_pluggable_databases_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = pluggable_database.ListPluggableDatabasesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = pluggable_database.ListPluggableDatabasesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_pluggable_databases(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/pluggableDatabases" + % client.transport._host, + args[1], + ) + + +def test_list_pluggable_databases_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_pluggable_databases( + pluggable_database.ListPluggableDatabasesRequest(), + parent="parent_value", + ) + + +def test_list_pluggable_databases_rest_pager(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + pluggable_database.ListPluggableDatabasesResponse( + pluggable_databases=[ + pluggable_database.PluggableDatabase(), + pluggable_database.PluggableDatabase(), + pluggable_database.PluggableDatabase(), + ], + next_page_token="abc", + ), + pluggable_database.ListPluggableDatabasesResponse( + pluggable_databases=[], + next_page_token="def", + ), + pluggable_database.ListPluggableDatabasesResponse( + pluggable_databases=[ + pluggable_database.PluggableDatabase(), + ], + next_page_token="ghi", + ), + pluggable_database.ListPluggableDatabasesResponse( + pluggable_databases=[ + pluggable_database.PluggableDatabase(), + pluggable_database.PluggableDatabase(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + pluggable_database.ListPluggableDatabasesResponse.to_json(x) + for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_pluggable_databases(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert 
all(isinstance(i, pluggable_database.PluggableDatabase) for i in results) + + pages = list(client.list_pluggable_databases(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_pluggable_database_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.get_pluggable_database + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.get_pluggable_database + ] = mock_rpc + + request = {} + client.get_pluggable_database(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.get_pluggable_database(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_pluggable_database_rest_required_fields( + request_type=pluggable_database.GetPluggableDatabaseRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_pluggable_database._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_pluggable_database._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = pluggable_database.PluggableDatabase() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. 
+ with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = pluggable_database.PluggableDatabase.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_pluggable_database(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_pluggable_database_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_pluggable_database._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_get_pluggable_database_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = pluggable_database.PluggableDatabase() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/pluggableDatabases/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = pluggable_database.PluggableDatabase.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_pluggable_database(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/pluggableDatabases/*}" + % client.transport._host, + args[1], + ) + + +def test_get_pluggable_database_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_pluggable_database( + pluggable_database.GetPluggableDatabaseRequest(), + name="name_value", + ) + + +def test_list_db_systems_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_db_systems in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_db_systems] = mock_rpc + + request = {} + client.list_db_systems(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_db_systems(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_db_systems_rest_required_fields( + request_type=db_system.ListDbSystemsRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_db_systems._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_db_systems._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "filter", + "order_by", + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = db_system.ListDbSystemsResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = db_system.ListDbSystemsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_db_systems(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_db_systems_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_db_systems._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "orderBy", + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_db_systems_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = db_system.ListDbSystemsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = db_system.ListDbSystemsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_db_systems(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/dbSystems" % client.transport._host, + args[1], + ) + + +def test_list_db_systems_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_db_systems( + db_system.ListDbSystemsRequest(), + parent="parent_value", + ) + + +def test_list_db_systems_rest_pager(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + db_system.ListDbSystemsResponse( + db_systems=[ + db_system.DbSystem(), + db_system.DbSystem(), + db_system.DbSystem(), + ], + next_page_token="abc", + ), + db_system.ListDbSystemsResponse( + db_systems=[], + next_page_token="def", + ), + db_system.ListDbSystemsResponse( + db_systems=[ + db_system.DbSystem(), + ], + next_page_token="ghi", + ), + db_system.ListDbSystemsResponse( + db_systems=[ + db_system.DbSystem(), + db_system.DbSystem(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(db_system.ListDbSystemsResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_db_systems(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, db_system.DbSystem) for i in results) + + pages = list(client.list_db_systems(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_db_system_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert 
client._transport.get_db_system in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_db_system] = mock_rpc + + request = {} + client.get_db_system(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_db_system(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_db_system_rest_required_fields(request_type=db_system.GetDbSystemRequest): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_db_system._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_db_system._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. 
+ return_value = db_system.DbSystem() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = db_system.DbSystem.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_db_system(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_db_system_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_db_system._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_get_db_system_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = db_system.DbSystem() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/dbSystems/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = db_system.DbSystem.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_db_system(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/dbSystems/*}" % client.transport._host, + args[1], + ) + + +def test_get_db_system_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_db_system( + db_system.GetDbSystemRequest(), + name="name_value", + ) + + +def test_create_db_system_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.create_db_system in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_db_system + ] = mock_rpc + + request = {} + client.create_db_system(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_db_system(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_db_system_rest_required_fields( + request_type=gco_db_system.CreateDbSystemRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["db_system_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + assert "dbSystemId" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_db_system._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "dbSystemId" in jsonified_request + assert jsonified_request["dbSystemId"] == request_init["db_system_id"] + + jsonified_request["parent"] = "parent_value" + jsonified_request["dbSystemId"] = "db_system_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_db_system._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "db_system_id", + "request_id", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "dbSystemId" in jsonified_request + assert jsonified_request["dbSystemId"] == "db_system_id_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.create_db_system(request) + + expected_params = [ + ( + "dbSystemId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_db_system_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_db_system._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "dbSystemId", + "requestId", + ) + ) + & set( + ( + "parent", + "dbSystemId", + "dbSystem", + ) + ) + ) + + +def test_create_db_system_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + db_system=gco_db_system.DbSystem(name="name_value"), + db_system_id="db_system_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.create_db_system(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/dbSystems" % client.transport._host, + args[1], + ) + + +def test_create_db_system_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_db_system( + gco_db_system.CreateDbSystemRequest(), + parent="parent_value", + db_system=gco_db_system.DbSystem(name="name_value"), + db_system_id="db_system_id_value", + ) + + +def test_delete_db_system_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_db_system in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_db_system + ] = mock_rpc + + request = {} + client.delete_db_system(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.delete_db_system(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_db_system_rest_required_fields( + request_type=db_system.DeleteDbSystemRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_db_system._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_db_system._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("request_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.delete_db_system(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_db_system_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_db_system._get_unset_required_fields({}) + assert set(unset_fields) == (set(("requestId",)) & set(("name",))) + + +def test_delete_db_system_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/dbSystems/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.delete_db_system(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/dbSystems/*}" % client.transport._host, + args[1], + ) + + +def test_delete_db_system_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_db_system( + db_system.DeleteDbSystemRequest(), + name="name_value", + ) + + +def test_list_db_versions_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_db_versions in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_db_versions + ] = mock_rpc + + request = {} + client.list_db_versions(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_db_versions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_db_versions_rest_required_fields( + request_type=db_version.ListDbVersionsRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_db_versions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_db_versions._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "filter", + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = db_version.ListDbVersionsResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = db_version.ListDbVersionsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_db_versions(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_db_versions_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_db_versions._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_db_versions_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = db_version.ListDbVersionsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = db_version.ListDbVersionsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_db_versions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/dbVersions" % client.transport._host, + args[1], + ) + + +def test_list_db_versions_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_db_versions( + db_version.ListDbVersionsRequest(), + parent="parent_value", + ) + + +def test_list_db_versions_rest_pager(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + db_version.ListDbVersionsResponse( + db_versions=[ + db_version.DbVersion(), + db_version.DbVersion(), + db_version.DbVersion(), + ], + next_page_token="abc", + ), + db_version.ListDbVersionsResponse( + db_versions=[], + next_page_token="def", + ), + db_version.ListDbVersionsResponse( + db_versions=[ + db_version.DbVersion(), + ], + next_page_token="ghi", + ), + db_version.ListDbVersionsResponse( + db_versions=[ + db_version.DbVersion(), + db_version.DbVersion(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(db_version.ListDbVersionsResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_db_versions(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, db_version.DbVersion) for i in results) + + pages = list(client.list_db_versions(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_list_database_character_sets_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been 
cached + assert ( + client._transport.list_database_character_sets + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_database_character_sets + ] = mock_rpc + + request = {} + client.list_database_character_sets(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_database_character_sets(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_database_character_sets_rest_required_fields( + request_type=database_character_set.ListDatabaseCharacterSetsRequest, +): + transport_class = transports.OracleDatabaseRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_database_character_sets._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_database_character_sets._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "filter", + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = database_character_set.ListDatabaseCharacterSetsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = database_character_set.ListDatabaseCharacterSetsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_database_character_sets(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_database_character_sets_rest_unset_required_fields(): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_database_character_sets._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_database_character_sets_rest_flattened(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = database_character_set.ListDatabaseCharacterSetsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = database_character_set.ListDatabaseCharacterSetsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_database_character_sets(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/databaseCharacterSets" + % client.transport._host, + args[1], + ) + + +def test_list_database_character_sets_rest_flattened_error(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_database_character_sets( + database_character_set.ListDatabaseCharacterSetsRequest(), + parent="parent_value", + ) + + +def test_list_database_character_sets_rest_pager(transport: str = "rest"): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + database_character_set.ListDatabaseCharacterSetsResponse( + database_character_sets=[ + database_character_set.DatabaseCharacterSet(), + database_character_set.DatabaseCharacterSet(), + database_character_set.DatabaseCharacterSet(), + ], + next_page_token="abc", + ), + database_character_set.ListDatabaseCharacterSetsResponse( + database_character_sets=[], + next_page_token="def", + ), + database_character_set.ListDatabaseCharacterSetsResponse( + database_character_sets=[ + database_character_set.DatabaseCharacterSet(), + ], + next_page_token="ghi", + ), + database_character_set.ListDatabaseCharacterSetsResponse( + database_character_sets=[ + database_character_set.DatabaseCharacterSet(), + database_character_set.DatabaseCharacterSet(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + database_character_set.ListDatabaseCharacterSetsResponse.to_json(x) + for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_database_character_sets(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, database_character_set.DatabaseCharacterSet) for i in results + ) + + pages = list(client.list_database_character_sets(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def 
test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.OracleDatabaseGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.OracleDatabaseGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = OracleDatabaseClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.OracleDatabaseGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = OracleDatabaseClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = OracleDatabaseClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.OracleDatabaseGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = OracleDatabaseClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.OracleDatabaseGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = OracleDatabaseClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.OracleDatabaseGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.OracleDatabaseGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.OracleDatabaseGrpcTransport, + transports.OracleDatabaseGrpcAsyncIOTransport, + transports.OracleDatabaseRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_kind_grpc(): + transport = OracleDatabaseClient.get_transport_class("grpc")( + credentials=ga_credentials.AnonymousCredentials() + ) + assert transport.kind == "grpc" + + +def test_initialize_client_w_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_cloud_exadata_infrastructures_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_cloud_exadata_infrastructures), "__call__" + ) as call: + call.return_value = oracledatabase.ListCloudExadataInfrastructuresResponse() + client.list_cloud_exadata_infrastructures(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListCloudExadataInfrastructuresRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_cloud_exadata_infrastructure_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_cloud_exadata_infrastructure), "__call__" + ) as call: + call.return_value = exadata_infra.CloudExadataInfrastructure() + client.get_cloud_exadata_infrastructure(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.GetCloudExadataInfrastructureRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_cloud_exadata_infrastructure_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_cloud_exadata_infrastructure), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_cloud_exadata_infrastructure(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.CreateCloudExadataInfrastructureRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_cloud_exadata_infrastructure_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_cloud_exadata_infrastructure), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_cloud_exadata_infrastructure(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.DeleteCloudExadataInfrastructureRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_cloud_vm_clusters_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_cloud_vm_clusters), "__call__" + ) as call: + call.return_value = oracledatabase.ListCloudVmClustersResponse() + client.list_cloud_vm_clusters(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListCloudVmClustersRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_get_cloud_vm_cluster_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_cloud_vm_cluster), "__call__" + ) as call: + call.return_value = vm_cluster.CloudVmCluster() + client.get_cloud_vm_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.GetCloudVmClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_cloud_vm_cluster_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_cloud_vm_cluster), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_cloud_vm_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.CreateCloudVmClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_cloud_vm_cluster_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_cloud_vm_cluster), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_cloud_vm_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.DeleteCloudVmClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_entitlements_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_entitlements), "__call__" + ) as call: + call.return_value = oracledatabase.ListEntitlementsResponse() + client.list_entitlements(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListEntitlementsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_db_servers_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_db_servers), "__call__") as call: + call.return_value = oracledatabase.ListDbServersResponse() + client.list_db_servers(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListDbServersRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_db_nodes_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_db_nodes), "__call__") as call: + call.return_value = oracledatabase.ListDbNodesResponse() + client.list_db_nodes(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListDbNodesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_gi_versions_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_gi_versions), "__call__") as call: + call.return_value = oracledatabase.ListGiVersionsResponse() + client.list_gi_versions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListGiVersionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_minor_versions_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_minor_versions), "__call__" + ) as call: + call.return_value = minor_version.ListMinorVersionsResponse() + client.list_minor_versions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = minor_version.ListMinorVersionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_db_system_shapes_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_db_system_shapes), "__call__" + ) as call: + call.return_value = oracledatabase.ListDbSystemShapesResponse() + client.list_db_system_shapes(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListDbSystemShapesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_autonomous_databases_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_autonomous_databases), "__call__" + ) as call: + call.return_value = oracledatabase.ListAutonomousDatabasesResponse() + client.list_autonomous_databases(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListAutonomousDatabasesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_autonomous_database_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_autonomous_database), "__call__" + ) as call: + call.return_value = autonomous_database.AutonomousDatabase() + client.get_autonomous_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.GetAutonomousDatabaseRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_autonomous_database_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_autonomous_database), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_autonomous_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.CreateAutonomousDatabaseRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_update_autonomous_database_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_autonomous_database), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_autonomous_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.UpdateAutonomousDatabaseRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_autonomous_database_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_autonomous_database), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_autonomous_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.DeleteAutonomousDatabaseRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_restore_autonomous_database_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.restore_autonomous_database), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.restore_autonomous_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.RestoreAutonomousDatabaseRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_generate_autonomous_database_wallet_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.generate_autonomous_database_wallet), "__call__" + ) as call: + call.return_value = oracledatabase.GenerateAutonomousDatabaseWalletResponse() + client.generate_autonomous_database_wallet(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.GenerateAutonomousDatabaseWalletRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_autonomous_db_versions_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_autonomous_db_versions), "__call__" + ) as call: + call.return_value = oracledatabase.ListAutonomousDbVersionsResponse() + client.list_autonomous_db_versions(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListAutonomousDbVersionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_autonomous_database_character_sets_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_autonomous_database_character_sets), "__call__" + ) as call: + call.return_value = oracledatabase.ListAutonomousDatabaseCharacterSetsResponse() + client.list_autonomous_database_character_sets(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListAutonomousDatabaseCharacterSetsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_autonomous_database_backups_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_autonomous_database_backups), "__call__" + ) as call: + call.return_value = oracledatabase.ListAutonomousDatabaseBackupsResponse() + client.list_autonomous_database_backups(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListAutonomousDatabaseBackupsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_stop_autonomous_database_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.stop_autonomous_database), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.stop_autonomous_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.StopAutonomousDatabaseRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_start_autonomous_database_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.start_autonomous_database), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.start_autonomous_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.StartAutonomousDatabaseRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_restart_autonomous_database_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.restart_autonomous_database), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.restart_autonomous_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.RestartAutonomousDatabaseRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_switchover_autonomous_database_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.switchover_autonomous_database), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.switchover_autonomous_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.SwitchoverAutonomousDatabaseRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_failover_autonomous_database_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.failover_autonomous_database), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.failover_autonomous_database(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.FailoverAutonomousDatabaseRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_odb_networks_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_odb_networks), "__call__" + ) as call: + call.return_value = odb_network.ListOdbNetworksResponse() + client.list_odb_networks(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = odb_network.ListOdbNetworksRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_odb_network_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_odb_network), "__call__") as call: + call.return_value = odb_network.OdbNetwork() + client.get_odb_network(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = odb_network.GetOdbNetworkRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_odb_network_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_odb_network), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_odb_network(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = gco_odb_network.CreateOdbNetworkRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_odb_network_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_odb_network), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_odb_network(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = odb_network.DeleteOdbNetworkRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_odb_subnets_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_odb_subnets), "__call__") as call: + call.return_value = odb_subnet.ListOdbSubnetsResponse() + client.list_odb_subnets(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = odb_subnet.ListOdbSubnetsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. 
request == None and no flattened fields passed, work. +def test_get_odb_subnet_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_odb_subnet), "__call__") as call: + call.return_value = odb_subnet.OdbSubnet() + client.get_odb_subnet(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = odb_subnet.GetOdbSubnetRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_odb_subnet_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_odb_subnet), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_odb_subnet(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = gco_odb_subnet.CreateOdbSubnetRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_odb_subnet_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_odb_subnet), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_odb_subnet(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = odb_subnet.DeleteOdbSubnetRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_exadb_vm_clusters_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_exadb_vm_clusters), "__call__" + ) as call: + call.return_value = oracledatabase.ListExadbVmClustersResponse() + client.list_exadb_vm_clusters(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListExadbVmClustersRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_exadb_vm_cluster_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_exadb_vm_cluster), "__call__" + ) as call: + call.return_value = exadb_vm_cluster.ExadbVmCluster() + client.get_exadb_vm_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.GetExadbVmClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_create_exadb_vm_cluster_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_exadb_vm_cluster), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_exadb_vm_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.CreateExadbVmClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_exadb_vm_cluster_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_exadb_vm_cluster), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_exadb_vm_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.DeleteExadbVmClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_exadb_vm_cluster_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_exadb_vm_cluster), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_exadb_vm_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.UpdateExadbVmClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_remove_virtual_machine_exadb_vm_cluster_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.remove_virtual_machine_exadb_vm_cluster), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.remove_virtual_machine_exadb_vm_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.RemoveVirtualMachineExadbVmClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_exascale_db_storage_vaults_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_exascale_db_storage_vaults), "__call__" + ) as call: + call.return_value = ( + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse() + ) + client.list_exascale_db_storage_vaults(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_exascale_db_storage_vault_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_exascale_db_storage_vault), "__call__" + ) as call: + call.return_value = exascale_db_storage_vault.ExascaleDbStorageVault() + client.get_exascale_db_storage_vault(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = exascale_db_storage_vault.GetExascaleDbStorageVaultRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_exascale_db_storage_vault_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_exascale_db_storage_vault), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_exascale_db_storage_vault(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = ( + gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest() + ) + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_delete_exascale_db_storage_vault_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_exascale_db_storage_vault), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_exascale_db_storage_vault(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_db_system_initial_storage_sizes_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_db_system_initial_storage_sizes), "__call__" + ) as call: + call.return_value = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse() + ) + client.list_db_system_initial_storage_sizes(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest() + ) + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_databases_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.list_databases), "__call__") as call: + call.return_value = database.ListDatabasesResponse() + client.list_databases(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = database.ListDatabasesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_database_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_database), "__call__") as call: + call.return_value = database.Database() + client.get_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = database.GetDatabaseRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_pluggable_databases_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_pluggable_databases), "__call__" + ) as call: + call.return_value = pluggable_database.ListPluggableDatabasesResponse() + client.list_pluggable_databases(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = pluggable_database.ListPluggableDatabasesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. 
request == None and no flattened fields passed, work. +def test_get_pluggable_database_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_pluggable_database), "__call__" + ) as call: + call.return_value = pluggable_database.PluggableDatabase() + client.get_pluggable_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = pluggable_database.GetPluggableDatabaseRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_db_systems_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_db_systems), "__call__") as call: + call.return_value = db_system.ListDbSystemsResponse() + client.list_db_systems(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = db_system.ListDbSystemsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_db_system_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_db_system), "__call__") as call: + call.return_value = db_system.DbSystem() + client.get_db_system(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = db_system.GetDbSystemRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_db_system_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_db_system), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_db_system(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = gco_db_system.CreateDbSystemRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_db_system_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_db_system), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_db_system(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = db_system.DeleteDbSystemRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_db_versions_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.list_db_versions), "__call__") as call: + call.return_value = db_version.ListDbVersionsResponse() + client.list_db_versions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = db_version.ListDbVersionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_database_character_sets_empty_call_grpc(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_database_character_sets), "__call__" + ) as call: + call.return_value = database_character_set.ListDatabaseCharacterSetsResponse() + client.list_database_character_sets(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = database_character_set.ListDatabaseCharacterSetsRequest() + + assert args[0] == request_msg + + +def test_transport_kind_grpc_asyncio(): + transport = OracleDatabaseAsyncClient.get_transport_class("grpc_asyncio")( + credentials=async_anonymous_credentials() + ) + assert transport.kind == "grpc_asyncio" + + +def test_initialize_client_w_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), transport="grpc_asyncio" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_list_cloud_exadata_infrastructures_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_cloud_exadata_infrastructures), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + oracledatabase.ListCloudExadataInfrastructuresResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_cloud_exadata_infrastructures(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListCloudExadataInfrastructuresRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_cloud_exadata_infrastructure_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_cloud_exadata_infrastructure), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + exadata_infra.CloudExadataInfrastructure( + name="name_value", + display_name="display_name_value", + gcp_oracle_zone="gcp_oracle_zone_value", + entitlement_id="entitlement_id_value", + ) + ) + await client.get_cloud_exadata_infrastructure(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.GetCloudExadataInfrastructureRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_cloud_exadata_infrastructure_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_cloud_exadata_infrastructure), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_cloud_exadata_infrastructure(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.CreateCloudExadataInfrastructureRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_cloud_exadata_infrastructure_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_cloud_exadata_infrastructure), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.delete_cloud_exadata_infrastructure(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.DeleteCloudExadataInfrastructureRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_cloud_vm_clusters_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_cloud_vm_clusters), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + oracledatabase.ListCloudVmClustersResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_cloud_vm_clusters(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListCloudVmClustersRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_cloud_vm_cluster_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_cloud_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vm_cluster.CloudVmCluster( + name="name_value", + exadata_infrastructure="exadata_infrastructure_value", + display_name="display_name_value", + cidr="cidr_value", + backup_subnet_cidr="backup_subnet_cidr_value", + network="network_value", + gcp_oracle_zone="gcp_oracle_zone_value", + odb_network="odb_network_value", + odb_subnet="odb_subnet_value", + backup_odb_subnet="backup_odb_subnet_value", + ) + ) + await client.get_cloud_vm_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.GetCloudVmClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_cloud_vm_cluster_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_cloud_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_cloud_vm_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.CreateCloudVmClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_delete_cloud_vm_cluster_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_cloud_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.delete_cloud_vm_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.DeleteCloudVmClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_entitlements_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_entitlements), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + oracledatabase.ListEntitlementsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_entitlements(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListEntitlementsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_list_db_servers_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_db_servers), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + oracledatabase.ListDbServersResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_db_servers(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListDbServersRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_db_nodes_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_db_nodes), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + oracledatabase.ListDbNodesResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_db_nodes(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListDbNodesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_list_gi_versions_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_gi_versions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + oracledatabase.ListGiVersionsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_gi_versions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListGiVersionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_minor_versions_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_minor_versions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + minor_version.ListMinorVersionsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_minor_versions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = minor_version.ListMinorVersionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_list_db_system_shapes_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_db_system_shapes), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + oracledatabase.ListDbSystemShapesResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_db_system_shapes(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListDbSystemShapesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_autonomous_databases_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_autonomous_databases), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + oracledatabase.ListAutonomousDatabasesResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_autonomous_databases(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListAutonomousDatabasesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_get_autonomous_database_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_autonomous_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + autonomous_database.AutonomousDatabase( + name="name_value", + database="database_value", + display_name="display_name_value", + entitlement_id="entitlement_id_value", + admin_password="admin_password_value", + network="network_value", + cidr="cidr_value", + odb_network="odb_network_value", + odb_subnet="odb_subnet_value", + peer_autonomous_databases=["peer_autonomous_databases_value"], + disaster_recovery_supported_locations=[ + "disaster_recovery_supported_locations_value" + ], + ) + ) + await client.get_autonomous_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.GetAutonomousDatabaseRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_autonomous_database_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_autonomous_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_autonomous_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.CreateAutonomousDatabaseRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_update_autonomous_database_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_autonomous_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.update_autonomous_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.UpdateAutonomousDatabaseRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_autonomous_database_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_autonomous_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.delete_autonomous_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.DeleteAutonomousDatabaseRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_restore_autonomous_database_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.restore_autonomous_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.restore_autonomous_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.RestoreAutonomousDatabaseRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_generate_autonomous_database_wallet_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.generate_autonomous_database_wallet), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + oracledatabase.GenerateAutonomousDatabaseWalletResponse( + archive_content=b"archive_content_blob", + ) + ) + await client.generate_autonomous_database_wallet(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.GenerateAutonomousDatabaseWalletRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_autonomous_db_versions_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_autonomous_db_versions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + oracledatabase.ListAutonomousDbVersionsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_autonomous_db_versions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListAutonomousDbVersionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_autonomous_database_character_sets_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_autonomous_database_character_sets), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + oracledatabase.ListAutonomousDatabaseCharacterSetsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_autonomous_database_character_sets(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListAutonomousDatabaseCharacterSetsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_autonomous_database_backups_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_autonomous_database_backups), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + oracledatabase.ListAutonomousDatabaseBackupsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_autonomous_database_backups(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListAutonomousDatabaseBackupsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_stop_autonomous_database_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.stop_autonomous_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.stop_autonomous_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.StopAutonomousDatabaseRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_start_autonomous_database_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.start_autonomous_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.start_autonomous_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.StartAutonomousDatabaseRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_restart_autonomous_database_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.restart_autonomous_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.restart_autonomous_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.RestartAutonomousDatabaseRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_switchover_autonomous_database_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.switchover_autonomous_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.switchover_autonomous_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.SwitchoverAutonomousDatabaseRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_failover_autonomous_database_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.failover_autonomous_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.failover_autonomous_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.FailoverAutonomousDatabaseRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_odb_networks_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_odb_networks), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + odb_network.ListOdbNetworksResponse( + next_page_token="next_page_token_value", + unreachable=["unreachable_value"], + ) + ) + await client.list_odb_networks(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = odb_network.ListOdbNetworksRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_get_odb_network_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_odb_network), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + odb_network.OdbNetwork( + name="name_value", + network="network_value", + state=odb_network.OdbNetwork.State.PROVISIONING, + entitlement_id="entitlement_id_value", + gcp_oracle_zone="gcp_oracle_zone_value", + ) + ) + await client.get_odb_network(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = odb_network.GetOdbNetworkRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_odb_network_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_odb_network), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_odb_network(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = gco_odb_network.CreateOdbNetworkRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_delete_odb_network_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_odb_network), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.delete_odb_network(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = odb_network.DeleteOdbNetworkRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_odb_subnets_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_odb_subnets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + odb_subnet.ListOdbSubnetsResponse( + next_page_token="next_page_token_value", + unreachable=["unreachable_value"], + ) + ) + await client.list_odb_subnets(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = odb_subnet.ListOdbSubnetsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_get_odb_subnet_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_odb_subnet), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + odb_subnet.OdbSubnet( + name="name_value", + cidr_range="cidr_range_value", + purpose=odb_subnet.OdbSubnet.Purpose.CLIENT_SUBNET, + state=odb_subnet.OdbSubnet.State.PROVISIONING, + ) + ) + await client.get_odb_subnet(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = odb_subnet.GetOdbSubnetRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_odb_subnet_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_odb_subnet), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_odb_subnet(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = gco_odb_subnet.CreateOdbSubnetRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_delete_odb_subnet_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_odb_subnet), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.delete_odb_subnet(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = odb_subnet.DeleteOdbSubnetRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_exadb_vm_clusters_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_exadb_vm_clusters), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + oracledatabase.ListExadbVmClustersResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_exadb_vm_clusters(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListExadbVmClustersRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_get_exadb_vm_cluster_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + exadb_vm_cluster.ExadbVmCluster( + name="name_value", + gcp_oracle_zone="gcp_oracle_zone_value", + odb_network="odb_network_value", + odb_subnet="odb_subnet_value", + backup_odb_subnet="backup_odb_subnet_value", + display_name="display_name_value", + entitlement_id="entitlement_id_value", + ) + ) + await client.get_exadb_vm_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.GetExadbVmClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_exadb_vm_cluster_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_exadb_vm_cluster(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.CreateExadbVmClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_exadb_vm_cluster_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.delete_exadb_vm_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.DeleteExadbVmClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_update_exadb_vm_cluster_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.update_exadb_vm_cluster(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.UpdateExadbVmClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_remove_virtual_machine_exadb_vm_cluster_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.remove_virtual_machine_exadb_vm_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.remove_virtual_machine_exadb_vm_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.RemoveVirtualMachineExadbVmClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_exascale_db_storage_vaults_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_exascale_db_storage_vaults), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_exascale_db_storage_vaults(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_exascale_db_storage_vault_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_exascale_db_storage_vault), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + exascale_db_storage_vault.ExascaleDbStorageVault( + name="name_value", + display_name="display_name_value", + gcp_oracle_zone="gcp_oracle_zone_value", + entitlement_id="entitlement_id_value", + ) + ) + await client.get_exascale_db_storage_vault(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = exascale_db_storage_vault.GetExascaleDbStorageVaultRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_exascale_db_storage_vault_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_exascale_db_storage_vault), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_exascale_db_storage_vault(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = ( + gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest() + ) + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_exascale_db_storage_vault_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_exascale_db_storage_vault), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.delete_exascale_db_storage_vault(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_db_system_initial_storage_sizes_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_db_system_initial_storage_sizes), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_db_system_initial_storage_sizes(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest() + ) + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_databases_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_databases), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + database.ListDatabasesResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_databases(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = database.ListDatabasesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_database_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.get_database), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + database.Database( + name="name_value", + db_name="db_name_value", + db_unique_name="db_unique_name_value", + admin_password="admin_password_value", + tde_wallet_password="tde_wallet_password_value", + character_set="character_set_value", + ncharacter_set="ncharacter_set_value", + oci_url="oci_url_value", + database_id="database_id_value", + db_home_name="db_home_name_value", + gcp_oracle_zone="gcp_oracle_zone_value", + ops_insights_status=database.Database.OperationsInsightsStatus.ENABLING, + ) + ) + await client.get_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = database.GetDatabaseRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_pluggable_databases_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_pluggable_databases), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + pluggable_database.ListPluggableDatabasesResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_pluggable_databases(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = pluggable_database.ListPluggableDatabasesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_pluggable_database_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_pluggable_database), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + pluggable_database.PluggableDatabase( + name="name_value", + oci_url="oci_url_value", + ) + ) + await client.get_pluggable_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = pluggable_database.GetPluggableDatabaseRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_db_systems_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_db_systems), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + db_system.ListDbSystemsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_db_systems(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = db_system.ListDbSystemsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_db_system_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_db_system), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + db_system.DbSystem( + name="name_value", + gcp_oracle_zone="gcp_oracle_zone_value", + odb_network="odb_network_value", + odb_subnet="odb_subnet_value", + entitlement_id="entitlement_id_value", + display_name="display_name_value", + oci_url="oci_url_value", + ) + ) + await client.get_db_system(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = db_system.GetDbSystemRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_db_system_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_db_system), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_db_system(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = gco_db_system.CreateDbSystemRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_db_system_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_db_system), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.delete_db_system(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = db_system.DeleteDbSystemRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_db_versions_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_db_versions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + db_version.ListDbVersionsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_db_versions(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = db_version.ListDbVersionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_database_character_sets_empty_call_grpc_asyncio(): + client = OracleDatabaseAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_database_character_sets), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + database_character_set.ListDatabaseCharacterSetsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_database_character_sets(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = database_character_set.ListDatabaseCharacterSetsRequest() + + assert args[0] == request_msg + + +def test_transport_kind_rest(): + transport = OracleDatabaseClient.get_transport_class("rest")( + credentials=ga_credentials.AnonymousCredentials() + ) + assert transport.kind == "rest" + + +def test_list_cloud_exadata_infrastructures_rest_bad_request( + request_type=oracledatabase.ListCloudExadataInfrastructuresRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_cloud_exadata_infrastructures(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.ListCloudExadataInfrastructuresRequest, + dict, + ], +) +def test_list_cloud_exadata_infrastructures_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = oracledatabase.ListCloudExadataInfrastructuresResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.ListCloudExadataInfrastructuresResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_cloud_exadata_infrastructures(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListCloudExadataInfrastructuresPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_cloud_exadata_infrastructures_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_list_cloud_exadata_infrastructures", + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_list_cloud_exadata_infrastructures_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "pre_list_cloud_exadata_infrastructures", + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.ListCloudExadataInfrastructuresRequest.pb( + oracledatabase.ListCloudExadataInfrastructuresRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = oracledatabase.ListCloudExadataInfrastructuresResponse.to_json( + oracledatabase.ListCloudExadataInfrastructuresResponse() + ) + req.return_value.content = return_value + + request = oracledatabase.ListCloudExadataInfrastructuresRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = 
oracledatabase.ListCloudExadataInfrastructuresResponse() + post_with_metadata.return_value = ( + oracledatabase.ListCloudExadataInfrastructuresResponse(), + metadata, + ) + + client.list_cloud_exadata_infrastructures( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_get_cloud_exadata_infrastructure_rest_bad_request( + request_type=oracledatabase.GetCloudExadataInfrastructureRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/cloudExadataInfrastructures/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_cloud_exadata_infrastructure(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.GetCloudExadataInfrastructureRequest, + dict, + ], +) +def test_get_cloud_exadata_infrastructure_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/cloudExadataInfrastructures/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = exadata_infra.CloudExadataInfrastructure( + name="name_value", + display_name="display_name_value", + gcp_oracle_zone="gcp_oracle_zone_value", + entitlement_id="entitlement_id_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = exadata_infra.CloudExadataInfrastructure.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_cloud_exadata_infrastructure(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, exadata_infra.CloudExadataInfrastructure) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.gcp_oracle_zone == "gcp_oracle_zone_value" + assert response.entitlement_id == "entitlement_id_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_cloud_exadata_infrastructure_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_get_cloud_exadata_infrastructure", + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + 
"post_get_cloud_exadata_infrastructure_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_get_cloud_exadata_infrastructure" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.GetCloudExadataInfrastructureRequest.pb( + oracledatabase.GetCloudExadataInfrastructureRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = exadata_infra.CloudExadataInfrastructure.to_json( + exadata_infra.CloudExadataInfrastructure() + ) + req.return_value.content = return_value + + request = oracledatabase.GetCloudExadataInfrastructureRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = exadata_infra.CloudExadataInfrastructure() + post_with_metadata.return_value = ( + exadata_infra.CloudExadataInfrastructure(), + metadata, + ) + + client.get_cloud_exadata_infrastructure( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_create_cloud_exadata_infrastructure_rest_bad_request( + request_type=oracledatabase.CreateCloudExadataInfrastructureRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.create_cloud_exadata_infrastructure(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.CreateCloudExadataInfrastructureRequest, + dict, + ], +) +def test_create_cloud_exadata_infrastructure_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request_init["cloud_exadata_infrastructure"] = { + "name": "name_value", + "display_name": "display_name_value", + "gcp_oracle_zone": "gcp_oracle_zone_value", + "entitlement_id": "entitlement_id_value", + "properties": { + "ocid": "ocid_value", + "compute_count": 1413, + "storage_count": 1405, + "total_storage_size_gb": 2234, + "available_storage_size_gb": 2615, + "maintenance_window": { + "preference": 1, + "months": [1], + "weeks_of_month": [1497, 1498], + "days_of_week": [1], + "hours_of_day": [1283, 1284], + "lead_time_week": 1455, + "patching_mode": 1, + "custom_action_timeout_mins": 2804, + "is_custom_action_timeout_enabled": True, + }, + "state": 1, + "shape": "shape_value", + "oci_url": "oci_url_value", + "cpu_count": 976, + "max_cpu_count": 1397, + "memory_size_gb": 1499, + "max_memory_gb": 1382, + "db_node_storage_size_gb": 2401, + "max_db_node_storage_size_gb": 2822, + "data_storage_size_tb": 0.2109, + "max_data_storage_tb": 0.19920000000000002, + "activated_storage_count": 2449, + "additional_storage_count": 2549, + "db_server_version": 
"db_server_version_value", + "storage_server_version": "storage_server_version_value", + "next_maintenance_run_id": "next_maintenance_run_id_value", + "next_maintenance_run_time": {"seconds": 751, "nanos": 543}, + "next_security_maintenance_run_time": {}, + "customer_contacts": [{"email": "email_value"}], + "monthly_storage_server_version": "monthly_storage_server_version_value", + "monthly_db_server_version": "monthly_db_server_version_value", + "compute_model": 1, + "database_server_type": "database_server_type_value", + "storage_server_type": "storage_server_type_value", + }, + "labels": {}, + "create_time": {}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = oracledatabase.CreateCloudExadataInfrastructureRequest.meta.fields[ + "cloud_exadata_infrastructure" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init[ + "cloud_exadata_infrastructure" + ].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range( + 0, len(request_init["cloud_exadata_infrastructure"][field]) + ): + del 
request_init["cloud_exadata_infrastructure"][field][i][subfield] + else: + del request_init["cloud_exadata_infrastructure"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_cloud_exadata_infrastructure(request) + + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_cloud_exadata_infrastructure_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_create_cloud_exadata_infrastructure", + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_create_cloud_exadata_infrastructure_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + 
"pre_create_cloud_exadata_infrastructure", + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.CreateCloudExadataInfrastructureRequest.pb( + oracledatabase.CreateCloudExadataInfrastructureRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = oracledatabase.CreateCloudExadataInfrastructureRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.create_cloud_exadata_infrastructure( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_delete_cloud_exadata_infrastructure_rest_bad_request( + request_type=oracledatabase.DeleteCloudExadataInfrastructureRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/cloudExadataInfrastructures/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.delete_cloud_exadata_infrastructure(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.DeleteCloudExadataInfrastructureRequest, + dict, + ], +) +def test_delete_cloud_exadata_infrastructure_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/cloudExadataInfrastructures/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.delete_cloud_exadata_infrastructure(request) + + # Establish that the response is the type that we expect. 
+ json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_cloud_exadata_infrastructure_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_delete_cloud_exadata_infrastructure", + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_delete_cloud_exadata_infrastructure_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "pre_delete_cloud_exadata_infrastructure", + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.DeleteCloudExadataInfrastructureRequest.pb( + oracledatabase.DeleteCloudExadataInfrastructureRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = oracledatabase.DeleteCloudExadataInfrastructureRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + 
+ client.delete_cloud_exadata_infrastructure( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_list_cloud_vm_clusters_rest_bad_request( + request_type=oracledatabase.ListCloudVmClustersRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_cloud_vm_clusters(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.ListCloudVmClustersRequest, + dict, + ], +) +def test_list_cloud_vm_clusters_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = oracledatabase.ListCloudVmClustersResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.ListCloudVmClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_cloud_vm_clusters(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListCloudVmClustersPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_cloud_vm_clusters_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_list_cloud_vm_clusters" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_list_cloud_vm_clusters_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_list_cloud_vm_clusters" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.ListCloudVmClustersRequest.pb( + oracledatabase.ListCloudVmClustersRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, 
+ "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = oracledatabase.ListCloudVmClustersResponse.to_json( + oracledatabase.ListCloudVmClustersResponse() + ) + req.return_value.content = return_value + + request = oracledatabase.ListCloudVmClustersRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = oracledatabase.ListCloudVmClustersResponse() + post_with_metadata.return_value = ( + oracledatabase.ListCloudVmClustersResponse(), + metadata, + ) + + client.list_cloud_vm_clusters( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_get_cloud_vm_cluster_rest_bad_request( + request_type=oracledatabase.GetCloudVmClusterRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/cloudVmClusters/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_cloud_vm_cluster(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.GetCloudVmClusterRequest, + dict, + ], +) +def test_get_cloud_vm_cluster_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/cloudVmClusters/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = vm_cluster.CloudVmCluster( + name="name_value", + exadata_infrastructure="exadata_infrastructure_value", + display_name="display_name_value", + cidr="cidr_value", + backup_subnet_cidr="backup_subnet_cidr_value", + network="network_value", + gcp_oracle_zone="gcp_oracle_zone_value", + odb_network="odb_network_value", + odb_subnet="odb_subnet_value", + backup_odb_subnet="backup_odb_subnet_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = vm_cluster.CloudVmCluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_cloud_vm_cluster(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, vm_cluster.CloudVmCluster) + assert response.name == "name_value" + assert response.exadata_infrastructure == "exadata_infrastructure_value" + assert response.display_name == "display_name_value" + assert response.cidr == "cidr_value" + assert response.backup_subnet_cidr == "backup_subnet_cidr_value" + assert response.network == "network_value" + assert response.gcp_oracle_zone == "gcp_oracle_zone_value" + assert response.odb_network == "odb_network_value" + assert response.odb_subnet == "odb_subnet_value" + assert response.backup_odb_subnet == "backup_odb_subnet_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_cloud_vm_cluster_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with 
mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_get_cloud_vm_cluster" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_get_cloud_vm_cluster_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_get_cloud_vm_cluster" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.GetCloudVmClusterRequest.pb( + oracledatabase.GetCloudVmClusterRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = vm_cluster.CloudVmCluster.to_json(vm_cluster.CloudVmCluster()) + req.return_value.content = return_value + + request = oracledatabase.GetCloudVmClusterRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = vm_cluster.CloudVmCluster() + post_with_metadata.return_value = vm_cluster.CloudVmCluster(), metadata + + client.get_cloud_vm_cluster( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_create_cloud_vm_cluster_rest_bad_request( + request_type=oracledatabase.CreateCloudVmClusterRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the 
method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.create_cloud_vm_cluster(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.CreateCloudVmClusterRequest, + dict, + ], +) +def test_create_cloud_vm_cluster_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request_init["cloud_vm_cluster"] = { + "name": "name_value", + "exadata_infrastructure": "exadata_infrastructure_value", + "display_name": "display_name_value", + "properties": { + "ocid": "ocid_value", + "license_type": 1, + "gi_version": "gi_version_value", + "time_zone": {"id": "id_value", "version": "version_value"}, + "ssh_public_keys": ["ssh_public_keys_value1", "ssh_public_keys_value2"], + "node_count": 1070, + "shape": "shape_value", + "ocpu_count": 0.1087, + "memory_size_gb": 1499, + "db_node_storage_size_gb": 2401, + "storage_size_gb": 1591, + "data_storage_size_tb": 0.2109, + "disk_redundancy": 1, + "sparse_diskgroup_enabled": True, + "local_backup_enabled": True, + "hostname_prefix": "hostname_prefix_value", + "diagnostics_data_collection_options": { + "diagnostics_events_enabled": True, + "health_monitoring_enabled": True, + "incident_logs_enabled": True, + }, + "state": 1, + "scan_listener_port_tcp": 2356, + "scan_listener_port_tcp_ssl": 2789, + "domain": "domain_value", + "scan_dns": "scan_dns_value", + "hostname": "hostname_value", + 
"cpu_core_count": 1496, + "system_version": "system_version_value", + "scan_ip_ids": ["scan_ip_ids_value1", "scan_ip_ids_value2"], + "scan_dns_record_id": "scan_dns_record_id_value", + "oci_url": "oci_url_value", + "db_server_ocids": ["db_server_ocids_value1", "db_server_ocids_value2"], + "compartment_id": "compartment_id_value", + "dns_listener_ip": "dns_listener_ip_value", + "cluster_name": "cluster_name_value", + "compute_model": 1, + }, + "labels": {}, + "create_time": {"seconds": 751, "nanos": 543}, + "cidr": "cidr_value", + "backup_subnet_cidr": "backup_subnet_cidr_value", + "network": "network_value", + "gcp_oracle_zone": "gcp_oracle_zone_value", + "odb_network": "odb_network_value", + "odb_subnet": "odb_subnet_value", + "backup_odb_subnet": "backup_odb_subnet_value", + "identity_connector": { + "service_agent_email": "service_agent_email_value", + "connection_state": 1, + }, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = oracledatabase.CreateCloudVmClusterRequest.meta.fields[ + "cloud_vm_cluster" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["cloud_vm_cluster"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["cloud_vm_cluster"][field])): + del request_init["cloud_vm_cluster"][field][i][subfield] + 
else: + del request_init["cloud_vm_cluster"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_cloud_vm_cluster(request) + + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_cloud_vm_cluster_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_create_cloud_vm_cluster" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_create_cloud_vm_cluster_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_create_cloud_vm_cluster" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = 
oracledatabase.CreateCloudVmClusterRequest.pb( + oracledatabase.CreateCloudVmClusterRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = oracledatabase.CreateCloudVmClusterRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.create_cloud_vm_cluster( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_delete_cloud_vm_cluster_rest_bad_request( + request_type=oracledatabase.DeleteCloudVmClusterRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/cloudVmClusters/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.delete_cloud_vm_cluster(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.DeleteCloudVmClusterRequest, + dict, + ], +) +def test_delete_cloud_vm_cluster_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/cloudVmClusters/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.delete_cloud_vm_cluster(request) + + # Establish that the response is the type that we expect. 
+ json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_cloud_vm_cluster_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_delete_cloud_vm_cluster" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_delete_cloud_vm_cluster_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_delete_cloud_vm_cluster" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.DeleteCloudVmClusterRequest.pb( + oracledatabase.DeleteCloudVmClusterRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = oracledatabase.DeleteCloudVmClusterRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.delete_cloud_vm_cluster( + request, + metadata=[ + ("key", "val"), + 
("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_list_entitlements_rest_bad_request( + request_type=oracledatabase.ListEntitlementsRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_entitlements(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.ListEntitlementsRequest, + dict, + ], +) +def test_list_entitlements_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = oracledatabase.ListEntitlementsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.ListEntitlementsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_entitlements(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListEntitlementsPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_entitlements_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_list_entitlements" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_list_entitlements_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_list_entitlements" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.ListEntitlementsRequest.pb( + oracledatabase.ListEntitlementsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + 
req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = oracledatabase.ListEntitlementsResponse.to_json( + oracledatabase.ListEntitlementsResponse() + ) + req.return_value.content = return_value + + request = oracledatabase.ListEntitlementsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = oracledatabase.ListEntitlementsResponse() + post_with_metadata.return_value = ( + oracledatabase.ListEntitlementsResponse(), + metadata, + ) + + client.list_entitlements( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_list_db_servers_rest_bad_request( + request_type=oracledatabase.ListDbServersRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "parent": "projects/sample1/locations/sample2/cloudExadataInfrastructures/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_db_servers(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.ListDbServersRequest, + dict, + ], +) +def test_list_db_servers_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "parent": "projects/sample1/locations/sample2/cloudExadataInfrastructures/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = oracledatabase.ListDbServersResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.ListDbServersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_db_servers(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListDbServersPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_db_servers_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_list_db_servers" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_list_db_servers_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_list_db_servers" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.ListDbServersRequest.pb( + oracledatabase.ListDbServersRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = oracledatabase.ListDbServersResponse.to_json( + oracledatabase.ListDbServersResponse() + ) + req.return_value.content = return_value + + request = oracledatabase.ListDbServersRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = oracledatabase.ListDbServersResponse() + post_with_metadata.return_value = ( + oracledatabase.ListDbServersResponse(), + metadata, + ) + + client.list_db_servers( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), 
+ ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_list_db_nodes_rest_bad_request(request_type=oracledatabase.ListDbNodesRequest): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "parent": "projects/sample1/locations/sample2/cloudVmClusters/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_db_nodes(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.ListDbNodesRequest, + dict, + ], +) +def test_list_db_nodes_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "parent": "projects/sample1/locations/sample2/cloudVmClusters/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = oracledatabase.ListDbNodesResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.ListDbNodesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_db_nodes(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDbNodesPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_db_nodes_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_list_db_nodes" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_list_db_nodes_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_list_db_nodes" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.ListDbNodesRequest.pb( + oracledatabase.ListDbNodesRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + 
req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = oracledatabase.ListDbNodesResponse.to_json( + oracledatabase.ListDbNodesResponse() + ) + req.return_value.content = return_value + + request = oracledatabase.ListDbNodesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = oracledatabase.ListDbNodesResponse() + post_with_metadata.return_value = oracledatabase.ListDbNodesResponse(), metadata + + client.list_db_nodes( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_list_gi_versions_rest_bad_request( + request_type=oracledatabase.ListGiVersionsRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_gi_versions(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.ListGiVersionsRequest, + dict, + ], +) +def test_list_gi_versions_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = oracledatabase.ListGiVersionsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.ListGiVersionsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_gi_versions(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListGiVersionsPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_gi_versions_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_list_gi_versions" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_list_gi_versions_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_list_gi_versions" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.ListGiVersionsRequest.pb( + oracledatabase.ListGiVersionsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = oracledatabase.ListGiVersionsResponse.to_json( + oracledatabase.ListGiVersionsResponse() + ) + req.return_value.content = return_value + + request = oracledatabase.ListGiVersionsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = oracledatabase.ListGiVersionsResponse() + post_with_metadata.return_value = ( + oracledatabase.ListGiVersionsResponse(), + metadata, + ) + + client.list_gi_versions( + request, + metadata=[ + ("key", "val"), + 
("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_list_minor_versions_rest_bad_request( + request_type=minor_version.ListMinorVersionsRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2/giVersions/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_minor_versions(request) + + +@pytest.mark.parametrize( + "request_type", + [ + minor_version.ListMinorVersionsRequest, + dict, + ], +) +def test_list_minor_versions_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2/giVersions/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = minor_version.ListMinorVersionsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = minor_version.ListMinorVersionsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_minor_versions(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListMinorVersionsPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_minor_versions_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_list_minor_versions" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_list_minor_versions_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_list_minor_versions" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = minor_version.ListMinorVersionsRequest.pb( + minor_version.ListMinorVersionsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": 
pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = minor_version.ListMinorVersionsResponse.to_json( + minor_version.ListMinorVersionsResponse() + ) + req.return_value.content = return_value + + request = minor_version.ListMinorVersionsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = minor_version.ListMinorVersionsResponse() + post_with_metadata.return_value = ( + minor_version.ListMinorVersionsResponse(), + metadata, + ) + + client.list_minor_versions( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_list_db_system_shapes_rest_bad_request( + request_type=oracledatabase.ListDbSystemShapesRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_db_system_shapes(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.ListDbSystemShapesRequest, + dict, + ], +) +def test_list_db_system_shapes_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = oracledatabase.ListDbSystemShapesResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.ListDbSystemShapesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_db_system_shapes(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListDbSystemShapesPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_db_system_shapes_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_list_db_system_shapes" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_list_db_system_shapes_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_list_db_system_shapes" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.ListDbSystemShapesRequest.pb( + oracledatabase.ListDbSystemShapesRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = oracledatabase.ListDbSystemShapesResponse.to_json( + oracledatabase.ListDbSystemShapesResponse() + ) + req.return_value.content = return_value + + request = oracledatabase.ListDbSystemShapesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = oracledatabase.ListDbSystemShapesResponse() + post_with_metadata.return_value = ( + oracledatabase.ListDbSystemShapesResponse(), + metadata, + ) + + 
client.list_db_system_shapes( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_list_autonomous_databases_rest_bad_request( + request_type=oracledatabase.ListAutonomousDatabasesRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_autonomous_databases(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.ListAutonomousDatabasesRequest, + dict, + ], +) +def test_list_autonomous_databases_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = oracledatabase.ListAutonomousDatabasesResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.ListAutonomousDatabasesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_autonomous_databases(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListAutonomousDatabasesPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_autonomous_databases_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_list_autonomous_databases" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_list_autonomous_databases_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_list_autonomous_databases" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.ListAutonomousDatabasesRequest.pb( + oracledatabase.ListAutonomousDatabasesRequest() + ) + transcode.return_value = { + "method": "post", + 
"uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = oracledatabase.ListAutonomousDatabasesResponse.to_json( + oracledatabase.ListAutonomousDatabasesResponse() + ) + req.return_value.content = return_value + + request = oracledatabase.ListAutonomousDatabasesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = oracledatabase.ListAutonomousDatabasesResponse() + post_with_metadata.return_value = ( + oracledatabase.ListAutonomousDatabasesResponse(), + metadata, + ) + + client.list_autonomous_databases( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_get_autonomous_database_rest_bad_request( + request_type=oracledatabase.GetAutonomousDatabaseRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_autonomous_database(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.GetAutonomousDatabaseRequest, + dict, + ], +) +def test_get_autonomous_database_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = autonomous_database.AutonomousDatabase( + name="name_value", + database="database_value", + display_name="display_name_value", + entitlement_id="entitlement_id_value", + admin_password="admin_password_value", + network="network_value", + cidr="cidr_value", + odb_network="odb_network_value", + odb_subnet="odb_subnet_value", + peer_autonomous_databases=["peer_autonomous_databases_value"], + disaster_recovery_supported_locations=[ + "disaster_recovery_supported_locations_value" + ], + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = autonomous_database.AutonomousDatabase.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_autonomous_database(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, autonomous_database.AutonomousDatabase) + assert response.name == "name_value" + assert response.database == "database_value" + assert response.display_name == "display_name_value" + assert response.entitlement_id == "entitlement_id_value" + assert response.admin_password == "admin_password_value" + assert response.network == "network_value" + assert response.cidr == "cidr_value" + assert response.odb_network == "odb_network_value" + assert response.odb_subnet == "odb_subnet_value" + assert response.peer_autonomous_databases == ["peer_autonomous_databases_value"] + assert response.disaster_recovery_supported_locations == [ + "disaster_recovery_supported_locations_value" + ] + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_autonomous_database_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_get_autonomous_database" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_get_autonomous_database_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_get_autonomous_database" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.GetAutonomousDatabaseRequest.pb( + oracledatabase.GetAutonomousDatabaseRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + 
req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = autonomous_database.AutonomousDatabase.to_json( + autonomous_database.AutonomousDatabase() + ) + req.return_value.content = return_value + + request = oracledatabase.GetAutonomousDatabaseRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = autonomous_database.AutonomousDatabase() + post_with_metadata.return_value = ( + autonomous_database.AutonomousDatabase(), + metadata, + ) + + client.get_autonomous_database( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_create_autonomous_database_rest_bad_request( + request_type=oracledatabase.CreateAutonomousDatabaseRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.create_autonomous_database(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.CreateAutonomousDatabaseRequest, + dict, + ], +) +def test_create_autonomous_database_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request_init["autonomous_database"] = { + "name": "name_value", + "database": "database_value", + "display_name": "display_name_value", + "entitlement_id": "entitlement_id_value", + "admin_password": "admin_password_value", + "properties": { + "ocid": "ocid_value", + "compute_count": 0.1413, + "cpu_core_count": 1496, + "data_storage_size_tb": 2109, + "data_storage_size_gb": 2096, + "db_workload": 1, + "db_edition": 1, + "character_set": "character_set_value", + "n_character_set": "n_character_set_value", + "private_endpoint_ip": "private_endpoint_ip_value", + "private_endpoint_label": "private_endpoint_label_value", + "db_version": "db_version_value", + "is_auto_scaling_enabled": True, + "is_storage_auto_scaling_enabled": True, + "license_type": 1, + "customer_contacts": [{"email": "email_value"}], + "secret_id": "secret_id_value", + "vault_id": "vault_id_value", + "maintenance_schedule_type": 1, + "mtls_connection_required": True, + "backup_retention_period_days": 2975, + "actual_used_data_storage_size_tb": 0.3366, + "allocated_storage_size_tb": 0.2636, + "apex_details": { + 
"apex_version": "apex_version_value", + "ords_version": "ords_version_value", + }, + "are_primary_allowlisted_ips_used": True, + "lifecycle_details": "lifecycle_details_value", + "state": 1, + "autonomous_container_database_id": "autonomous_container_database_id_value", + "available_upgrade_versions": [ + "available_upgrade_versions_value1", + "available_upgrade_versions_value2", + ], + "connection_strings": { + "all_connection_strings": { + "high": "high_value", + "low": "low_value", + "medium": "medium_value", + }, + "dedicated": "dedicated_value", + "high": "high_value", + "low": "low_value", + "medium": "medium_value", + "profiles": [ + { + "consumer_group": 1, + "display_name": "display_name_value", + "host_format": 1, + "is_regional": True, + "protocol": 1, + "session_mode": 1, + "syntax_format": 1, + "tls_authentication": 1, + "value": "value_value", + } + ], + }, + "connection_urls": { + "apex_uri": "apex_uri_value", + "database_transforms_uri": "database_transforms_uri_value", + "graph_studio_uri": "graph_studio_uri_value", + "machine_learning_notebook_uri": "machine_learning_notebook_uri_value", + "machine_learning_user_management_uri": "machine_learning_user_management_uri_value", + "mongo_db_uri": "mongo_db_uri_value", + "ords_uri": "ords_uri_value", + "sql_dev_web_uri": "sql_dev_web_uri_value", + }, + "failed_data_recovery_duration": {"seconds": 751, "nanos": 543}, + "memory_table_gbs": 1691, + "is_local_data_guard_enabled": True, + "local_adg_auto_failover_max_data_loss_limit": 4513, + "local_standby_db": { + "lag_time_duration": {}, + "lifecycle_details": "lifecycle_details_value", + "state": 1, + "data_guard_role_changed_time": {"seconds": 751, "nanos": 543}, + "disaster_recovery_role_changed_time": {}, + }, + "memory_per_oracle_compute_unit_gbs": 3626, + "local_disaster_recovery_type": 1, + "data_safe_state": 1, + "database_management_state": 1, + "open_mode": 1, + "operations_insights_state": 1, + "peer_db_ids": ["peer_db_ids_value1", 
"peer_db_ids_value2"], + "permission_level": 1, + "private_endpoint": "private_endpoint_value", + "refreshable_mode": 1, + "refreshable_state": 1, + "role": 1, + "scheduled_operation_details": [ + { + "day_of_week": 1, + "start_time": { + "hours": 561, + "minutes": 773, + "seconds": 751, + "nanos": 543, + }, + "stop_time": {}, + } + ], + "sql_web_developer_url": "sql_web_developer_url_value", + "supported_clone_regions": [ + "supported_clone_regions_value1", + "supported_clone_regions_value2", + ], + "used_data_storage_size_tbs": 2752, + "oci_url": "oci_url_value", + "total_auto_backup_storage_size_gbs": 0.36100000000000004, + "next_long_term_backup_time": {}, + "data_guard_role_changed_time": {}, + "disaster_recovery_role_changed_time": {}, + "maintenance_begin_time": {}, + "maintenance_end_time": {}, + "allowlisted_ips": ["allowlisted_ips_value1", "allowlisted_ips_value2"], + "encryption_key": {"provider": 1, "kms_key": "kms_key_value"}, + "encryption_key_history_entries": [ + {"encryption_key": {}, "activation_time": {}} + ], + "service_agent_email": "service_agent_email_value", + }, + "labels": {}, + "network": "network_value", + "cidr": "cidr_value", + "odb_network": "odb_network_value", + "odb_subnet": "odb_subnet_value", + "source_config": { + "autonomous_database": "autonomous_database_value", + "automatic_backups_replication_enabled": True, + }, + "peer_autonomous_databases": [ + "peer_autonomous_databases_value1", + "peer_autonomous_databases_value2", + ], + "create_time": {}, + "disaster_recovery_supported_locations": [ + "disaster_recovery_supported_locations_value1", + "disaster_recovery_supported_locations_value2", + ], + } + # The version of a generated dependency at test runtime may differ from the version used during generation. 
+ # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = oracledatabase.CreateAutonomousDatabaseRequest.meta.fields[ + "autonomous_database" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["autonomous_database"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in 
the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["autonomous_database"][field])): + del request_init["autonomous_database"][field][i][subfield] + else: + del request_init["autonomous_database"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_autonomous_database(request) + + # Establish that the response is the type that we expect. 
+ json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_autonomous_database_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_create_autonomous_database" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_create_autonomous_database_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_create_autonomous_database" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.CreateAutonomousDatabaseRequest.pb( + oracledatabase.CreateAutonomousDatabaseRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = oracledatabase.CreateAutonomousDatabaseRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.create_autonomous_database( + request, + metadata=[ + 
("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_update_autonomous_database_rest_bad_request( + request_type=oracledatabase.UpdateAutonomousDatabaseRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "autonomous_database": { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.update_autonomous_database(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.UpdateAutonomousDatabaseRequest, + dict, + ], +) +def test_update_autonomous_database_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "autonomous_database": { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + } + request_init["autonomous_database"] = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3", + "database": "database_value", + "display_name": "display_name_value", + "entitlement_id": "entitlement_id_value", + "admin_password": "admin_password_value", + "properties": { + "ocid": "ocid_value", + "compute_count": 0.1413, + 
"cpu_core_count": 1496, + "data_storage_size_tb": 2109, + "data_storage_size_gb": 2096, + "db_workload": 1, + "db_edition": 1, + "character_set": "character_set_value", + "n_character_set": "n_character_set_value", + "private_endpoint_ip": "private_endpoint_ip_value", + "private_endpoint_label": "private_endpoint_label_value", + "db_version": "db_version_value", + "is_auto_scaling_enabled": True, + "is_storage_auto_scaling_enabled": True, + "license_type": 1, + "customer_contacts": [{"email": "email_value"}], + "secret_id": "secret_id_value", + "vault_id": "vault_id_value", + "maintenance_schedule_type": 1, + "mtls_connection_required": True, + "backup_retention_period_days": 2975, + "actual_used_data_storage_size_tb": 0.3366, + "allocated_storage_size_tb": 0.2636, + "apex_details": { + "apex_version": "apex_version_value", + "ords_version": "ords_version_value", + }, + "are_primary_allowlisted_ips_used": True, + "lifecycle_details": "lifecycle_details_value", + "state": 1, + "autonomous_container_database_id": "autonomous_container_database_id_value", + "available_upgrade_versions": [ + "available_upgrade_versions_value1", + "available_upgrade_versions_value2", + ], + "connection_strings": { + "all_connection_strings": { + "high": "high_value", + "low": "low_value", + "medium": "medium_value", + }, + "dedicated": "dedicated_value", + "high": "high_value", + "low": "low_value", + "medium": "medium_value", + "profiles": [ + { + "consumer_group": 1, + "display_name": "display_name_value", + "host_format": 1, + "is_regional": True, + "protocol": 1, + "session_mode": 1, + "syntax_format": 1, + "tls_authentication": 1, + "value": "value_value", + } + ], + }, + "connection_urls": { + "apex_uri": "apex_uri_value", + "database_transforms_uri": "database_transforms_uri_value", + "graph_studio_uri": "graph_studio_uri_value", + "machine_learning_notebook_uri": "machine_learning_notebook_uri_value", + "machine_learning_user_management_uri": 
"machine_learning_user_management_uri_value", + "mongo_db_uri": "mongo_db_uri_value", + "ords_uri": "ords_uri_value", + "sql_dev_web_uri": "sql_dev_web_uri_value", + }, + "failed_data_recovery_duration": {"seconds": 751, "nanos": 543}, + "memory_table_gbs": 1691, + "is_local_data_guard_enabled": True, + "local_adg_auto_failover_max_data_loss_limit": 4513, + "local_standby_db": { + "lag_time_duration": {}, + "lifecycle_details": "lifecycle_details_value", + "state": 1, + "data_guard_role_changed_time": {"seconds": 751, "nanos": 543}, + "disaster_recovery_role_changed_time": {}, + }, + "memory_per_oracle_compute_unit_gbs": 3626, + "local_disaster_recovery_type": 1, + "data_safe_state": 1, + "database_management_state": 1, + "open_mode": 1, + "operations_insights_state": 1, + "peer_db_ids": ["peer_db_ids_value1", "peer_db_ids_value2"], + "permission_level": 1, + "private_endpoint": "private_endpoint_value", + "refreshable_mode": 1, + "refreshable_state": 1, + "role": 1, + "scheduled_operation_details": [ + { + "day_of_week": 1, + "start_time": { + "hours": 561, + "minutes": 773, + "seconds": 751, + "nanos": 543, + }, + "stop_time": {}, + } + ], + "sql_web_developer_url": "sql_web_developer_url_value", + "supported_clone_regions": [ + "supported_clone_regions_value1", + "supported_clone_regions_value2", + ], + "used_data_storage_size_tbs": 2752, + "oci_url": "oci_url_value", + "total_auto_backup_storage_size_gbs": 0.36100000000000004, + "next_long_term_backup_time": {}, + "data_guard_role_changed_time": {}, + "disaster_recovery_role_changed_time": {}, + "maintenance_begin_time": {}, + "maintenance_end_time": {}, + "allowlisted_ips": ["allowlisted_ips_value1", "allowlisted_ips_value2"], + "encryption_key": {"provider": 1, "kms_key": "kms_key_value"}, + "encryption_key_history_entries": [ + {"encryption_key": {}, "activation_time": {}} + ], + "service_agent_email": "service_agent_email_value", + }, + "labels": {}, + "network": "network_value", + "cidr": "cidr_value", + 
"odb_network": "odb_network_value", + "odb_subnet": "odb_subnet_value", + "source_config": { + "autonomous_database": "autonomous_database_value", + "automatic_backups_replication_enabled": True, + }, + "peer_autonomous_databases": [ + "peer_autonomous_databases_value1", + "peer_autonomous_databases_value2", + ], + "create_time": {}, + "disaster_recovery_supported_locations": [ + "disaster_recovery_supported_locations_value1", + "disaster_recovery_supported_locations_value2", + ], + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = oracledatabase.UpdateAutonomousDatabaseRequest.meta.fields[ + "autonomous_database" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["autonomous_database"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["autonomous_database"][field])): + del 
request_init["autonomous_database"][field][i][subfield] + else: + del request_init["autonomous_database"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.update_autonomous_database(request) + + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_autonomous_database_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_update_autonomous_database" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_update_autonomous_database_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_update_autonomous_database" + ) as pre: + pre.assert_not_called() + 
post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.UpdateAutonomousDatabaseRequest.pb( + oracledatabase.UpdateAutonomousDatabaseRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = oracledatabase.UpdateAutonomousDatabaseRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.update_autonomous_database( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_delete_autonomous_database_rest_bad_request( + request_type=oracledatabase.DeleteAutonomousDatabaseRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.delete_autonomous_database(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.DeleteAutonomousDatabaseRequest, + dict, + ], +) +def test_delete_autonomous_database_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.delete_autonomous_database(request) + + # Establish that the response is the type that we expect. 
+ json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_autonomous_database_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_delete_autonomous_database" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_delete_autonomous_database_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_delete_autonomous_database" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.DeleteAutonomousDatabaseRequest.pb( + oracledatabase.DeleteAutonomousDatabaseRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = oracledatabase.DeleteAutonomousDatabaseRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.delete_autonomous_database( + request, + metadata=[ + 
("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_restore_autonomous_database_rest_bad_request( + request_type=oracledatabase.RestoreAutonomousDatabaseRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.restore_autonomous_database(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.RestoreAutonomousDatabaseRequest, + dict, + ], +) +def test_restore_autonomous_database_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.restore_autonomous_database(request) + + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_restore_autonomous_database_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_restore_autonomous_database" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_restore_autonomous_database_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_restore_autonomous_database" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.RestoreAutonomousDatabaseRequest.pb( + oracledatabase.RestoreAutonomousDatabaseRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + 
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = oracledatabase.RestoreAutonomousDatabaseRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.restore_autonomous_database( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_generate_autonomous_database_wallet_rest_bad_request( + request_type=oracledatabase.GenerateAutonomousDatabaseWalletRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.generate_autonomous_database_wallet(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.GenerateAutonomousDatabaseWalletRequest, + dict, + ], +) +def test_generate_autonomous_database_wallet_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = oracledatabase.GenerateAutonomousDatabaseWalletResponse( + archive_content=b"archive_content_blob", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.GenerateAutonomousDatabaseWalletResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.generate_autonomous_database_wallet(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, oracledatabase.GenerateAutonomousDatabaseWalletResponse) + assert response.archive_content == b"archive_content_blob" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_generate_autonomous_database_wallet_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_generate_autonomous_database_wallet", + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_generate_autonomous_database_wallet_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "pre_generate_autonomous_database_wallet", + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.GenerateAutonomousDatabaseWalletRequest.pb( + oracledatabase.GenerateAutonomousDatabaseWalletRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = oracledatabase.GenerateAutonomousDatabaseWalletResponse.to_json( + oracledatabase.GenerateAutonomousDatabaseWalletResponse() + ) + req.return_value.content = return_value + + request = oracledatabase.GenerateAutonomousDatabaseWalletRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = 
oracledatabase.GenerateAutonomousDatabaseWalletResponse() + post_with_metadata.return_value = ( + oracledatabase.GenerateAutonomousDatabaseWalletResponse(), + metadata, + ) + + client.generate_autonomous_database_wallet( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_list_autonomous_db_versions_rest_bad_request( + request_type=oracledatabase.ListAutonomousDbVersionsRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_autonomous_db_versions(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.ListAutonomousDbVersionsRequest, + dict, + ], +) +def test_list_autonomous_db_versions_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = oracledatabase.ListAutonomousDbVersionsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.ListAutonomousDbVersionsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_autonomous_db_versions(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListAutonomousDbVersionsPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_autonomous_db_versions_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_list_autonomous_db_versions" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_list_autonomous_db_versions_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_list_autonomous_db_versions" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = 
oracledatabase.ListAutonomousDbVersionsRequest.pb( + oracledatabase.ListAutonomousDbVersionsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = oracledatabase.ListAutonomousDbVersionsResponse.to_json( + oracledatabase.ListAutonomousDbVersionsResponse() + ) + req.return_value.content = return_value + + request = oracledatabase.ListAutonomousDbVersionsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = oracledatabase.ListAutonomousDbVersionsResponse() + post_with_metadata.return_value = ( + oracledatabase.ListAutonomousDbVersionsResponse(), + metadata, + ) + + client.list_autonomous_db_versions( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_list_autonomous_database_character_sets_rest_bad_request( + request_type=oracledatabase.ListAutonomousDatabaseCharacterSetsRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_autonomous_database_character_sets(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.ListAutonomousDatabaseCharacterSetsRequest, + dict, + ], +) +def test_list_autonomous_database_character_sets_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = oracledatabase.ListAutonomousDatabaseCharacterSetsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.ListAutonomousDatabaseCharacterSetsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_autonomous_database_character_sets(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListAutonomousDatabaseCharacterSetsPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_autonomous_database_character_sets_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_list_autonomous_database_character_sets", + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_list_autonomous_database_character_sets_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "pre_list_autonomous_database_character_sets", + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.ListAutonomousDatabaseCharacterSetsRequest.pb( + oracledatabase.ListAutonomousDatabaseCharacterSetsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = ( + oracledatabase.ListAutonomousDatabaseCharacterSetsResponse.to_json( + oracledatabase.ListAutonomousDatabaseCharacterSetsResponse() + ) + ) + req.return_value.content = return_value + + request = oracledatabase.ListAutonomousDatabaseCharacterSetsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + 
post.return_value = oracledatabase.ListAutonomousDatabaseCharacterSetsResponse() + post_with_metadata.return_value = ( + oracledatabase.ListAutonomousDatabaseCharacterSetsResponse(), + metadata, + ) + + client.list_autonomous_database_character_sets( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_list_autonomous_database_backups_rest_bad_request( + request_type=oracledatabase.ListAutonomousDatabaseBackupsRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_autonomous_database_backups(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.ListAutonomousDatabaseBackupsRequest, + dict, + ], +) +def test_list_autonomous_database_backups_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = oracledatabase.ListAutonomousDatabaseBackupsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.ListAutonomousDatabaseBackupsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_autonomous_database_backups(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListAutonomousDatabaseBackupsPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_autonomous_database_backups_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_list_autonomous_database_backups", + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_list_autonomous_database_backups_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_list_autonomous_database_backups" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + 
post_with_metadata.assert_not_called() + pb_message = oracledatabase.ListAutonomousDatabaseBackupsRequest.pb( + oracledatabase.ListAutonomousDatabaseBackupsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = oracledatabase.ListAutonomousDatabaseBackupsResponse.to_json( + oracledatabase.ListAutonomousDatabaseBackupsResponse() + ) + req.return_value.content = return_value + + request = oracledatabase.ListAutonomousDatabaseBackupsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = oracledatabase.ListAutonomousDatabaseBackupsResponse() + post_with_metadata.return_value = ( + oracledatabase.ListAutonomousDatabaseBackupsResponse(), + metadata, + ) + + client.list_autonomous_database_backups( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_stop_autonomous_database_rest_bad_request( + request_type=oracledatabase.StopAutonomousDatabaseRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.stop_autonomous_database(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.StopAutonomousDatabaseRequest, + dict, + ], +) +def test_stop_autonomous_database_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.stop_autonomous_database(request) + + # Establish that the response is the type that we expect. 
+ json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_stop_autonomous_database_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_stop_autonomous_database" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_stop_autonomous_database_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_stop_autonomous_database" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.StopAutonomousDatabaseRequest.pb( + oracledatabase.StopAutonomousDatabaseRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = oracledatabase.StopAutonomousDatabaseRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.stop_autonomous_database( + request, + metadata=[ + ("key", "val"), + 
("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_start_autonomous_database_rest_bad_request( + request_type=oracledatabase.StartAutonomousDatabaseRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.start_autonomous_database(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.StartAutonomousDatabaseRequest, + dict, + ], +) +def test_start_autonomous_database_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.start_autonomous_database(request) + + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_start_autonomous_database_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_start_autonomous_database" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_start_autonomous_database_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_start_autonomous_database" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.StartAutonomousDatabaseRequest.pb( + oracledatabase.StartAutonomousDatabaseRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + 
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = oracledatabase.StartAutonomousDatabaseRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.start_autonomous_database( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_restart_autonomous_database_rest_bad_request( + request_type=oracledatabase.RestartAutonomousDatabaseRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.restart_autonomous_database(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.RestartAutonomousDatabaseRequest, + dict, + ], +) +def test_restart_autonomous_database_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.restart_autonomous_database(request) + + # Establish that the response is the type that we expect. 
+ json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_restart_autonomous_database_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_restart_autonomous_database" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_restart_autonomous_database_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_restart_autonomous_database" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.RestartAutonomousDatabaseRequest.pb( + oracledatabase.RestartAutonomousDatabaseRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = oracledatabase.RestartAutonomousDatabaseRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.restart_autonomous_database( + request, + 
metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_switchover_autonomous_database_rest_bad_request( + request_type=oracledatabase.SwitchoverAutonomousDatabaseRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.switchover_autonomous_database(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.SwitchoverAutonomousDatabaseRequest, + dict, + ], +) +def test_switchover_autonomous_database_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.switchover_autonomous_database(request) + + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_switchover_autonomous_database_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_switchover_autonomous_database" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_switchover_autonomous_database_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_switchover_autonomous_database" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.SwitchoverAutonomousDatabaseRequest.pb( + oracledatabase.SwitchoverAutonomousDatabaseRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + 
req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = oracledatabase.SwitchoverAutonomousDatabaseRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.switchover_autonomous_database( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_failover_autonomous_database_rest_bad_request( + request_type=oracledatabase.FailoverAutonomousDatabaseRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.failover_autonomous_database(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.FailoverAutonomousDatabaseRequest, + dict, + ], +) +def test_failover_autonomous_database_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.failover_autonomous_database(request) + + # Establish that the response is the type that we expect. 
+ json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_failover_autonomous_database_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_failover_autonomous_database" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_failover_autonomous_database_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_failover_autonomous_database" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.FailoverAutonomousDatabaseRequest.pb( + oracledatabase.FailoverAutonomousDatabaseRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = oracledatabase.FailoverAutonomousDatabaseRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.failover_autonomous_database( + request, + 
metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_list_odb_networks_rest_bad_request( + request_type=odb_network.ListOdbNetworksRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_odb_networks(request) + + +@pytest.mark.parametrize( + "request_type", + [ + odb_network.ListOdbNetworksRequest, + dict, + ], +) +def test_list_odb_networks_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = odb_network.ListOdbNetworksResponse( + next_page_token="next_page_token_value", + unreachable=["unreachable_value"], + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = odb_network.ListOdbNetworksResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_odb_networks(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListOdbNetworksPager) + assert response.next_page_token == "next_page_token_value" + assert response.unreachable == ["unreachable_value"] + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_odb_networks_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_list_odb_networks" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_list_odb_networks_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_list_odb_networks" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = odb_network.ListOdbNetworksRequest.pb( + odb_network.ListOdbNetworksRequest() + ) + transcode.return_value = { + "method": "post", + "uri": 
"my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = odb_network.ListOdbNetworksResponse.to_json( + odb_network.ListOdbNetworksResponse() + ) + req.return_value.content = return_value + + request = odb_network.ListOdbNetworksRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = odb_network.ListOdbNetworksResponse() + post_with_metadata.return_value = ( + odb_network.ListOdbNetworksResponse(), + metadata, + ) + + client.list_odb_networks( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_get_odb_network_rest_bad_request( + request_type=odb_network.GetOdbNetworkRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/odbNetworks/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_odb_network(request) + + +@pytest.mark.parametrize( + "request_type", + [ + odb_network.GetOdbNetworkRequest, + dict, + ], +) +def test_get_odb_network_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/odbNetworks/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = odb_network.OdbNetwork( + name="name_value", + network="network_value", + state=odb_network.OdbNetwork.State.PROVISIONING, + entitlement_id="entitlement_id_value", + gcp_oracle_zone="gcp_oracle_zone_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = odb_network.OdbNetwork.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_odb_network(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, odb_network.OdbNetwork) + assert response.name == "name_value" + assert response.network == "network_value" + assert response.state == odb_network.OdbNetwork.State.PROVISIONING + assert response.entitlement_id == "entitlement_id_value" + assert response.gcp_oracle_zone == "gcp_oracle_zone_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_odb_network_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_get_odb_network" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_get_odb_network_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_get_odb_network" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = odb_network.GetOdbNetworkRequest.pb( + odb_network.GetOdbNetworkRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = odb_network.OdbNetwork.to_json(odb_network.OdbNetwork()) + req.return_value.content = return_value + + request = odb_network.GetOdbNetworkRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = odb_network.OdbNetwork() + post_with_metadata.return_value = 
odb_network.OdbNetwork(), metadata + + client.get_odb_network( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_create_odb_network_rest_bad_request( + request_type=gco_odb_network.CreateOdbNetworkRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.create_odb_network(request) + + +@pytest.mark.parametrize( + "request_type", + [ + gco_odb_network.CreateOdbNetworkRequest, + dict, + ], +) +def test_create_odb_network_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request_init["odb_network"] = { + "name": "name_value", + "network": "network_value", + "labels": {}, + "create_time": {"seconds": 751, "nanos": 543}, + "state": 1, + "entitlement_id": "entitlement_id_value", + "gcp_oracle_zone": "gcp_oracle_zone_value", + } + # The version of a generated dependency at test runtime may differ from the version used during generation. 
+ # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = gco_odb_network.CreateOdbNetworkRequest.meta.fields["odb_network"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["odb_network"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the 
dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["odb_network"][field])): + del request_init["odb_network"][field][i][subfield] + else: + del request_init["odb_network"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_odb_network(request) + + # Establish that the response is the type that we expect. 
+ json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_odb_network_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_create_odb_network" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_create_odb_network_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_create_odb_network" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = gco_odb_network.CreateOdbNetworkRequest.pb( + gco_odb_network.CreateOdbNetworkRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = gco_odb_network.CreateOdbNetworkRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.create_odb_network( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + 
pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_delete_odb_network_rest_bad_request( + request_type=odb_network.DeleteOdbNetworkRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/odbNetworks/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.delete_odb_network(request) + + +@pytest.mark.parametrize( + "request_type", + [ + odb_network.DeleteOdbNetworkRequest, + dict, + ], +) +def test_delete_odb_network_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/odbNetworks/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.delete_odb_network(request) + + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_odb_network_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_delete_odb_network" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_delete_odb_network_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_delete_odb_network" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = odb_network.DeleteOdbNetworkRequest.pb( + odb_network.DeleteOdbNetworkRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": 
"value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = odb_network.DeleteOdbNetworkRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.delete_odb_network( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_list_odb_subnets_rest_bad_request( + request_type=odb_subnet.ListOdbSubnetsRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2/odbNetworks/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_odb_subnets(request) + + +@pytest.mark.parametrize( + "request_type", + [ + odb_subnet.ListOdbSubnetsRequest, + dict, + ], +) +def test_list_odb_subnets_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2/odbNetworks/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = odb_subnet.ListOdbSubnetsResponse( + next_page_token="next_page_token_value", + unreachable=["unreachable_value"], + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = odb_subnet.ListOdbSubnetsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_odb_subnets(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListOdbSubnetsPager) + assert response.next_page_token == "next_page_token_value" + assert response.unreachable == ["unreachable_value"] + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_odb_subnets_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_list_odb_subnets" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_list_odb_subnets_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_list_odb_subnets" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = odb_subnet.ListOdbSubnetsRequest.pb( + odb_subnet.ListOdbSubnetsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = odb_subnet.ListOdbSubnetsResponse.to_json( + odb_subnet.ListOdbSubnetsResponse() + ) + req.return_value.content = return_value + + request = odb_subnet.ListOdbSubnetsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = odb_subnet.ListOdbSubnetsResponse() + post_with_metadata.return_value = odb_subnet.ListOdbSubnetsResponse(), metadata + + client.list_odb_subnets( + request, + metadata=[ + ("key", 
"val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_get_odb_subnet_rest_bad_request(request_type=odb_subnet.GetOdbSubnetRequest): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/odbNetworks/sample3/odbSubnets/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_odb_subnet(request) + + +@pytest.mark.parametrize( + "request_type", + [ + odb_subnet.GetOdbSubnetRequest, + dict, + ], +) +def test_get_odb_subnet_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/odbNetworks/sample3/odbSubnets/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = odb_subnet.OdbSubnet( + name="name_value", + cidr_range="cidr_range_value", + purpose=odb_subnet.OdbSubnet.Purpose.CLIENT_SUBNET, + state=odb_subnet.OdbSubnet.State.PROVISIONING, + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = odb_subnet.OdbSubnet.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_odb_subnet(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, odb_subnet.OdbSubnet) + assert response.name == "name_value" + assert response.cidr_range == "cidr_range_value" + assert response.purpose == odb_subnet.OdbSubnet.Purpose.CLIENT_SUBNET + assert response.state == odb_subnet.OdbSubnet.State.PROVISIONING + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_odb_subnet_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_get_odb_subnet" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_get_odb_subnet_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_get_odb_subnet" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = 
odb_subnet.GetOdbSubnetRequest.pb(odb_subnet.GetOdbSubnetRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = odb_subnet.OdbSubnet.to_json(odb_subnet.OdbSubnet()) + req.return_value.content = return_value + + request = odb_subnet.GetOdbSubnetRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = odb_subnet.OdbSubnet() + post_with_metadata.return_value = odb_subnet.OdbSubnet(), metadata + + client.get_odb_subnet( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_create_odb_subnet_rest_bad_request( + request_type=gco_odb_subnet.CreateOdbSubnetRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2/odbNetworks/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.create_odb_subnet(request) + + +@pytest.mark.parametrize( + "request_type", + [ + gco_odb_subnet.CreateOdbSubnetRequest, + dict, + ], +) +def test_create_odb_subnet_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2/odbNetworks/sample3"} + request_init["odb_subnet"] = { + "name": "name_value", + "cidr_range": "cidr_range_value", + "purpose": 1, + "labels": {}, + "create_time": {"seconds": 751, "nanos": 543}, + "state": 1, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = gco_odb_subnet.CreateOdbSubnetRequest.meta.fields["odb_subnet"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["odb_subnet"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["odb_subnet"][field])): + del request_init["odb_subnet"][field][i][subfield] + else: + del 
request_init["odb_subnet"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_odb_subnet(request) + + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_odb_subnet_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_create_odb_subnet" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_create_odb_subnet_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_create_odb_subnet" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = gco_odb_subnet.CreateOdbSubnetRequest.pb( + 
gco_odb_subnet.CreateOdbSubnetRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = gco_odb_subnet.CreateOdbSubnetRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.create_odb_subnet( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_delete_odb_subnet_rest_bad_request( + request_type=odb_subnet.DeleteOdbSubnetRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/odbNetworks/sample3/odbSubnets/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.delete_odb_subnet(request) + + +@pytest.mark.parametrize( + "request_type", + [ + odb_subnet.DeleteOdbSubnetRequest, + dict, + ], +) +def test_delete_odb_subnet_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/odbNetworks/sample3/odbSubnets/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.delete_odb_subnet(request) + + # Establish that the response is the type that we expect. 
+ json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_odb_subnet_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_delete_odb_subnet" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_delete_odb_subnet_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_delete_odb_subnet" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = odb_subnet.DeleteOdbSubnetRequest.pb( + odb_subnet.DeleteOdbSubnetRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = odb_subnet.DeleteOdbSubnetRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.delete_odb_subnet( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + 
post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_list_exadb_vm_clusters_rest_bad_request( + request_type=oracledatabase.ListExadbVmClustersRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_exadb_vm_clusters(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.ListExadbVmClustersRequest, + dict, + ], +) +def test_list_exadb_vm_clusters_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = oracledatabase.ListExadbVmClustersResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = oracledatabase.ListExadbVmClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_exadb_vm_clusters(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListExadbVmClustersPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_exadb_vm_clusters_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_list_exadb_vm_clusters" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_list_exadb_vm_clusters_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_list_exadb_vm_clusters" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.ListExadbVmClustersRequest.pb( + oracledatabase.ListExadbVmClustersRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, 
+ "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = oracledatabase.ListExadbVmClustersResponse.to_json( + oracledatabase.ListExadbVmClustersResponse() + ) + req.return_value.content = return_value + + request = oracledatabase.ListExadbVmClustersRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = oracledatabase.ListExadbVmClustersResponse() + post_with_metadata.return_value = ( + oracledatabase.ListExadbVmClustersResponse(), + metadata, + ) + + client.list_exadb_vm_clusters( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_get_exadb_vm_cluster_rest_bad_request( + request_type=oracledatabase.GetExadbVmClusterRequest, +): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/exadbVmClusters/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_exadb_vm_cluster(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.GetExadbVmClusterRequest, + dict, + ], +) +def test_get_exadb_vm_cluster_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/exadbVmClusters/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = exadb_vm_cluster.ExadbVmCluster( + name="name_value", + gcp_oracle_zone="gcp_oracle_zone_value", + odb_network="odb_network_value", + odb_subnet="odb_subnet_value", + backup_odb_subnet="backup_odb_subnet_value", + display_name="display_name_value", + entitlement_id="entitlement_id_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = exadb_vm_cluster.ExadbVmCluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_exadb_vm_cluster(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, exadb_vm_cluster.ExadbVmCluster) + assert response.name == "name_value" + assert response.gcp_oracle_zone == "gcp_oracle_zone_value" + assert response.odb_network == "odb_network_value" + assert response.odb_subnet == "odb_subnet_value" + assert response.backup_odb_subnet == "backup_odb_subnet_value" + assert response.display_name == "display_name_value" + assert response.entitlement_id == "entitlement_id_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_exadb_vm_cluster_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_get_exadb_vm_cluster" + ) as post, mock.patch.object( + 
transports.OracleDatabaseRestInterceptor, + "post_get_exadb_vm_cluster_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_get_exadb_vm_cluster" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = oracledatabase.GetExadbVmClusterRequest.pb( + oracledatabase.GetExadbVmClusterRequest() + ) + transcode.return_value = { "method": "post", "uri": "my_uri", "body": pb_message, @@ -19386,24 +48068,250 @@ def test_list_cloud_exadata_infrastructures_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = oracledatabase.ListCloudExadataInfrastructuresResponse.to_json( - oracledatabase.ListCloudExadataInfrastructuresResponse() + return_value = exadb_vm_cluster.ExadbVmCluster.to_json( + exadb_vm_cluster.ExadbVmCluster() ) req.return_value.content = return_value - request = oracledatabase.ListCloudExadataInfrastructuresRequest() + request = oracledatabase.GetExadbVmClusterRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = oracledatabase.ListCloudExadataInfrastructuresResponse() - post_with_metadata.return_value = ( - oracledatabase.ListCloudExadataInfrastructuresResponse(), - metadata, + post.return_value = exadb_vm_cluster.ExadbVmCluster() + post_with_metadata.return_value = exadb_vm_cluster.ExadbVmCluster(), metadata + + client.get_exadb_vm_cluster( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], ) - client.list_cloud_exadata_infrastructures( + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_create_exadb_vm_cluster_rest_bad_request( + request_type=oracledatabase.CreateExadbVmClusterRequest, +): + client = OracleDatabaseClient( + 
credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.create_exadb_vm_cluster(request) + + +@pytest.mark.parametrize( + "request_type", + [ + oracledatabase.CreateExadbVmClusterRequest, + dict, + ], +) +def test_create_exadb_vm_cluster_rest_call_success(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request_init["exadb_vm_cluster"] = { + "name": "name_value", + "properties": { + "cluster_name": "cluster_name_value", + "grid_image_id": "grid_image_id_value", + "node_count": 1070, + "enabled_ecpu_count_per_node": 2826, + "additional_ecpu_count_per_node": 3160, + "vm_file_system_storage": {"size_in_gbs_per_node": 2103}, + "license_model": 1, + "exascale_db_storage_vault": "exascale_db_storage_vault_value", + "hostname_prefix": "hostname_prefix_value", + "hostname": "hostname_value", + "ssh_public_keys": ["ssh_public_keys_value1", "ssh_public_keys_value2"], + "data_collection_options": { + "is_diagnostics_events_enabled": True, + "is_health_monitoring_enabled": True, + "is_incident_logs_enabled": True, + }, + "time_zone": {"id": "id_value", "version": "version_value"}, + 
"lifecycle_state": 1, + "shape_attribute": 1, + "memory_size_gb": 1499, + "scan_listener_port_tcp": 2356, + "oci_uri": "oci_uri_value", + "gi_version": "gi_version_value", + }, + "gcp_oracle_zone": "gcp_oracle_zone_value", + "labels": {}, + "odb_network": "odb_network_value", + "odb_subnet": "odb_subnet_value", + "backup_odb_subnet": "backup_odb_subnet_value", + "display_name": "display_name_value", + "create_time": {"seconds": 751, "nanos": 543}, + "entitlement_id": "entitlement_id_value", + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = oracledatabase.CreateExadbVmClusterRequest.meta.fields[ + "exadb_vm_cluster" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["exadb_vm_cluster"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["exadb_vm_cluster"][field])): + del request_init["exadb_vm_cluster"][field][i][subfield] + 
else: + del request_init["exadb_vm_cluster"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_exadb_vm_cluster(request) + + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_exadb_vm_cluster_rest_interceptors(null_interceptor): + transport = transports.OracleDatabaseRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.OracleDatabaseRestInterceptor(), + ) + client = OracleDatabaseClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_create_exadb_vm_cluster" + ) as post, mock.patch.object( + transports.OracleDatabaseRestInterceptor, + "post_create_exadb_vm_cluster_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.OracleDatabaseRestInterceptor, "pre_create_exadb_vm_cluster" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = 
oracledatabase.CreateExadbVmClusterRequest.pb( + oracledatabase.CreateExadbVmClusterRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = oracledatabase.CreateExadbVmClusterRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.create_exadb_vm_cluster( request, metadata=[ ("key", "val"), @@ -19416,15 +48324,15 @@ def test_list_cloud_exadata_infrastructures_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_get_cloud_exadata_infrastructure_rest_bad_request( - request_type=oracledatabase.GetCloudExadataInfrastructureRequest, +def test_delete_exadb_vm_cluster_rest_bad_request( + request_type=oracledatabase.DeleteExadbVmClusterRequest, ): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding request_init = { - "name": "projects/sample1/locations/sample2/cloudExadataInfrastructures/sample3" + "name": "projects/sample1/locations/sample2/exadbVmClusters/sample3" } request = request_type(**request_init) @@ -19440,59 +48348,47 @@ def test_get_cloud_exadata_infrastructure_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_cloud_exadata_infrastructure(request) + client.delete_exadb_vm_cluster(request) @pytest.mark.parametrize( "request_type", [ - 
oracledatabase.GetCloudExadataInfrastructureRequest, + oracledatabase.DeleteExadbVmClusterRequest, dict, ], ) -def test_get_cloud_exadata_infrastructure_rest_call_success(request_type): +def test_delete_exadb_vm_cluster_rest_call_success(request_type): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding request_init = { - "name": "projects/sample1/locations/sample2/cloudExadataInfrastructures/sample3" + "name": "projects/sample1/locations/sample2/exadbVmClusters/sample3" } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = exadata_infra.CloudExadataInfrastructure( - name="name_value", - display_name="display_name_value", - gcp_oracle_zone="gcp_oracle_zone_value", - entitlement_id="entitlement_id_value", - ) + return_value = operations_pb2.Operation(name="operations/spam") # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = exadata_infra.CloudExadataInfrastructure.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get_cloud_exadata_infrastructure(request) + response = client.delete_exadb_vm_cluster(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, exadata_infra.CloudExadataInfrastructure) - assert response.name == "name_value" - assert response.display_name == "display_name_value" - assert response.gcp_oracle_zone == "gcp_oracle_zone_value" - assert response.entitlement_id == "entitlement_id_value" + json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_cloud_exadata_infrastructure_rest_interceptors(null_interceptor): +def test_delete_exadb_vm_cluster_rest_interceptors(null_interceptor): transport = transports.OracleDatabaseRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -19506,19 +48402,20 @@ def test_get_cloud_exadata_infrastructure_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.OracleDatabaseRestInterceptor, - "post_get_cloud_exadata_infrastructure", + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.OracleDatabaseRestInterceptor, "post_delete_exadb_vm_cluster" ) as post, mock.patch.object( transports.OracleDatabaseRestInterceptor, - "post_get_cloud_exadata_infrastructure_with_metadata", + "post_delete_exadb_vm_cluster_with_metadata", ) as post_with_metadata, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "pre_get_cloud_exadata_infrastructure" + transports.OracleDatabaseRestInterceptor, "pre_delete_exadb_vm_cluster" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = oracledatabase.GetCloudExadataInfrastructureRequest.pb( - oracledatabase.GetCloudExadataInfrastructureRequest() + pb_message = oracledatabase.DeleteExadbVmClusterRequest.pb( + oracledatabase.DeleteExadbVmClusterRequest() ) transcode.return_value = { "method": "post", @@ -19530,24 +48427,19 @@ def test_get_cloud_exadata_infrastructure_rest_interceptors(null_interceptor): req.return_value = 
mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = exadata_infra.CloudExadataInfrastructure.to_json( - exadata_infra.CloudExadataInfrastructure() - ) + return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value - request = oracledatabase.GetCloudExadataInfrastructureRequest() + request = oracledatabase.DeleteExadbVmClusterRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = exadata_infra.CloudExadataInfrastructure() - post_with_metadata.return_value = ( - exadata_infra.CloudExadataInfrastructure(), - metadata, - ) + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.get_cloud_exadata_infrastructure( + client.delete_exadb_vm_cluster( request, metadata=[ ("key", "val"), @@ -19560,14 +48452,18 @@ def test_get_cloud_exadata_infrastructure_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_create_cloud_exadata_infrastructure_rest_bad_request( - request_type=oracledatabase.CreateCloudExadataInfrastructureRequest, +def test_update_exadb_vm_cluster_rest_bad_request( + request_type=oracledatabase.UpdateExadbVmClusterRequest, ): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/locations/sample2"} + request_init = { + "exadb_vm_cluster": { + "name": "projects/sample1/locations/sample2/exadbVmClusters/sample3" + } + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -19582,77 +48478,70 @@ def test_create_cloud_exadata_infrastructure_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.create_cloud_exadata_infrastructure(request) + client.update_exadb_vm_cluster(request) @pytest.mark.parametrize( "request_type", [ - oracledatabase.CreateCloudExadataInfrastructureRequest, + oracledatabase.UpdateExadbVmClusterRequest, dict, ], ) -def test_create_cloud_exadata_infrastructure_rest_call_success(request_type): +def test_update_exadb_vm_cluster_rest_call_success(request_type): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/locations/sample2"} - request_init["cloud_exadata_infrastructure"] = { - "name": "name_value", - "display_name": "display_name_value", - "gcp_oracle_zone": "gcp_oracle_zone_value", - "entitlement_id": "entitlement_id_value", + request_init = { + "exadb_vm_cluster": { + "name": "projects/sample1/locations/sample2/exadbVmClusters/sample3" + } + } + request_init["exadb_vm_cluster"] = { + "name": "projects/sample1/locations/sample2/exadbVmClusters/sample3", "properties": { - "ocid": "ocid_value", - "compute_count": 1413, - "storage_count": 1405, - "total_storage_size_gb": 2234, - "available_storage_size_gb": 2615, - "maintenance_window": { - "preference": 1, - "months": [1], - "weeks_of_month": [1497, 1498], - "days_of_week": [1], - "hours_of_day": [1283, 1284], - "lead_time_week": 1455, - "patching_mode": 1, - "custom_action_timeout_mins": 2804, - "is_custom_action_timeout_enabled": True, + "cluster_name": "cluster_name_value", + "grid_image_id": "grid_image_id_value", + "node_count": 1070, + "enabled_ecpu_count_per_node": 2826, + "additional_ecpu_count_per_node": 3160, + "vm_file_system_storage": {"size_in_gbs_per_node": 2103}, + "license_model": 1, + 
"exascale_db_storage_vault": "exascale_db_storage_vault_value", + "hostname_prefix": "hostname_prefix_value", + "hostname": "hostname_value", + "ssh_public_keys": ["ssh_public_keys_value1", "ssh_public_keys_value2"], + "data_collection_options": { + "is_diagnostics_events_enabled": True, + "is_health_monitoring_enabled": True, + "is_incident_logs_enabled": True, }, - "state": 1, - "shape": "shape_value", - "oci_url": "oci_url_value", - "cpu_count": 976, - "max_cpu_count": 1397, + "time_zone": {"id": "id_value", "version": "version_value"}, + "lifecycle_state": 1, + "shape_attribute": 1, "memory_size_gb": 1499, - "max_memory_gb": 1382, - "db_node_storage_size_gb": 2401, - "max_db_node_storage_size_gb": 2822, - "data_storage_size_tb": 0.2109, - "max_data_storage_tb": 0.19920000000000002, - "activated_storage_count": 2449, - "additional_storage_count": 2549, - "db_server_version": "db_server_version_value", - "storage_server_version": "storage_server_version_value", - "next_maintenance_run_id": "next_maintenance_run_id_value", - "next_maintenance_run_time": {"seconds": 751, "nanos": 543}, - "next_security_maintenance_run_time": {}, - "customer_contacts": [{"email": "email_value"}], - "monthly_storage_server_version": "monthly_storage_server_version_value", - "monthly_db_server_version": "monthly_db_server_version_value", + "scan_listener_port_tcp": 2356, + "oci_uri": "oci_uri_value", + "gi_version": "gi_version_value", }, + "gcp_oracle_zone": "gcp_oracle_zone_value", "labels": {}, - "create_time": {}, + "odb_network": "odb_network_value", + "odb_subnet": "odb_subnet_value", + "backup_odb_subnet": "backup_odb_subnet_value", + "display_name": "display_name_value", + "create_time": {"seconds": 751, "nanos": 543}, + "entitlement_id": "entitlement_id_value", } # The version of a generated dependency at test runtime may differ from the version used during generation. 
# Delete any fields which are not present in the current runtime dependency # See https://github.com/googleapis/gapic-generator-python/issues/1748 # Determine if the message type is proto-plus or protobuf - test_field = oracledatabase.CreateCloudExadataInfrastructureRequest.meta.fields[ - "cloud_exadata_infrastructure" + test_field = oracledatabase.UpdateExadbVmClusterRequest.meta.fields[ + "exadb_vm_cluster" ] def get_message_fields(field): @@ -19681,9 +48570,7 @@ def get_message_fields(field): # For each item in the sample request, create a list of sub fields which are not present at runtime # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init[ - "cloud_exadata_infrastructure" - ].items(): # pragma: NO COVER + for field, value in request_init["exadb_vm_cluster"].items(): # pragma: NO COVER result = None is_repeated = False # For repeated fields @@ -19713,12 +48600,10 @@ def get_message_fields(field): subfield = subfield_to_delete.get("subfield") if subfield: if field_repeated: - for i in range( - 0, len(request_init["cloud_exadata_infrastructure"][field]) - ): - del request_init["cloud_exadata_infrastructure"][field][i][subfield] + for i in range(0, len(request_init["exadb_vm_cluster"][field])): + del request_init["exadb_vm_cluster"][field][i][subfield] else: - del request_init["cloud_exadata_infrastructure"][field][subfield] + del request_init["exadb_vm_cluster"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -19733,14 +48618,14 @@ def get_message_fields(field): response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.create_cloud_exadata_infrastructure(request) + response = client.update_exadb_vm_cluster(request) # Establish that the response is the type that we expect. 
json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_cloud_exadata_infrastructure_rest_interceptors(null_interceptor): +def test_update_exadb_vm_cluster_rest_interceptors(null_interceptor): transport = transports.OracleDatabaseRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -19756,20 +48641,18 @@ def test_create_cloud_exadata_infrastructure_rest_interceptors(null_interceptor) ) as transcode, mock.patch.object( operation.Operation, "_set_result_from_operation" ), mock.patch.object( - transports.OracleDatabaseRestInterceptor, - "post_create_cloud_exadata_infrastructure", + transports.OracleDatabaseRestInterceptor, "post_update_exadb_vm_cluster" ) as post, mock.patch.object( transports.OracleDatabaseRestInterceptor, - "post_create_cloud_exadata_infrastructure_with_metadata", + "post_update_exadb_vm_cluster_with_metadata", ) as post_with_metadata, mock.patch.object( - transports.OracleDatabaseRestInterceptor, - "pre_create_cloud_exadata_infrastructure", + transports.OracleDatabaseRestInterceptor, "pre_update_exadb_vm_cluster" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = oracledatabase.CreateCloudExadataInfrastructureRequest.pb( - oracledatabase.CreateCloudExadataInfrastructureRequest() + pb_message = oracledatabase.UpdateExadbVmClusterRequest.pb( + oracledatabase.UpdateExadbVmClusterRequest() ) transcode.return_value = { "method": "post", @@ -19784,7 +48667,7 @@ def test_create_cloud_exadata_infrastructure_rest_interceptors(null_interceptor) return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value - request = oracledatabase.CreateCloudExadataInfrastructureRequest() + request = oracledatabase.UpdateExadbVmClusterRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), @@ -19793,7 +48676,7 @@ def 
test_create_cloud_exadata_infrastructure_rest_interceptors(null_interceptor) post.return_value = operations_pb2.Operation() post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.create_cloud_exadata_infrastructure( + client.update_exadb_vm_cluster( request, metadata=[ ("key", "val"), @@ -19806,15 +48689,15 @@ def test_create_cloud_exadata_infrastructure_rest_interceptors(null_interceptor) post_with_metadata.assert_called_once() -def test_delete_cloud_exadata_infrastructure_rest_bad_request( - request_type=oracledatabase.DeleteCloudExadataInfrastructureRequest, +def test_remove_virtual_machine_exadb_vm_cluster_rest_bad_request( + request_type=oracledatabase.RemoveVirtualMachineExadbVmClusterRequest, ): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding request_init = { - "name": "projects/sample1/locations/sample2/cloudExadataInfrastructures/sample3" + "name": "projects/sample1/locations/sample2/exadbVmClusters/sample3" } request = request_type(**request_init) @@ -19830,24 +48713,24 @@ def test_delete_cloud_exadata_infrastructure_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.delete_cloud_exadata_infrastructure(request) + client.remove_virtual_machine_exadb_vm_cluster(request) @pytest.mark.parametrize( "request_type", [ - oracledatabase.DeleteCloudExadataInfrastructureRequest, + oracledatabase.RemoveVirtualMachineExadbVmClusterRequest, dict, ], ) -def test_delete_cloud_exadata_infrastructure_rest_call_success(request_type): +def test_remove_virtual_machine_exadb_vm_cluster_rest_call_success(request_type): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding request_init = { - "name": 
"projects/sample1/locations/sample2/cloudExadataInfrastructures/sample3" + "name": "projects/sample1/locations/sample2/exadbVmClusters/sample3" } request = request_type(**request_init) @@ -19863,14 +48746,14 @@ def test_delete_cloud_exadata_infrastructure_rest_call_success(request_type): response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.delete_cloud_exadata_infrastructure(request) + response = client.remove_virtual_machine_exadb_vm_cluster(request) # Establish that the response is the type that we expect. json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_cloud_exadata_infrastructure_rest_interceptors(null_interceptor): +def test_remove_virtual_machine_exadb_vm_cluster_rest_interceptors(null_interceptor): transport = transports.OracleDatabaseRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -19887,19 +48770,19 @@ def test_delete_cloud_exadata_infrastructure_rest_interceptors(null_interceptor) operation.Operation, "_set_result_from_operation" ), mock.patch.object( transports.OracleDatabaseRestInterceptor, - "post_delete_cloud_exadata_infrastructure", + "post_remove_virtual_machine_exadb_vm_cluster", ) as post, mock.patch.object( transports.OracleDatabaseRestInterceptor, - "post_delete_cloud_exadata_infrastructure_with_metadata", + "post_remove_virtual_machine_exadb_vm_cluster_with_metadata", ) as post_with_metadata, mock.patch.object( transports.OracleDatabaseRestInterceptor, - "pre_delete_cloud_exadata_infrastructure", + "pre_remove_virtual_machine_exadb_vm_cluster", ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = oracledatabase.DeleteCloudExadataInfrastructureRequest.pb( - oracledatabase.DeleteCloudExadataInfrastructureRequest() + pb_message = 
oracledatabase.RemoveVirtualMachineExadbVmClusterRequest.pb( + oracledatabase.RemoveVirtualMachineExadbVmClusterRequest() ) transcode.return_value = { "method": "post", @@ -19914,7 +48797,7 @@ def test_delete_cloud_exadata_infrastructure_rest_interceptors(null_interceptor) return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value - request = oracledatabase.DeleteCloudExadataInfrastructureRequest() + request = oracledatabase.RemoveVirtualMachineExadbVmClusterRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), @@ -19923,7 +48806,7 @@ def test_delete_cloud_exadata_infrastructure_rest_interceptors(null_interceptor) post.return_value = operations_pb2.Operation() post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.delete_cloud_exadata_infrastructure( + client.remove_virtual_machine_exadb_vm_cluster( request, metadata=[ ("key", "val"), @@ -19936,8 +48819,8 @@ def test_delete_cloud_exadata_infrastructure_rest_interceptors(null_interceptor) post_with_metadata.assert_called_once() -def test_list_cloud_vm_clusters_rest_bad_request( - request_type=oracledatabase.ListCloudVmClustersRequest, +def test_list_exascale_db_storage_vaults_rest_bad_request( + request_type=exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest, ): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" @@ -19958,17 +48841,17 @@ def test_list_cloud_vm_clusters_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_cloud_vm_clusters(request) + client.list_exascale_db_storage_vaults(request) @pytest.mark.parametrize( "request_type", [ - oracledatabase.ListCloudVmClustersRequest, + exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest, dict, ], ) -def test_list_cloud_vm_clusters_rest_call_success(request_type): +def 
test_list_exascale_db_storage_vaults_rest_call_success(request_type): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -19980,7 +48863,7 @@ def test_list_cloud_vm_clusters_rest_call_success(request_type): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = oracledatabase.ListCloudVmClustersResponse( + return_value = exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse( next_page_token="next_page_token_value", ) @@ -19989,20 +48872,22 @@ def test_list_cloud_vm_clusters_rest_call_success(request_type): response_value.status_code = 200 # Convert return value to protobuf type - return_value = oracledatabase.ListCloudVmClustersResponse.pb(return_value) + return_value = exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse.pb( + return_value + ) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_cloud_vm_clusters(request) + response = client.list_exascale_db_storage_vaults(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListCloudVmClustersPager) + assert isinstance(response, pagers.ListExascaleDbStorageVaultsPager) assert response.next_page_token == "next_page_token_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_cloud_vm_clusters_rest_interceptors(null_interceptor): +def test_list_exascale_db_storage_vaults_rest_interceptors(null_interceptor): transport = transports.OracleDatabaseRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -20016,18 +48901,18 @@ def test_list_cloud_vm_clusters_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "post_list_cloud_vm_clusters" + transports.OracleDatabaseRestInterceptor, "post_list_exascale_db_storage_vaults" ) as post, mock.patch.object( transports.OracleDatabaseRestInterceptor, - "post_list_cloud_vm_clusters_with_metadata", + "post_list_exascale_db_storage_vaults_with_metadata", ) as post_with_metadata, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "pre_list_cloud_vm_clusters" + transports.OracleDatabaseRestInterceptor, "pre_list_exascale_db_storage_vaults" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = oracledatabase.ListCloudVmClustersRequest.pb( - oracledatabase.ListCloudVmClustersRequest() + pb_message = exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest.pb( + exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest() ) transcode.return_value = { "method": "post", @@ -20039,24 +48924,28 @@ def test_list_cloud_vm_clusters_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = oracledatabase.ListCloudVmClustersResponse.to_json( - oracledatabase.ListCloudVmClustersResponse() + 
return_value = ( + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse.to_json( + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse() + ) ) req.return_value.content = return_value - request = oracledatabase.ListCloudVmClustersRequest() + request = exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = oracledatabase.ListCloudVmClustersResponse() + post.return_value = ( + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse() + ) post_with_metadata.return_value = ( - oracledatabase.ListCloudVmClustersResponse(), + exascale_db_storage_vault.ListExascaleDbStorageVaultsResponse(), metadata, ) - client.list_cloud_vm_clusters( + client.list_exascale_db_storage_vaults( request, metadata=[ ("key", "val"), @@ -20069,15 +48958,15 @@ def test_list_cloud_vm_clusters_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_get_cloud_vm_cluster_rest_bad_request( - request_type=oracledatabase.GetCloudVmClusterRequest, +def test_get_exascale_db_storage_vault_rest_bad_request( + request_type=exascale_db_storage_vault.GetExascaleDbStorageVaultRequest, ): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding request_init = { - "name": "projects/sample1/locations/sample2/cloudVmClusters/sample3" + "name": "projects/sample1/locations/sample2/exascaleDbStorageVaults/sample3" } request = request_type(**request_init) @@ -20093,38 +48982,35 @@ def test_get_cloud_vm_cluster_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_cloud_vm_cluster(request) + client.get_exascale_db_storage_vault(request) @pytest.mark.parametrize( "request_type", [ - oracledatabase.GetCloudVmClusterRequest, + 
exascale_db_storage_vault.GetExascaleDbStorageVaultRequest, dict, ], ) -def test_get_cloud_vm_cluster_rest_call_success(request_type): +def test_get_exascale_db_storage_vault_rest_call_success(request_type): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding request_init = { - "name": "projects/sample1/locations/sample2/cloudVmClusters/sample3" + "name": "projects/sample1/locations/sample2/exascaleDbStorageVaults/sample3" } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = vm_cluster.CloudVmCluster( + return_value = exascale_db_storage_vault.ExascaleDbStorageVault( name="name_value", - exadata_infrastructure="exadata_infrastructure_value", display_name="display_name_value", gcp_oracle_zone="gcp_oracle_zone_value", - cidr="cidr_value", - backup_subnet_cidr="backup_subnet_cidr_value", - network="network_value", + entitlement_id="entitlement_id_value", ) # Wrap the value into a proper Response obj @@ -20132,26 +49018,23 @@ def test_get_cloud_vm_cluster_rest_call_success(request_type): response_value.status_code = 200 # Convert return value to protobuf type - return_value = vm_cluster.CloudVmCluster.pb(return_value) + return_value = exascale_db_storage_vault.ExascaleDbStorageVault.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get_cloud_vm_cluster(request) + response = client.get_exascale_db_storage_vault(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, vm_cluster.CloudVmCluster) + assert isinstance(response, exascale_db_storage_vault.ExascaleDbStorageVault) assert response.name == "name_value" - assert response.exadata_infrastructure == "exadata_infrastructure_value" assert response.display_name == "display_name_value" assert response.gcp_oracle_zone == "gcp_oracle_zone_value" - assert response.cidr == "cidr_value" - assert response.backup_subnet_cidr == "backup_subnet_cidr_value" - assert response.network == "network_value" + assert response.entitlement_id == "entitlement_id_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_cloud_vm_cluster_rest_interceptors(null_interceptor): +def test_get_exascale_db_storage_vault_rest_interceptors(null_interceptor): transport = transports.OracleDatabaseRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -20165,18 +49048,18 @@ def test_get_cloud_vm_cluster_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "post_get_cloud_vm_cluster" + transports.OracleDatabaseRestInterceptor, "post_get_exascale_db_storage_vault" ) as post, mock.patch.object( transports.OracleDatabaseRestInterceptor, - "post_get_cloud_vm_cluster_with_metadata", + "post_get_exascale_db_storage_vault_with_metadata", ) as post_with_metadata, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "pre_get_cloud_vm_cluster" + transports.OracleDatabaseRestInterceptor, "pre_get_exascale_db_storage_vault" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = oracledatabase.GetCloudVmClusterRequest.pb( - oracledatabase.GetCloudVmClusterRequest() + pb_message = exascale_db_storage_vault.GetExascaleDbStorageVaultRequest.pb( + exascale_db_storage_vault.GetExascaleDbStorageVaultRequest() ) transcode.return_value = { "method": "post", @@ 
-20188,19 +49071,24 @@ def test_get_cloud_vm_cluster_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = vm_cluster.CloudVmCluster.to_json(vm_cluster.CloudVmCluster()) + return_value = exascale_db_storage_vault.ExascaleDbStorageVault.to_json( + exascale_db_storage_vault.ExascaleDbStorageVault() + ) req.return_value.content = return_value - request = oracledatabase.GetCloudVmClusterRequest() + request = exascale_db_storage_vault.GetExascaleDbStorageVaultRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = vm_cluster.CloudVmCluster() - post_with_metadata.return_value = vm_cluster.CloudVmCluster(), metadata + post.return_value = exascale_db_storage_vault.ExascaleDbStorageVault() + post_with_metadata.return_value = ( + exascale_db_storage_vault.ExascaleDbStorageVault(), + metadata, + ) - client.get_cloud_vm_cluster( + client.get_exascale_db_storage_vault( request, metadata=[ ("key", "val"), @@ -20213,8 +49101,8 @@ def test_get_cloud_vm_cluster_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_create_cloud_vm_cluster_rest_bad_request( - request_type=oracledatabase.CreateCloudVmClusterRequest, +def test_create_exascale_db_storage_vault_rest_bad_request( + request_type=gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest, ): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" @@ -20235,80 +49123,57 @@ def test_create_cloud_vm_cluster_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.create_cloud_vm_cluster(request) + client.create_exascale_db_storage_vault(request) @pytest.mark.parametrize( "request_type", [ - 
oracledatabase.CreateCloudVmClusterRequest, + gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest, dict, ], ) -def test_create_cloud_vm_cluster_rest_call_success(request_type): +def test_create_exascale_db_storage_vault_rest_call_success(request_type): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding request_init = {"parent": "projects/sample1/locations/sample2"} - request_init["cloud_vm_cluster"] = { + request_init["exascale_db_storage_vault"] = { "name": "name_value", - "exadata_infrastructure": "exadata_infrastructure_value", "display_name": "display_name_value", "gcp_oracle_zone": "gcp_oracle_zone_value", "properties": { "ocid": "ocid_value", - "license_type": 1, - "gi_version": "gi_version_value", "time_zone": {"id": "id_value", "version": "version_value"}, - "ssh_public_keys": ["ssh_public_keys_value1", "ssh_public_keys_value2"], - "node_count": 1070, - "shape": "shape_value", - "ocpu_count": 0.1087, - "memory_size_gb": 1499, - "db_node_storage_size_gb": 2401, - "storage_size_gb": 1591, - "data_storage_size_tb": 0.2109, - "disk_redundancy": 1, - "sparse_diskgroup_enabled": True, - "local_backup_enabled": True, - "hostname_prefix": "hostname_prefix_value", - "diagnostics_data_collection_options": { - "diagnostics_events_enabled": True, - "health_monitoring_enabled": True, - "incident_logs_enabled": True, + "exascale_db_storage_details": { + "available_size_gbs": 1878, + "total_size_gbs": 1497, }, "state": 1, - "scan_listener_port_tcp": 2356, - "scan_listener_port_tcp_ssl": 2789, - "domain": "domain_value", - "scan_dns": "scan_dns_value", - "hostname": "hostname_value", - "cpu_core_count": 1496, - "system_version": "system_version_value", - "scan_ip_ids": ["scan_ip_ids_value1", "scan_ip_ids_value2"], - "scan_dns_record_id": "scan_dns_record_id_value", - "oci_url": "oci_url_value", - "db_server_ocids": ["db_server_ocids_value1", 
"db_server_ocids_value2"], - "compartment_id": "compartment_id_value", - "dns_listener_ip": "dns_listener_ip_value", - "cluster_name": "cluster_name_value", + "description": "description_value", + "vm_cluster_ids": ["vm_cluster_ids_value1", "vm_cluster_ids_value2"], + "vm_cluster_count": 1740, + "additional_flash_cache_percent": 3113, + "oci_uri": "oci_uri_value", + "attached_shape_attributes": [1], + "available_shape_attributes": [1], }, - "labels": {}, "create_time": {"seconds": 751, "nanos": 543}, - "cidr": "cidr_value", - "backup_subnet_cidr": "backup_subnet_cidr_value", - "network": "network_value", + "entitlement_id": "entitlement_id_value", + "labels": {}, } # The version of a generated dependency at test runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency # See https://github.com/googleapis/gapic-generator-python/issues/1748 # Determine if the message type is proto-plus or protobuf - test_field = oracledatabase.CreateCloudVmClusterRequest.meta.fields[ - "cloud_vm_cluster" - ] + test_field = ( + gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest.meta.fields[ + "exascale_db_storage_vault" + ] + ) def get_message_fields(field): # Given a field which is a message (composite type), return a list with @@ -20336,7 +49201,9 @@ def get_message_fields(field): # For each item in the sample request, create a list of sub fields which are not present at runtime # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["cloud_vm_cluster"].items(): # pragma: NO COVER + for field, value in request_init[ + "exascale_db_storage_vault" + ].items(): # pragma: NO COVER result = None is_repeated = False # For repeated fields @@ -20366,10 +49233,12 @@ def get_message_fields(field): subfield = subfield_to_delete.get("subfield") if subfield: if field_repeated: - for i in range(0, 
len(request_init["cloud_vm_cluster"][field])): - del request_init["cloud_vm_cluster"][field][i][subfield] + for i in range( + 0, len(request_init["exascale_db_storage_vault"][field]) + ): + del request_init["exascale_db_storage_vault"][field][i][subfield] else: - del request_init["cloud_vm_cluster"][field][subfield] + del request_init["exascale_db_storage_vault"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -20384,14 +49253,14 @@ def get_message_fields(field): response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.create_cloud_vm_cluster(request) + response = client.create_exascale_db_storage_vault(request) # Establish that the response is the type that we expect. json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_cloud_vm_cluster_rest_interceptors(null_interceptor): +def test_create_exascale_db_storage_vault_rest_interceptors(null_interceptor): transport = transports.OracleDatabaseRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -20407,18 +49276,21 @@ def test_create_cloud_vm_cluster_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( operation.Operation, "_set_result_from_operation" ), mock.patch.object( - transports.OracleDatabaseRestInterceptor, "post_create_cloud_vm_cluster" + transports.OracleDatabaseRestInterceptor, + "post_create_exascale_db_storage_vault", ) as post, mock.patch.object( transports.OracleDatabaseRestInterceptor, - "post_create_cloud_vm_cluster_with_metadata", + "post_create_exascale_db_storage_vault_with_metadata", ) as post_with_metadata, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "pre_create_cloud_vm_cluster" + transports.OracleDatabaseRestInterceptor, 
"pre_create_exascale_db_storage_vault" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = oracledatabase.CreateCloudVmClusterRequest.pb( - oracledatabase.CreateCloudVmClusterRequest() + pb_message = ( + gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest.pb( + gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest() + ) ) transcode.return_value = { "method": "post", @@ -20433,7 +49305,7 @@ def test_create_cloud_vm_cluster_rest_interceptors(null_interceptor): return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value - request = oracledatabase.CreateCloudVmClusterRequest() + request = gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), @@ -20442,7 +49314,7 @@ def test_create_cloud_vm_cluster_rest_interceptors(null_interceptor): post.return_value = operations_pb2.Operation() post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.create_cloud_vm_cluster( + client.create_exascale_db_storage_vault( request, metadata=[ ("key", "val"), @@ -20455,15 +49327,15 @@ def test_create_cloud_vm_cluster_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_delete_cloud_vm_cluster_rest_bad_request( - request_type=oracledatabase.DeleteCloudVmClusterRequest, +def test_delete_exascale_db_storage_vault_rest_bad_request( + request_type=exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest, ): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding request_init = { - "name": "projects/sample1/locations/sample2/cloudVmClusters/sample3" + "name": "projects/sample1/locations/sample2/exascaleDbStorageVaults/sample3" } request = request_type(**request_init) @@ -20479,24 +49351,24 @@ def test_delete_cloud_vm_cluster_rest_bad_request( 
response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.delete_cloud_vm_cluster(request) + client.delete_exascale_db_storage_vault(request) @pytest.mark.parametrize( "request_type", [ - oracledatabase.DeleteCloudVmClusterRequest, + exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest, dict, ], ) -def test_delete_cloud_vm_cluster_rest_call_success(request_type): +def test_delete_exascale_db_storage_vault_rest_call_success(request_type): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding request_init = { - "name": "projects/sample1/locations/sample2/cloudVmClusters/sample3" + "name": "projects/sample1/locations/sample2/exascaleDbStorageVaults/sample3" } request = request_type(**request_init) @@ -20512,14 +49384,14 @@ def test_delete_cloud_vm_cluster_rest_call_success(request_type): response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.delete_cloud_vm_cluster(request) + response = client.delete_exascale_db_storage_vault(request) # Establish that the response is the type that we expect. 
json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_cloud_vm_cluster_rest_interceptors(null_interceptor): +def test_delete_exascale_db_storage_vault_rest_interceptors(null_interceptor): transport = transports.OracleDatabaseRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -20535,18 +49407,19 @@ def test_delete_cloud_vm_cluster_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( operation.Operation, "_set_result_from_operation" ), mock.patch.object( - transports.OracleDatabaseRestInterceptor, "post_delete_cloud_vm_cluster" + transports.OracleDatabaseRestInterceptor, + "post_delete_exascale_db_storage_vault", ) as post, mock.patch.object( transports.OracleDatabaseRestInterceptor, - "post_delete_cloud_vm_cluster_with_metadata", + "post_delete_exascale_db_storage_vault_with_metadata", ) as post_with_metadata, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "pre_delete_cloud_vm_cluster" + transports.OracleDatabaseRestInterceptor, "pre_delete_exascale_db_storage_vault" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = oracledatabase.DeleteCloudVmClusterRequest.pb( - oracledatabase.DeleteCloudVmClusterRequest() + pb_message = exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest.pb( + exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest() ) transcode.return_value = { "method": "post", @@ -20561,7 +49434,7 @@ def test_delete_cloud_vm_cluster_rest_interceptors(null_interceptor): return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value - request = oracledatabase.DeleteCloudVmClusterRequest() + request = exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), @@ -20570,7 +49443,7 @@ def 
test_delete_cloud_vm_cluster_rest_interceptors(null_interceptor): post.return_value = operations_pb2.Operation() post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.delete_cloud_vm_cluster( + client.delete_exascale_db_storage_vault( request, metadata=[ ("key", "val"), @@ -20583,8 +49456,8 @@ def test_delete_cloud_vm_cluster_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_list_entitlements_rest_bad_request( - request_type=oracledatabase.ListEntitlementsRequest, +def test_list_db_system_initial_storage_sizes_rest_bad_request( + request_type=db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest, ): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" @@ -20605,17 +49478,17 @@ def test_list_entitlements_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_entitlements(request) + client.list_db_system_initial_storage_sizes(request) @pytest.mark.parametrize( "request_type", [ - oracledatabase.ListEntitlementsRequest, + db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest, dict, ], ) -def test_list_entitlements_rest_call_success(request_type): +def test_list_db_system_initial_storage_sizes_rest_call_success(request_type): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -20627,8 +49500,10 @@ def test_list_entitlements_rest_call_success(request_type): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = oracledatabase.ListEntitlementsResponse( - next_page_token="next_page_token_value", + return_value = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse( + next_page_token="next_page_token_value", + ) ) # Wrap the value into a proper Response obj @@ -20636,20 +49511,24 @@ def test_list_entitlements_rest_call_success(request_type): response_value.status_code = 200 # Convert return value to protobuf type - return_value = oracledatabase.ListEntitlementsResponse.pb(return_value) + return_value = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse.pb( + return_value + ) + ) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_entitlements(request) + response = client.list_db_system_initial_storage_sizes(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListEntitlementsPager) + assert isinstance(response, pagers.ListDbSystemInitialStorageSizesPager) assert response.next_page_token == "next_page_token_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_entitlements_rest_interceptors(null_interceptor): +def test_list_db_system_initial_storage_sizes_rest_interceptors(null_interceptor): transport = transports.OracleDatabaseRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -20663,17 +49542,22 @@ def test_list_entitlements_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "post_list_entitlements" + transports.OracleDatabaseRestInterceptor, + "post_list_db_system_initial_storage_sizes", ) as post, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "post_list_entitlements_with_metadata" + transports.OracleDatabaseRestInterceptor, + "post_list_db_system_initial_storage_sizes_with_metadata", ) as post_with_metadata, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "pre_list_entitlements" + transports.OracleDatabaseRestInterceptor, + "pre_list_db_system_initial_storage_sizes", ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = oracledatabase.ListEntitlementsRequest.pb( - oracledatabase.ListEntitlementsRequest() + pb_message = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest.pb( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest() + ) ) transcode.return_value = { "method": "post", @@ -20685,24 +49569,28 @@ def test_list_entitlements_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = oracledatabase.ListEntitlementsResponse.to_json( - 
oracledatabase.ListEntitlementsResponse() + return_value = db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse.to_json( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse() ) req.return_value.content = return_value - request = oracledatabase.ListEntitlementsRequest() + request = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest() + ) metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = oracledatabase.ListEntitlementsResponse() + post.return_value = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse() + ) post_with_metadata.return_value = ( - oracledatabase.ListEntitlementsResponse(), + db_system_initial_storage_size.ListDbSystemInitialStorageSizesResponse(), metadata, ) - client.list_entitlements( + client.list_db_system_initial_storage_sizes( request, metadata=[ ("key", "val"), @@ -20715,16 +49603,12 @@ def test_list_entitlements_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_list_db_servers_rest_bad_request( - request_type=oracledatabase.ListDbServersRequest, -): +def test_list_databases_rest_bad_request(request_type=database.ListDatabasesRequest): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "parent": "projects/sample1/locations/sample2/cloudExadataInfrastructures/sample3" - } + request_init = {"parent": "projects/sample1/locations/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -20739,31 +49623,29 @@ def test_list_db_servers_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_db_servers(request) + client.list_databases(request) @pytest.mark.parametrize( "request_type", [ - oracledatabase.ListDbServersRequest, + database.ListDatabasesRequest, dict, ], ) -def test_list_db_servers_rest_call_success(request_type): +def test_list_databases_rest_call_success(request_type): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "parent": "projects/sample1/locations/sample2/cloudExadataInfrastructures/sample3" - } + request_init = {"parent": "projects/sample1/locations/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = oracledatabase.ListDbServersResponse( + return_value = database.ListDatabasesResponse( next_page_token="next_page_token_value", ) @@ -20772,20 +49654,20 @@ def test_list_db_servers_rest_call_success(request_type): response_value.status_code = 200 # Convert return value to protobuf type - return_value = oracledatabase.ListDbServersResponse.pb(return_value) + return_value = database.ListDatabasesResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_db_servers(request) + response = client.list_databases(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListDbServersPager) + assert isinstance(response, pagers.ListDatabasesPager) assert response.next_page_token == "next_page_token_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_db_servers_rest_interceptors(null_interceptor): +def test_list_databases_rest_interceptors(null_interceptor): transport = transports.OracleDatabaseRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -20799,18 +49681,16 @@ def test_list_db_servers_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "post_list_db_servers" + transports.OracleDatabaseRestInterceptor, "post_list_databases" ) as post, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "post_list_db_servers_with_metadata" + transports.OracleDatabaseRestInterceptor, "post_list_databases_with_metadata" ) as post_with_metadata, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "pre_list_db_servers" + transports.OracleDatabaseRestInterceptor, "pre_list_databases" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = oracledatabase.ListDbServersRequest.pb( - oracledatabase.ListDbServersRequest() - ) + pb_message = database.ListDatabasesRequest.pb(database.ListDatabasesRequest()) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -20821,24 +49701,21 @@ def test_list_db_servers_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = oracledatabase.ListDbServersResponse.to_json( - oracledatabase.ListDbServersResponse() + return_value = database.ListDatabasesResponse.to_json( + database.ListDatabasesResponse() ) req.return_value.content = return_value - request = 
oracledatabase.ListDbServersRequest() + request = database.ListDatabasesRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = oracledatabase.ListDbServersResponse() - post_with_metadata.return_value = ( - oracledatabase.ListDbServersResponse(), - metadata, - ) + post.return_value = database.ListDatabasesResponse() + post_with_metadata.return_value = database.ListDatabasesResponse(), metadata - client.list_db_servers( + client.list_databases( request, metadata=[ ("key", "val"), @@ -20851,14 +49728,12 @@ def test_list_db_servers_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_list_db_nodes_rest_bad_request(request_type=oracledatabase.ListDbNodesRequest): +def test_get_database_rest_bad_request(request_type=database.GetDatabaseRequest): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "parent": "projects/sample1/locations/sample2/cloudVmClusters/sample3" - } + request_init = {"name": "projects/sample1/locations/sample2/databases/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -20873,32 +49748,41 @@ def test_list_db_nodes_rest_bad_request(request_type=oracledatabase.ListDbNodesR response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_db_nodes(request) + client.get_database(request) @pytest.mark.parametrize( "request_type", [ - oracledatabase.ListDbNodesRequest, + database.GetDatabaseRequest, dict, ], ) -def test_list_db_nodes_rest_call_success(request_type): +def test_get_database_rest_call_success(request_type): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "parent": "projects/sample1/locations/sample2/cloudVmClusters/sample3" - } + request_init = {"name": "projects/sample1/locations/sample2/databases/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = oracledatabase.ListDbNodesResponse( - next_page_token="next_page_token_value", + return_value = database.Database( + name="name_value", + db_name="db_name_value", + db_unique_name="db_unique_name_value", + admin_password="admin_password_value", + tde_wallet_password="tde_wallet_password_value", + character_set="character_set_value", + ncharacter_set="ncharacter_set_value", + oci_url="oci_url_value", + database_id="database_id_value", + db_home_name="db_home_name_value", + gcp_oracle_zone="gcp_oracle_zone_value", + ops_insights_status=database.Database.OperationsInsightsStatus.ENABLING, ) # Wrap the value into a proper Response obj @@ -20906,20 +49790,34 @@ def test_list_db_nodes_rest_call_success(request_type): response_value.status_code = 200 # Convert return value to protobuf type - return_value = oracledatabase.ListDbNodesResponse.pb(return_value) + return_value = database.Database.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_db_nodes(request) + response = client.get_database(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListDbNodesPager) - assert response.next_page_token == "next_page_token_value" + assert isinstance(response, database.Database) + assert response.name == "name_value" + assert response.db_name == "db_name_value" + assert response.db_unique_name == "db_unique_name_value" + assert response.admin_password == "admin_password_value" + assert response.tde_wallet_password == "tde_wallet_password_value" + assert response.character_set == "character_set_value" + assert response.ncharacter_set == "ncharacter_set_value" + assert response.oci_url == "oci_url_value" + assert response.database_id == "database_id_value" + assert response.db_home_name == "db_home_name_value" + assert response.gcp_oracle_zone == "gcp_oracle_zone_value" + assert ( + response.ops_insights_status + == database.Database.OperationsInsightsStatus.ENABLING + ) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_db_nodes_rest_interceptors(null_interceptor): +def test_get_database_rest_interceptors(null_interceptor): transport = transports.OracleDatabaseRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -20933,18 +49831,16 @@ def test_list_db_nodes_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "post_list_db_nodes" + transports.OracleDatabaseRestInterceptor, "post_get_database" ) as post, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "post_list_db_nodes_with_metadata" + transports.OracleDatabaseRestInterceptor, "post_get_database_with_metadata" ) as post_with_metadata, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "pre_list_db_nodes" + transports.OracleDatabaseRestInterceptor, "pre_get_database" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = oracledatabase.ListDbNodesRequest.pb( - 
oracledatabase.ListDbNodesRequest() - ) + pb_message = database.GetDatabaseRequest.pb(database.GetDatabaseRequest()) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -20955,21 +49851,19 @@ def test_list_db_nodes_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = oracledatabase.ListDbNodesResponse.to_json( - oracledatabase.ListDbNodesResponse() - ) + return_value = database.Database.to_json(database.Database()) req.return_value.content = return_value - request = oracledatabase.ListDbNodesRequest() + request = database.GetDatabaseRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = oracledatabase.ListDbNodesResponse() - post_with_metadata.return_value = oracledatabase.ListDbNodesResponse(), metadata + post.return_value = database.Database() + post_with_metadata.return_value = database.Database(), metadata - client.list_db_nodes( + client.get_database( request, metadata=[ ("key", "val"), @@ -20982,8 +49876,8 @@ def test_list_db_nodes_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_list_gi_versions_rest_bad_request( - request_type=oracledatabase.ListGiVersionsRequest, +def test_list_pluggable_databases_rest_bad_request( + request_type=pluggable_database.ListPluggableDatabasesRequest, ): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" @@ -21004,17 +49898,17 @@ def test_list_gi_versions_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_gi_versions(request) + client.list_pluggable_databases(request) @pytest.mark.parametrize( "request_type", [ - oracledatabase.ListGiVersionsRequest, + pluggable_database.ListPluggableDatabasesRequest, 
dict, ], ) -def test_list_gi_versions_rest_call_success(request_type): +def test_list_pluggable_databases_rest_call_success(request_type): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -21026,7 +49920,7 @@ def test_list_gi_versions_rest_call_success(request_type): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = oracledatabase.ListGiVersionsResponse( + return_value = pluggable_database.ListPluggableDatabasesResponse( next_page_token="next_page_token_value", ) @@ -21035,20 +49929,22 @@ def test_list_gi_versions_rest_call_success(request_type): response_value.status_code = 200 # Convert return value to protobuf type - return_value = oracledatabase.ListGiVersionsResponse.pb(return_value) + return_value = pluggable_database.ListPluggableDatabasesResponse.pb( + return_value + ) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_gi_versions(request) + response = client.list_pluggable_databases(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListGiVersionsPager) + assert isinstance(response, pagers.ListPluggableDatabasesPager) assert response.next_page_token == "next_page_token_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_gi_versions_rest_interceptors(null_interceptor): +def test_list_pluggable_databases_rest_interceptors(null_interceptor): transport = transports.OracleDatabaseRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -21062,17 +49958,18 @@ def test_list_gi_versions_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "post_list_gi_versions" + transports.OracleDatabaseRestInterceptor, "post_list_pluggable_databases" ) as post, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "post_list_gi_versions_with_metadata" + transports.OracleDatabaseRestInterceptor, + "post_list_pluggable_databases_with_metadata", ) as post_with_metadata, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "pre_list_gi_versions" + transports.OracleDatabaseRestInterceptor, "pre_list_pluggable_databases" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = oracledatabase.ListGiVersionsRequest.pb( - oracledatabase.ListGiVersionsRequest() + pb_message = pluggable_database.ListPluggableDatabasesRequest.pb( + pluggable_database.ListPluggableDatabasesRequest() ) transcode.return_value = { "method": "post", @@ -21084,24 +49981,24 @@ def test_list_gi_versions_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = oracledatabase.ListGiVersionsResponse.to_json( - oracledatabase.ListGiVersionsResponse() + return_value = pluggable_database.ListPluggableDatabasesResponse.to_json( + 
pluggable_database.ListPluggableDatabasesResponse() ) req.return_value.content = return_value - request = oracledatabase.ListGiVersionsRequest() + request = pluggable_database.ListPluggableDatabasesRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = oracledatabase.ListGiVersionsResponse() + post.return_value = pluggable_database.ListPluggableDatabasesResponse() post_with_metadata.return_value = ( - oracledatabase.ListGiVersionsResponse(), + pluggable_database.ListPluggableDatabasesResponse(), metadata, ) - client.list_gi_versions( + client.list_pluggable_databases( request, metadata=[ ("key", "val"), @@ -21114,14 +50011,16 @@ def test_list_gi_versions_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_list_db_system_shapes_rest_bad_request( - request_type=oracledatabase.ListDbSystemShapesRequest, +def test_get_pluggable_database_rest_bad_request( + request_type=pluggable_database.GetPluggableDatabaseRequest, ): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/locations/sample2"} + request_init = { + "name": "projects/sample1/locations/sample2/pluggableDatabases/sample3" + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -21136,30 +50035,33 @@ def test_list_db_system_shapes_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_db_system_shapes(request) + client.get_pluggable_database(request) @pytest.mark.parametrize( "request_type", [ - oracledatabase.ListDbSystemShapesRequest, + pluggable_database.GetPluggableDatabaseRequest, dict, ], ) -def test_list_db_system_shapes_rest_call_success(request_type): +def test_get_pluggable_database_rest_call_success(request_type): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/locations/sample2"} + request_init = { + "name": "projects/sample1/locations/sample2/pluggableDatabases/sample3" + } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = oracledatabase.ListDbSystemShapesResponse( - next_page_token="next_page_token_value", + return_value = pluggable_database.PluggableDatabase( + name="name_value", + oci_url="oci_url_value", ) # Wrap the value into a proper Response obj @@ -21167,20 +50069,21 @@ def test_list_db_system_shapes_rest_call_success(request_type): response_value.status_code = 200 # Convert return value to protobuf type - return_value = oracledatabase.ListDbSystemShapesResponse.pb(return_value) + return_value = pluggable_database.PluggableDatabase.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_db_system_shapes(request) + response = client.get_pluggable_database(request) # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListDbSystemShapesPager) - assert response.next_page_token == "next_page_token_value" + assert isinstance(response, pluggable_database.PluggableDatabase) + assert response.name == "name_value" + assert response.oci_url == "oci_url_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_db_system_shapes_rest_interceptors(null_interceptor): +def test_get_pluggable_database_rest_interceptors(null_interceptor): transport = transports.OracleDatabaseRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -21194,18 +50097,18 @@ def test_list_db_system_shapes_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "post_list_db_system_shapes" + transports.OracleDatabaseRestInterceptor, "post_get_pluggable_database" ) as post, mock.patch.object( transports.OracleDatabaseRestInterceptor, - "post_list_db_system_shapes_with_metadata", + 
"post_get_pluggable_database_with_metadata", ) as post_with_metadata, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "pre_list_db_system_shapes" + transports.OracleDatabaseRestInterceptor, "pre_get_pluggable_database" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = oracledatabase.ListDbSystemShapesRequest.pb( - oracledatabase.ListDbSystemShapesRequest() + pb_message = pluggable_database.GetPluggableDatabaseRequest.pb( + pluggable_database.GetPluggableDatabaseRequest() ) transcode.return_value = { "method": "post", @@ -21217,24 +50120,24 @@ def test_list_db_system_shapes_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = oracledatabase.ListDbSystemShapesResponse.to_json( - oracledatabase.ListDbSystemShapesResponse() + return_value = pluggable_database.PluggableDatabase.to_json( + pluggable_database.PluggableDatabase() ) req.return_value.content = return_value - request = oracledatabase.ListDbSystemShapesRequest() + request = pluggable_database.GetPluggableDatabaseRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = oracledatabase.ListDbSystemShapesResponse() + post.return_value = pluggable_database.PluggableDatabase() post_with_metadata.return_value = ( - oracledatabase.ListDbSystemShapesResponse(), + pluggable_database.PluggableDatabase(), metadata, ) - client.list_db_system_shapes( + client.get_pluggable_database( request, metadata=[ ("key", "val"), @@ -21247,9 +50150,7 @@ def test_list_db_system_shapes_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_list_autonomous_databases_rest_bad_request( - request_type=oracledatabase.ListAutonomousDatabasesRequest, -): +def 
test_list_db_systems_rest_bad_request(request_type=db_system.ListDbSystemsRequest): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -21269,17 +50170,17 @@ def test_list_autonomous_databases_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_autonomous_databases(request) + client.list_db_systems(request) @pytest.mark.parametrize( "request_type", [ - oracledatabase.ListAutonomousDatabasesRequest, + db_system.ListDbSystemsRequest, dict, ], ) -def test_list_autonomous_databases_rest_call_success(request_type): +def test_list_db_systems_rest_call_success(request_type): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -21291,7 +50192,7 @@ def test_list_autonomous_databases_rest_call_success(request_type): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = oracledatabase.ListAutonomousDatabasesResponse( + return_value = db_system.ListDbSystemsResponse( next_page_token="next_page_token_value", ) @@ -21300,20 +50201,20 @@ def test_list_autonomous_databases_rest_call_success(request_type): response_value.status_code = 200 # Convert return value to protobuf type - return_value = oracledatabase.ListAutonomousDatabasesResponse.pb(return_value) + return_value = db_system.ListDbSystemsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_autonomous_databases(request) + response = client.list_db_systems(request) # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListAutonomousDatabasesPager) + assert isinstance(response, pagers.ListDbSystemsPager) assert response.next_page_token == "next_page_token_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_autonomous_databases_rest_interceptors(null_interceptor): +def test_list_db_systems_rest_interceptors(null_interceptor): transport = transports.OracleDatabaseRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -21327,19 +50228,16 @@ def test_list_autonomous_databases_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "post_list_autonomous_databases" + transports.OracleDatabaseRestInterceptor, "post_list_db_systems" ) as post, mock.patch.object( - transports.OracleDatabaseRestInterceptor, - "post_list_autonomous_databases_with_metadata", + transports.OracleDatabaseRestInterceptor, "post_list_db_systems_with_metadata" ) as post_with_metadata, mock.patch.object( - transports.OracleDatabaseRestInterceptor, 
"pre_list_autonomous_databases" + transports.OracleDatabaseRestInterceptor, "pre_list_db_systems" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = oracledatabase.ListAutonomousDatabasesRequest.pb( - oracledatabase.ListAutonomousDatabasesRequest() - ) + pb_message = db_system.ListDbSystemsRequest.pb(db_system.ListDbSystemsRequest()) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -21350,24 +50248,21 @@ def test_list_autonomous_databases_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = oracledatabase.ListAutonomousDatabasesResponse.to_json( - oracledatabase.ListAutonomousDatabasesResponse() + return_value = db_system.ListDbSystemsResponse.to_json( + db_system.ListDbSystemsResponse() ) req.return_value.content = return_value - request = oracledatabase.ListAutonomousDatabasesRequest() + request = db_system.ListDbSystemsRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = oracledatabase.ListAutonomousDatabasesResponse() - post_with_metadata.return_value = ( - oracledatabase.ListAutonomousDatabasesResponse(), - metadata, - ) + post.return_value = db_system.ListDbSystemsResponse() + post_with_metadata.return_value = db_system.ListDbSystemsResponse(), metadata - client.list_autonomous_databases( + client.list_db_systems( request, metadata=[ ("key", "val"), @@ -21380,16 +50275,12 @@ def test_list_autonomous_databases_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_get_autonomous_database_rest_bad_request( - request_type=oracledatabase.GetAutonomousDatabaseRequest, -): +def test_get_db_system_rest_bad_request(request_type=db_system.GetDbSystemRequest): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), 
transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" - } + request_init = {"name": "projects/sample1/locations/sample2/dbSystems/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -21404,38 +50295,36 @@ def test_get_autonomous_database_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_autonomous_database(request) + client.get_db_system(request) @pytest.mark.parametrize( "request_type", [ - oracledatabase.GetAutonomousDatabaseRequest, + db_system.GetDbSystemRequest, dict, ], ) -def test_get_autonomous_database_rest_call_success(request_type): +def test_get_db_system_rest_call_success(request_type): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" - } + request_init = {"name": "projects/sample1/locations/sample2/dbSystems/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = autonomous_database.AutonomousDatabase( + return_value = db_system.DbSystem( name="name_value", - database="database_value", - display_name="display_name_value", + gcp_oracle_zone="gcp_oracle_zone_value", + odb_network="odb_network_value", + odb_subnet="odb_subnet_value", entitlement_id="entitlement_id_value", - admin_password="admin_password_value", - network="network_value", - cidr="cidr_value", + display_name="display_name_value", + oci_url="oci_url_value", ) # Wrap the value into a proper Response obj @@ -21443,26 +50332,26 @@ def test_get_autonomous_database_rest_call_success(request_type): response_value.status_code = 200 # Convert return value to protobuf type - return_value = autonomous_database.AutonomousDatabase.pb(return_value) + return_value = db_system.DbSystem.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get_autonomous_database(request) + response = client.get_db_system(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, autonomous_database.AutonomousDatabase) + assert isinstance(response, db_system.DbSystem) assert response.name == "name_value" - assert response.database == "database_value" - assert response.display_name == "display_name_value" + assert response.gcp_oracle_zone == "gcp_oracle_zone_value" + assert response.odb_network == "odb_network_value" + assert response.odb_subnet == "odb_subnet_value" assert response.entitlement_id == "entitlement_id_value" - assert response.admin_password == "admin_password_value" - assert response.network == "network_value" - assert response.cidr == "cidr_value" + assert response.display_name == "display_name_value" + assert response.oci_url == "oci_url_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_autonomous_database_rest_interceptors(null_interceptor): +def test_get_db_system_rest_interceptors(null_interceptor): transport = transports.OracleDatabaseRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -21476,19 +50365,16 @@ def test_get_autonomous_database_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "post_get_autonomous_database" + transports.OracleDatabaseRestInterceptor, "post_get_db_system" ) as post, mock.patch.object( - transports.OracleDatabaseRestInterceptor, - "post_get_autonomous_database_with_metadata", + transports.OracleDatabaseRestInterceptor, "post_get_db_system_with_metadata" ) as post_with_metadata, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "pre_get_autonomous_database" + transports.OracleDatabaseRestInterceptor, "pre_get_db_system" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = oracledatabase.GetAutonomousDatabaseRequest.pb( - oracledatabase.GetAutonomousDatabaseRequest() - ) + pb_message = 
db_system.GetDbSystemRequest.pb(db_system.GetDbSystemRequest()) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -21499,24 +50385,19 @@ def test_get_autonomous_database_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = autonomous_database.AutonomousDatabase.to_json( - autonomous_database.AutonomousDatabase() - ) + return_value = db_system.DbSystem.to_json(db_system.DbSystem()) req.return_value.content = return_value - request = oracledatabase.GetAutonomousDatabaseRequest() + request = db_system.GetDbSystemRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = autonomous_database.AutonomousDatabase() - post_with_metadata.return_value = ( - autonomous_database.AutonomousDatabase(), - metadata, - ) + post.return_value = db_system.DbSystem() + post_with_metadata.return_value = db_system.DbSystem(), metadata - client.get_autonomous_database( + client.get_db_system( request, metadata=[ ("key", "val"), @@ -21529,8 +50410,8 @@ def test_get_autonomous_database_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_create_autonomous_database_rest_bad_request( - request_type=oracledatabase.CreateAutonomousDatabaseRequest, +def test_create_db_system_rest_bad_request( + request_type=gco_db_system.CreateDbSystemRequest, ): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" @@ -21551,159 +50432,102 @@ def test_create_autonomous_database_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.create_autonomous_database(request) + client.create_db_system(request) @pytest.mark.parametrize( "request_type", [ - oracledatabase.CreateAutonomousDatabaseRequest, + 
gco_db_system.CreateDbSystemRequest, dict, ], ) -def test_create_autonomous_database_rest_call_success(request_type): +def test_create_db_system_rest_call_success(request_type): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding request_init = {"parent": "projects/sample1/locations/sample2"} - request_init["autonomous_database"] = { + request_init["db_system"] = { "name": "name_value", - "database": "database_value", - "display_name": "display_name_value", - "entitlement_id": "entitlement_id_value", - "admin_password": "admin_password_value", "properties": { - "ocid": "ocid_value", - "compute_count": 0.1413, - "cpu_core_count": 1496, - "data_storage_size_tb": 2109, - "data_storage_size_gb": 2096, - "db_workload": 1, - "db_edition": 1, - "character_set": "character_set_value", - "n_character_set": "n_character_set_value", - "private_endpoint_ip": "private_endpoint_ip_value", - "private_endpoint_label": "private_endpoint_label_value", - "db_version": "db_version_value", - "is_auto_scaling_enabled": True, - "is_storage_auto_scaling_enabled": True, - "license_type": 1, - "customer_contacts": [{"email": "email_value"}], - "secret_id": "secret_id_value", - "vault_id": "vault_id_value", - "maintenance_schedule_type": 1, - "mtls_connection_required": True, - "backup_retention_period_days": 2975, - "actual_used_data_storage_size_tb": 0.3366, - "allocated_storage_size_tb": 0.2636, - "apex_details": { - "apex_version": "apex_version_value", - "ords_version": "ords_version_value", + "shape": "shape_value", + "compute_count": 1413, + "initial_data_storage_size_gb": 2937, + "database_edition": 1, + "license_model": 1, + "ssh_public_keys": ["ssh_public_keys_value1", "ssh_public_keys_value2"], + "hostname_prefix": "hostname_prefix_value", + "hostname": "hostname_value", + "private_ip": "private_ip_value", + "data_collection_options": { + "is_diagnostics_events_enabled": True, + 
"is_incident_logs_enabled": True, }, - "are_primary_allowlisted_ips_used": True, - "lifecycle_details": "lifecycle_details_value", - "state": 1, - "autonomous_container_database_id": "autonomous_container_database_id_value", - "available_upgrade_versions": [ - "available_upgrade_versions_value1", - "available_upgrade_versions_value2", - ], - "connection_strings": { - "all_connection_strings": { - "high": "high_value", - "low": "low_value", - "medium": "medium_value", + "time_zone": {"id": "id_value", "version": "version_value"}, + "lifecycle_state": 1, + "db_home": { + "display_name": "display_name_value", + "db_version": "db_version_value", + "database": { + "name": "name_value", + "db_name": "db_name_value", + "db_unique_name": "db_unique_name_value", + "admin_password": "admin_password_value", + "tde_wallet_password": "tde_wallet_password_value", + "character_set": "character_set_value", + "ncharacter_set": "ncharacter_set_value", + "oci_url": "oci_url_value", + "create_time": {"seconds": 751, "nanos": 543}, + "properties": { + "state": 1, + "db_version": "db_version_value", + "db_backup_config": { + "auto_backup_enabled": True, + "backup_destination_details": [{"type_": 1}], + "retention_period_days": 2250, + "backup_deletion_policy": 1, + "auto_full_backup_day": 1, + "auto_full_backup_window": 1, + "auto_incremental_backup_window": 1, + }, + "database_management_config": { + "management_state": 1, + "management_type": 1, + }, + }, + "database_id": "database_id_value", + "db_home_name": "db_home_name_value", + "gcp_oracle_zone": "gcp_oracle_zone_value", + "ops_insights_status": 1, }, - "dedicated": "dedicated_value", - "high": "high_value", - "low": "low_value", - "medium": "medium_value", - "profiles": [ - { - "consumer_group": 1, - "display_name": "display_name_value", - "host_format": 1, - "is_regional": True, - "protocol": 1, - "session_mode": 1, - "syntax_format": 1, - "tls_authentication": 1, - "value": "value_value", - } - ], - }, - "connection_urls": { 
- "apex_uri": "apex_uri_value", - "database_transforms_uri": "database_transforms_uri_value", - "graph_studio_uri": "graph_studio_uri_value", - "machine_learning_notebook_uri": "machine_learning_notebook_uri_value", - "machine_learning_user_management_uri": "machine_learning_user_management_uri_value", - "mongo_db_uri": "mongo_db_uri_value", - "ords_uri": "ords_uri_value", - "sql_dev_web_uri": "sql_dev_web_uri_value", + "is_unified_auditing_enabled": True, }, - "failed_data_recovery_duration": {"seconds": 751, "nanos": 543}, - "memory_table_gbs": 1691, - "is_local_data_guard_enabled": True, - "local_adg_auto_failover_max_data_loss_limit": 4513, - "local_standby_db": { - "lag_time_duration": {}, - "lifecycle_details": "lifecycle_details_value", - "state": 1, - "data_guard_role_changed_time": {"seconds": 751, "nanos": 543}, - "disaster_recovery_role_changed_time": {}, - }, - "memory_per_oracle_compute_unit_gbs": 3626, - "local_disaster_recovery_type": 1, - "data_safe_state": 1, - "database_management_state": 1, - "open_mode": 1, - "operations_insights_state": 1, - "peer_db_ids": ["peer_db_ids_value1", "peer_db_ids_value2"], - "permission_level": 1, - "private_endpoint": "private_endpoint_value", - "refreshable_mode": 1, - "refreshable_state": 1, - "role": 1, - "scheduled_operation_details": [ - { - "day_of_week": 1, - "start_time": { - "hours": 561, - "minutes": 773, - "seconds": 751, - "nanos": 543, - }, - "stop_time": {}, - } - ], - "sql_web_developer_url": "sql_web_developer_url_value", - "supported_clone_regions": [ - "supported_clone_regions_value1", - "supported_clone_regions_value2", - ], - "used_data_storage_size_tbs": 2752, - "oci_url": "oci_url_value", - "total_auto_backup_storage_size_gbs": 0.36100000000000004, - "next_long_term_backup_time": {}, - "maintenance_begin_time": {}, - "maintenance_end_time": {}, + "ocid": "ocid_value", + "memory_size_gb": 1499, + "compute_model": 1, + "data_storage_size_gb": 2096, + "reco_storage_size_gb": 2111, + "domain": 
"domain_value", + "node_count": 1070, + "db_system_options": {"storage_management": 1}, }, + "gcp_oracle_zone": "gcp_oracle_zone_value", "labels": {}, - "network": "network_value", - "cidr": "cidr_value", + "odb_network": "odb_network_value", + "odb_subnet": "odb_subnet_value", + "entitlement_id": "entitlement_id_value", + "display_name": "display_name_value", "create_time": {}, + "oci_url": "oci_url_value", } # The version of a generated dependency at test runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency # See https://github.com/googleapis/gapic-generator-python/issues/1748 # Determine if the message type is proto-plus or protobuf - test_field = oracledatabase.CreateAutonomousDatabaseRequest.meta.fields[ - "autonomous_database" - ] + test_field = gco_db_system.CreateDbSystemRequest.meta.fields["db_system"] def get_message_fields(field): # Given a field which is a message (composite type), return a list with @@ -21731,7 +50555,7 @@ def get_message_fields(field): # For each item in the sample request, create a list of sub fields which are not present at runtime # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["autonomous_database"].items(): # pragma: NO COVER + for field, value in request_init["db_system"].items(): # pragma: NO COVER result = None is_repeated = False # For repeated fields @@ -21761,10 +50585,10 @@ def get_message_fields(field): subfield = subfield_to_delete.get("subfield") if subfield: if field_repeated: - for i in range(0, len(request_init["autonomous_database"][field])): - del request_init["autonomous_database"][field][i][subfield] + for i in range(0, len(request_init["db_system"][field])): + del request_init["db_system"][field][i][subfield] else: - del request_init["autonomous_database"][field][subfield] + del request_init["db_system"][field][subfield] request = 
request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -21779,14 +50603,14 @@ def get_message_fields(field): response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.create_autonomous_database(request) + response = client.create_db_system(request) # Establish that the response is the type that we expect. json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_autonomous_database_rest_interceptors(null_interceptor): +def test_create_db_system_rest_interceptors(null_interceptor): transport = transports.OracleDatabaseRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -21802,18 +50626,17 @@ def test_create_autonomous_database_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( operation.Operation, "_set_result_from_operation" ), mock.patch.object( - transports.OracleDatabaseRestInterceptor, "post_create_autonomous_database" + transports.OracleDatabaseRestInterceptor, "post_create_db_system" ) as post, mock.patch.object( - transports.OracleDatabaseRestInterceptor, - "post_create_autonomous_database_with_metadata", + transports.OracleDatabaseRestInterceptor, "post_create_db_system_with_metadata" ) as post_with_metadata, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "pre_create_autonomous_database" + transports.OracleDatabaseRestInterceptor, "pre_create_db_system" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = oracledatabase.CreateAutonomousDatabaseRequest.pb( - oracledatabase.CreateAutonomousDatabaseRequest() + pb_message = gco_db_system.CreateDbSystemRequest.pb( + gco_db_system.CreateDbSystemRequest() ) transcode.return_value = { "method": "post", @@ -21828,7 +50651,7 @@ def 
test_create_autonomous_database_rest_interceptors(null_interceptor): return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value - request = oracledatabase.CreateAutonomousDatabaseRequest() + request = gco_db_system.CreateDbSystemRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), @@ -21837,7 +50660,7 @@ def test_create_autonomous_database_rest_interceptors(null_interceptor): post.return_value = operations_pb2.Operation() post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.create_autonomous_database( + client.create_db_system( request, metadata=[ ("key", "val"), @@ -21850,16 +50673,14 @@ def test_create_autonomous_database_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_delete_autonomous_database_rest_bad_request( - request_type=oracledatabase.DeleteAutonomousDatabaseRequest, +def test_delete_db_system_rest_bad_request( + request_type=db_system.DeleteDbSystemRequest, ): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" - } + request_init = {"name": "projects/sample1/locations/sample2/dbSystems/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -21874,25 +50695,23 @@ def test_delete_autonomous_database_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.delete_autonomous_database(request) + client.delete_db_system(request) @pytest.mark.parametrize( "request_type", [ - oracledatabase.DeleteAutonomousDatabaseRequest, + db_system.DeleteDbSystemRequest, dict, ], ) -def test_delete_autonomous_database_rest_call_success(request_type): +def test_delete_db_system_rest_call_success(request_type): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" - } + request_init = {"name": "projects/sample1/locations/sample2/dbSystems/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -21907,14 +50726,14 @@ def test_delete_autonomous_database_rest_call_success(request_type): response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.delete_autonomous_database(request) + response = client.delete_db_system(request) # Establish that the response is the type that we expect. 
json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_autonomous_database_rest_interceptors(null_interceptor): +def test_delete_db_system_rest_interceptors(null_interceptor): transport = transports.OracleDatabaseRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -21930,18 +50749,17 @@ def test_delete_autonomous_database_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( operation.Operation, "_set_result_from_operation" ), mock.patch.object( - transports.OracleDatabaseRestInterceptor, "post_delete_autonomous_database" + transports.OracleDatabaseRestInterceptor, "post_delete_db_system" ) as post, mock.patch.object( - transports.OracleDatabaseRestInterceptor, - "post_delete_autonomous_database_with_metadata", + transports.OracleDatabaseRestInterceptor, "post_delete_db_system_with_metadata" ) as post_with_metadata, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "pre_delete_autonomous_database" + transports.OracleDatabaseRestInterceptor, "pre_delete_db_system" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = oracledatabase.DeleteAutonomousDatabaseRequest.pb( - oracledatabase.DeleteAutonomousDatabaseRequest() + pb_message = db_system.DeleteDbSystemRequest.pb( + db_system.DeleteDbSystemRequest() ) transcode.return_value = { "method": "post", @@ -21956,7 +50774,7 @@ def test_delete_autonomous_database_rest_interceptors(null_interceptor): return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value - request = oracledatabase.DeleteAutonomousDatabaseRequest() + request = db_system.DeleteDbSystemRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), @@ -21965,7 +50783,7 @@ def test_delete_autonomous_database_rest_interceptors(null_interceptor): post.return_value = operations_pb2.Operation() 
post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.delete_autonomous_database( + client.delete_db_system( request, metadata=[ ("key", "val"), @@ -21978,16 +50796,14 @@ def test_delete_autonomous_database_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_restore_autonomous_database_rest_bad_request( - request_type=oracledatabase.RestoreAutonomousDatabaseRequest, +def test_list_db_versions_rest_bad_request( + request_type=db_version.ListDbVersionsRequest, ): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" - } + request_init = {"parent": "projects/sample1/locations/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -22002,47 +50818,51 @@ def test_restore_autonomous_database_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.restore_autonomous_database(request) + client.list_db_versions(request) @pytest.mark.parametrize( "request_type", [ - oracledatabase.RestoreAutonomousDatabaseRequest, + db_version.ListDbVersionsRequest, dict, ], ) -def test_restore_autonomous_database_rest_call_success(request_type): +def test_list_db_versions_rest_call_success(request_type): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" - } + request_init = {"parent": "projects/sample1/locations/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. 
with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = db_version.ListDbVersionsResponse( + next_page_token="next_page_token_value", + ) # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = db_version.ListDbVersionsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.restore_autonomous_database(request) + response = client.list_db_versions(request) # Establish that the response is the type that we expect. - json_return_value = json_format.MessageToJson(return_value) + assert isinstance(response, pagers.ListDbVersionsPager) + assert response.next_page_token == "next_page_token_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_restore_autonomous_database_rest_interceptors(null_interceptor): +def test_list_db_versions_rest_interceptors(null_interceptor): transport = transports.OracleDatabaseRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -22056,20 +50876,17 @@ def test_restore_autonomous_database_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.OracleDatabaseRestInterceptor, "post_restore_autonomous_database" + transports.OracleDatabaseRestInterceptor, "post_list_db_versions" ) as post, mock.patch.object( - transports.OracleDatabaseRestInterceptor, - "post_restore_autonomous_database_with_metadata", + transports.OracleDatabaseRestInterceptor, 
"post_list_db_versions_with_metadata" ) as post_with_metadata, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "pre_restore_autonomous_database" + transports.OracleDatabaseRestInterceptor, "pre_list_db_versions" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = oracledatabase.RestoreAutonomousDatabaseRequest.pb( - oracledatabase.RestoreAutonomousDatabaseRequest() + pb_message = db_version.ListDbVersionsRequest.pb( + db_version.ListDbVersionsRequest() ) transcode.return_value = { "method": "post", @@ -22081,19 +50898,21 @@ def test_restore_autonomous_database_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) + return_value = db_version.ListDbVersionsResponse.to_json( + db_version.ListDbVersionsResponse() + ) req.return_value.content = return_value - request = oracledatabase.RestoreAutonomousDatabaseRequest() + request = db_version.ListDbVersionsRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata + post.return_value = db_version.ListDbVersionsResponse() + post_with_metadata.return_value = db_version.ListDbVersionsResponse(), metadata - client.restore_autonomous_database( + client.list_db_versions( request, metadata=[ ("key", "val"), @@ -22106,16 +50925,14 @@ def test_restore_autonomous_database_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_generate_autonomous_database_wallet_rest_bad_request( - request_type=oracledatabase.GenerateAutonomousDatabaseWalletRequest, +def test_list_database_character_sets_rest_bad_request( + 
request_type=database_character_set.ListDatabaseCharacterSetsRequest, ): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" - } + request_init = {"parent": "projects/sample1/locations/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -22130,32 +50947,30 @@ def test_generate_autonomous_database_wallet_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.generate_autonomous_database_wallet(request) + client.list_database_character_sets(request) @pytest.mark.parametrize( "request_type", [ - oracledatabase.GenerateAutonomousDatabaseWalletRequest, + database_character_set.ListDatabaseCharacterSetsRequest, dict, ], ) -def test_generate_autonomous_database_wallet_rest_call_success(request_type): +def test_list_database_character_sets_rest_call_success(request_type): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" - } + request_init = {"parent": "projects/sample1/locations/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = oracledatabase.GenerateAutonomousDatabaseWalletResponse( - archive_content=b"archive_content_blob", + return_value = database_character_set.ListDatabaseCharacterSetsResponse( + next_page_token="next_page_token_value", ) # Wrap the value into a proper Response obj @@ -22163,22 +50978,22 @@ def test_generate_autonomous_database_wallet_rest_call_success(request_type): response_value.status_code = 200 # Convert return value to protobuf type - return_value = oracledatabase.GenerateAutonomousDatabaseWalletResponse.pb( + return_value = database_character_set.ListDatabaseCharacterSetsResponse.pb( return_value ) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.generate_autonomous_database_wallet(request) + response = client.list_database_character_sets(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, oracledatabase.GenerateAutonomousDatabaseWalletResponse) - assert response.archive_content == b"archive_content_blob" + assert isinstance(response, pagers.ListDatabaseCharacterSetsPager) + assert response.next_page_token == "next_page_token_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_generate_autonomous_database_wallet_rest_interceptors(null_interceptor): +def test_list_database_character_sets_rest_interceptors(null_interceptor): transport = transports.OracleDatabaseRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -22192,20 +51007,18 @@ def test_generate_autonomous_database_wallet_rest_interceptors(null_interceptor) ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.OracleDatabaseRestInterceptor, - "post_generate_autonomous_database_wallet", + transports.OracleDatabaseRestInterceptor, "post_list_database_character_sets" ) as post, mock.patch.object( transports.OracleDatabaseRestInterceptor, - "post_generate_autonomous_database_wallet_with_metadata", + "post_list_database_character_sets_with_metadata", ) as post_with_metadata, mock.patch.object( - transports.OracleDatabaseRestInterceptor, - "pre_generate_autonomous_database_wallet", + transports.OracleDatabaseRestInterceptor, "pre_list_database_character_sets" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = oracledatabase.GenerateAutonomousDatabaseWalletRequest.pb( - oracledatabase.GenerateAutonomousDatabaseWalletRequest() + pb_message = database_character_set.ListDatabaseCharacterSetsRequest.pb( + database_character_set.ListDatabaseCharacterSetsRequest() ) transcode.return_value = { "method": "post", @@ -22217,24 +51030,24 @@ def test_generate_autonomous_database_wallet_rest_interceptors(null_interceptor) req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = 
{"header-1": "value-1", "header-2": "value-2"} - return_value = oracledatabase.GenerateAutonomousDatabaseWalletResponse.to_json( - oracledatabase.GenerateAutonomousDatabaseWalletResponse() + return_value = database_character_set.ListDatabaseCharacterSetsResponse.to_json( + database_character_set.ListDatabaseCharacterSetsResponse() ) req.return_value.content = return_value - request = oracledatabase.GenerateAutonomousDatabaseWalletRequest() + request = database_character_set.ListDatabaseCharacterSetsRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = oracledatabase.GenerateAutonomousDatabaseWalletResponse() + post.return_value = database_character_set.ListDatabaseCharacterSetsResponse() post_with_metadata.return_value = ( - oracledatabase.GenerateAutonomousDatabaseWalletResponse(), + database_character_set.ListDatabaseCharacterSetsResponse(), metadata, ) - client.generate_autonomous_database_wallet( + client.list_database_character_sets( request, metadata=[ ("key", "val"), @@ -22247,1176 +51060,1080 @@ def test_generate_autonomous_database_wallet_rest_interceptors(null_interceptor) post_with_metadata.assert_called_once() -def test_list_autonomous_db_versions_rest_bad_request( - request_type=oracledatabase.ListAutonomousDbVersionsRequest, -): +def test_get_location_rest_bad_request(request_type=locations_pb2.GetLocationRequest): client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2"}, request ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/locations/sample2"} - request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
with mock.patch.object(Session, "request") as req, pytest.raises( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = mock.Mock() + response_value = Response() json_return_value = "" response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = mock.Mock() + response_value.request = Request() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_autonomous_db_versions(request) + client.get_location(request) @pytest.mark.parametrize( "request_type", [ - oracledatabase.ListAutonomousDbVersionsRequest, + locations_pb2.GetLocationRequest, dict, ], ) -def test_list_autonomous_db_versions_rest_call_success(request_type): +def test_get_location_rest(request_type): client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/locations/sample2"} + request_init = {"name": "projects/sample1/locations/sample2"} request = request_type(**request_init) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: + with mock.patch.object(Session, "request") as req: # Designate an appropriate value for the returned response. 
- return_value = oracledatabase.ListAutonomousDbVersionsResponse( - next_page_token="next_page_token_value", - ) + return_value = locations_pb2.Location() # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = oracledatabase.ListAutonomousDbVersionsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_autonomous_db_versions(request) + + response = client.get_location(request) # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListAutonomousDbVersionsPager) - assert response.next_page_token == "next_page_token_value" + assert isinstance(response, locations_pb2.Location) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_autonomous_db_versions_rest_interceptors(null_interceptor): - transport = transports.OracleDatabaseRestTransport( +def test_list_locations_rest_bad_request( + request_type=locations_pb2.ListLocationsRequest, +): + client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.OracleDatabaseRestInterceptor(), + transport="rest", ) - client = OracleDatabaseClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "post_list_autonomous_db_versions" - ) as post, mock.patch.object( - transports.OracleDatabaseRestInterceptor, - "post_list_autonomous_db_versions_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "pre_list_autonomous_db_versions" - 
) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = oracledatabase.ListAutonomousDbVersionsRequest.pb( - oracledatabase.ListAutonomousDbVersionsRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } + request = request_type() + request = json_format.ParseDict({"name": "projects/sample1"}, request) - req.return_value = mock.Mock() - req.return_value.status_code = 200 + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = oracledatabase.ListAutonomousDbVersionsResponse.to_json( - oracledatabase.ListAutonomousDbVersionsResponse() - ) - req.return_value.content = return_value + client.list_locations(request) - request = oracledatabase.ListAutonomousDbVersionsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = oracledatabase.ListAutonomousDbVersionsResponse() - post_with_metadata.return_value = ( - oracledatabase.ListAutonomousDbVersionsResponse(), - metadata, - ) - client.list_autonomous_db_versions( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) +@pytest.mark.parametrize( + "request_type", + [ + locations_pb2.ListLocationsRequest, + dict, + ], +) +def test_list_locations_rest(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - pre.assert_called_once() - 
post.assert_called_once() - post_with_metadata.assert_called_once() + request_init = {"name": "projects/sample1"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # Designate an appropriate value for the returned response. + return_value = locations_pb2.ListLocationsResponse() + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_locations(request) + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) -def test_list_autonomous_database_character_sets_rest_bad_request( - request_type=oracledatabase.ListAutonomousDatabaseCharacterSetsRequest, + +def test_cancel_operation_rest_bad_request( + request_type=operations_pb2.CancelOperationRequest, ): client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/locations/sample2"} - request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
with mock.patch.object(Session, "request") as req, pytest.raises( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = mock.Mock() + response_value = Response() json_return_value = "" response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = mock.Mock() + response_value.request = Request() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_autonomous_database_character_sets(request) + client.cancel_operation(request) @pytest.mark.parametrize( "request_type", [ - oracledatabase.ListAutonomousDatabaseCharacterSetsRequest, + operations_pb2.CancelOperationRequest, dict, ], ) -def test_list_autonomous_database_character_sets_rest_call_success(request_type): +def test_cancel_operation_rest(request_type): client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/locations/sample2"} + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} request = request_type(**request_init) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: + with mock.patch.object(Session, "request") as req: # Designate an appropriate value for the returned response. 
- return_value = oracledatabase.ListAutonomousDatabaseCharacterSetsResponse( - next_page_token="next_page_token_value", - ) + return_value = None # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = oracledatabase.ListAutonomousDatabaseCharacterSetsResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "{}" response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_autonomous_database_character_sets(request) + + response = client.cancel_operation(request) # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListAutonomousDatabaseCharacterSetsPager) - assert response.next_page_token == "next_page_token_value" + assert response is None -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_autonomous_database_character_sets_rest_interceptors(null_interceptor): - transport = transports.OracleDatabaseRestTransport( +def test_delete_operation_rest_bad_request( + request_type=operations_pb2.DeleteOperationRequest, +): + client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.OracleDatabaseRestInterceptor(), + transport="rest", + ) + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request ) - client = OracleDatabaseClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.OracleDatabaseRestInterceptor, - "post_list_autonomous_database_character_sets", - ) as post, mock.patch.object( - 
transports.OracleDatabaseRestInterceptor, - "post_list_autonomous_database_character_sets_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.OracleDatabaseRestInterceptor, - "pre_list_autonomous_database_character_sets", - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = oracledatabase.ListAutonomousDatabaseCharacterSetsRequest.pb( - oracledatabase.ListAutonomousDatabaseCharacterSetsRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - req.return_value = mock.Mock() - req.return_value.status_code = 200 + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = ( - oracledatabase.ListAutonomousDatabaseCharacterSetsResponse.to_json( - oracledatabase.ListAutonomousDatabaseCharacterSetsResponse() - ) - ) - req.return_value.content = return_value + client.delete_operation(request) - request = oracledatabase.ListAutonomousDatabaseCharacterSetsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = oracledatabase.ListAutonomousDatabaseCharacterSetsResponse() - post_with_metadata.return_value = ( - oracledatabase.ListAutonomousDatabaseCharacterSetsResponse(), - metadata, - ) - client.list_autonomous_database_character_sets( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) +@pytest.mark.parametrize( + "request_type", + [ + 
operations_pb2.DeleteOperationRequest, + dict, + ], +) +def test_delete_operation_rest(request_type): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = "{}" + response_value.content = json_return_value.encode("UTF-8") + + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.delete_operation(request) -def test_list_autonomous_database_backups_rest_bad_request( - request_type=oracledatabase.ListAutonomousDatabaseBackupsRequest, + # Establish that the response is the type that we expect. + assert response is None + + +def test_get_operation_rest_bad_request( + request_type=operations_pb2.GetOperationRequest, ): client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/locations/sample2"} - request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
with mock.patch.object(Session, "request") as req, pytest.raises( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = mock.Mock() + response_value = Response() json_return_value = "" response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = mock.Mock() + response_value.request = Request() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_autonomous_database_backups(request) + client.get_operation(request) @pytest.mark.parametrize( "request_type", [ - oracledatabase.ListAutonomousDatabaseBackupsRequest, + operations_pb2.GetOperationRequest, dict, ], ) -def test_list_autonomous_database_backups_rest_call_success(request_type): +def test_get_operation_rest(request_type): client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/locations/sample2"} + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} request = request_type(**request_init) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: + with mock.patch.object(Session, "request") as req: # Designate an appropriate value for the returned response. 
- return_value = oracledatabase.ListAutonomousDatabaseBackupsResponse( - next_page_token="next_page_token_value", - ) + return_value = operations_pb2.Operation() # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = oracledatabase.ListAutonomousDatabaseBackupsResponse.pb( - return_value - ) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_autonomous_database_backups(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListAutonomousDatabaseBackupsPager) - assert response.next_page_token == "next_page_token_value" - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_autonomous_database_backups_rest_interceptors(null_interceptor): - transport = transports.OracleDatabaseRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.OracleDatabaseRestInterceptor(), - ) - client = OracleDatabaseClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.OracleDatabaseRestInterceptor, - "post_list_autonomous_database_backups", - ) as post, mock.patch.object( - transports.OracleDatabaseRestInterceptor, - "post_list_autonomous_database_backups_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "pre_list_autonomous_database_backups" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = oracledatabase.ListAutonomousDatabaseBackupsRequest.pb( - 
oracledatabase.ListAutonomousDatabaseBackupsRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - req.return_value = mock.Mock() - req.return_value.status_code = 200 + req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = oracledatabase.ListAutonomousDatabaseBackupsResponse.to_json( - oracledatabase.ListAutonomousDatabaseBackupsResponse() - ) - req.return_value.content = return_value - - request = oracledatabase.ListAutonomousDatabaseBackupsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = oracledatabase.ListAutonomousDatabaseBackupsResponse() - post_with_metadata.return_value = ( - oracledatabase.ListAutonomousDatabaseBackupsResponse(), - metadata, - ) - client.list_autonomous_database_backups( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) + response = client.get_operation(request) - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() + # Establish that the response is the type that we expect. 
+ assert isinstance(response, operations_pb2.Operation) -def test_stop_autonomous_database_rest_bad_request( - request_type=oracledatabase.StopAutonomousDatabaseRequest, +def test_list_operations_rest_bad_request( + request_type=operations_pb2.ListOperationsRequest, ): client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2"}, request ) - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" - } - request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. with mock.patch.object(Session, "request") as req, pytest.raises( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = mock.Mock() + response_value = Response() json_return_value = "" response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = mock.Mock() + response_value.request = Request() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.stop_autonomous_database(request) + client.list_operations(request) @pytest.mark.parametrize( "request_type", [ - oracledatabase.StopAutonomousDatabaseRequest, + operations_pb2.ListOperationsRequest, dict, ], ) -def test_stop_autonomous_database_rest_call_success(request_type): +def test_list_operations_rest(request_type): client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" - } + request_init = 
{"name": "projects/sample1/locations/sample2"} request = request_type(**request_init) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: + with mock.patch.object(Session, "request") as req: # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = operations_pb2.ListOperationsResponse() # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.stop_autonomous_database(request) - # Establish that the response is the type that we expect. - json_return_value = json_format.MessageToJson(return_value) + response = client.list_operations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_initialize_client_w_rest(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_cloud_exadata_infrastructures_empty_call_rest(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_cloud_exadata_infrastructures), "__call__" + ) as call: + client.list_cloud_exadata_infrastructures(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListCloudExadataInfrastructuresRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_cloud_exadata_infrastructure_empty_call_rest(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_cloud_exadata_infrastructure), "__call__" + ) as call: + client.get_cloud_exadata_infrastructure(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.GetCloudExadataInfrastructureRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_cloud_exadata_infrastructure_empty_call_rest(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_cloud_exadata_infrastructure), "__call__" + ) as call: + client.create_cloud_exadata_infrastructure(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.CreateCloudExadataInfrastructureRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_delete_cloud_exadata_infrastructure_empty_call_rest(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_cloud_exadata_infrastructure), "__call__" + ) as call: + client.delete_cloud_exadata_infrastructure(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.DeleteCloudExadataInfrastructureRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_cloud_vm_clusters_empty_call_rest(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_cloud_vm_clusters), "__call__" + ) as call: + client.list_cloud_vm_clusters(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListCloudVmClustersRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_cloud_vm_cluster_empty_call_rest(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_cloud_vm_cluster), "__call__" + ) as call: + client.get_cloud_vm_cluster(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.GetCloudVmClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_cloud_vm_cluster_empty_call_rest(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_cloud_vm_cluster), "__call__" + ) as call: + client.create_cloud_vm_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.CreateCloudVmClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_cloud_vm_cluster_empty_call_rest(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_cloud_vm_cluster), "__call__" + ) as call: + client.delete_cloud_vm_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.DeleteCloudVmClusterRequest() + + assert args[0] == request_msg -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_stop_autonomous_database_rest_interceptors(null_interceptor): - transport = transports.OracleDatabaseRestTransport( +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_list_entitlements_empty_call_rest(): + client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.OracleDatabaseRestInterceptor(), + transport="rest", ) - client = OracleDatabaseClient(transport=transport) + # Mock the actual call, and fake the request. with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.OracleDatabaseRestInterceptor, "post_stop_autonomous_database" - ) as post, mock.patch.object( - transports.OracleDatabaseRestInterceptor, - "post_stop_autonomous_database_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "pre_stop_autonomous_database" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = oracledatabase.StopAutonomousDatabaseRequest.pb( - oracledatabase.StopAutonomousDatabaseRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } + type(client.transport.list_entitlements), "__call__" + ) as call: + client.list_entitlements(request=None) - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListEntitlementsRequest() - request = oracledatabase.StopAutonomousDatabaseRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata + assert args[0] == request_msg - client.stop_autonomous_database( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_db_servers_empty_call_rest(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_db_servers), "__call__") as call: + client.list_db_servers(request=None) -def test_start_autonomous_database_rest_bad_request( - request_type=oracledatabase.StartAutonomousDatabaseRequest, -): + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListDbServersRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_list_db_nodes_empty_call_rest(): client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" - } - request = request_type(**request_init) - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.start_autonomous_database(request) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_db_nodes), "__call__") as call: + client.list_db_nodes(request=None) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListDbNodesRequest() -@pytest.mark.parametrize( - "request_type", - [ - oracledatabase.StartAutonomousDatabaseRequest, - dict, - ], -) -def test_start_autonomous_database_rest_call_success(request_type): + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_list_gi_versions_empty_call_rest(): client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_gi_versions), "__call__") as call: + client.list_gi_versions(request=None) - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.start_autonomous_database(request) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListGiVersionsRequest() - # Establish that the response is the type that we expect. - json_return_value = json_format.MessageToJson(return_value) + assert args[0] == request_msg -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_start_autonomous_database_rest_interceptors(null_interceptor): - transport = transports.OracleDatabaseRestTransport( +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_list_minor_versions_empty_call_rest(): + client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.OracleDatabaseRestInterceptor(), + transport="rest", ) - client = OracleDatabaseClient(transport=transport) + # Mock the actual call, and fake the request. with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.OracleDatabaseRestInterceptor, "post_start_autonomous_database" - ) as post, mock.patch.object( - transports.OracleDatabaseRestInterceptor, - "post_start_autonomous_database_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "pre_start_autonomous_database" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = oracledatabase.StartAutonomousDatabaseRequest.pb( - oracledatabase.StartAutonomousDatabaseRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } + type(client.transport.list_minor_versions), "__call__" + ) as call: + client.list_minor_versions(request=None) - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = minor_version.ListMinorVersionsRequest() - request = oracledatabase.StartAutonomousDatabaseRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata + assert args[0] == request_msg - client.start_autonomous_database( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_db_system_shapes_empty_call_rest(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_db_system_shapes), "__call__" + ) as call: + client.list_db_system_shapes(request=None) -def test_restart_autonomous_database_rest_bad_request( - request_type=oracledatabase.RestartAutonomousDatabaseRequest, -): + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListDbSystemShapesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_list_autonomous_databases_empty_call_rest(): client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" - } - request = request_type(**request_init) - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.restart_autonomous_database(request) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_autonomous_databases), "__call__" + ) as call: + client.list_autonomous_databases(request=None) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListAutonomousDatabasesRequest() -@pytest.mark.parametrize( - "request_type", - [ - oracledatabase.RestartAutonomousDatabaseRequest, - dict, - ], -) -def test_restart_autonomous_database_rest_call_success(request_type): + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_get_autonomous_database_empty_call_rest(): client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/locations/sample2/autonomousDatabases/sample3" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_autonomous_database), "__call__" + ) as call: + client.get_autonomous_database(request=None) - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.restart_autonomous_database(request) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.GetAutonomousDatabaseRequest() - # Establish that the response is the type that we expect. - json_return_value = json_format.MessageToJson(return_value) + assert args[0] == request_msg -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_restart_autonomous_database_rest_interceptors(null_interceptor): - transport = transports.OracleDatabaseRestTransport( +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_create_autonomous_database_empty_call_rest(): + client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.OracleDatabaseRestInterceptor(), + transport="rest", ) - client = OracleDatabaseClient(transport=transport) + # Mock the actual call, and fake the request. with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.OracleDatabaseRestInterceptor, "post_restart_autonomous_database" - ) as post, mock.patch.object( - transports.OracleDatabaseRestInterceptor, - "post_restart_autonomous_database_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.OracleDatabaseRestInterceptor, "pre_restart_autonomous_database" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = oracledatabase.RestartAutonomousDatabaseRequest.pb( - oracledatabase.RestartAutonomousDatabaseRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } + type(client.transport.create_autonomous_database), "__call__" + ) as call: + client.create_autonomous_database(request=None) - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.CreateAutonomousDatabaseRequest() - request = oracledatabase.RestartAutonomousDatabaseRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata + assert args[0] == request_msg - client.restart_autonomous_database( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_autonomous_database_empty_call_rest(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_autonomous_database), "__call__" + ) as call: + client.update_autonomous_database(request=None) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.UpdateAutonomousDatabaseRequest() -def test_get_location_rest_bad_request(request_type=locations_pb2.GetLocationRequest): + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_autonomous_database_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - request = request_type() - request = json_format.ParseDict( - {"name": "projects/sample1/locations/sample2"}, request - ) - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_location(request) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_autonomous_database), "__call__" + ) as call: + client.delete_autonomous_database(request=None) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.DeleteAutonomousDatabaseRequest() -@pytest.mark.parametrize( - "request_type", - [ - locations_pb2.GetLocationRequest, - dict, - ], -) -def test_get_location_rest(request_type): + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_restore_autonomous_database_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - request_init = {"name": "projects/sample1/locations/sample2"} - request = request_type(**request_init) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # Designate an appropriate value for the returned response. - return_value = locations_pb2.Location() + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.restore_autonomous_database), "__call__" + ) as call: + client.restore_autonomous_database(request=None) - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.RestoreAutonomousDatabaseRequest() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + assert args[0] == request_msg - response = client.get_location(request) - # Establish that the response is the type that we expect. - assert isinstance(response, locations_pb2.Location) +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_generate_autonomous_database_wallet_empty_call_rest(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.generate_autonomous_database_wallet), "__call__" + ) as call: + client.generate_autonomous_database_wallet(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.GenerateAutonomousDatabaseWalletRequest() + + assert args[0] == request_msg -def test_list_locations_rest_bad_request( - request_type=locations_pb2.ListLocationsRequest, -): +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_list_autonomous_db_versions_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - request = request_type() - request = json_format.ParseDict({"name": "projects/sample1"}, request) - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_locations(request) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_autonomous_db_versions), "__call__" + ) as call: + client.list_autonomous_db_versions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListAutonomousDbVersionsRequest() + assert args[0] == request_msg -@pytest.mark.parametrize( - "request_type", - [ - locations_pb2.ListLocationsRequest, - dict, - ], -) -def test_list_locations_rest(request_type): + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_autonomous_database_character_sets_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - request_init = {"name": "projects/sample1"} - request = request_type(**request_init) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # Designate an appropriate value for the returned response. 
- return_value = locations_pb2.ListLocationsResponse() - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_autonomous_database_character_sets), "__call__" + ) as call: + client.list_autonomous_database_character_sets(request=None) - response = client.list_locations(request) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListAutonomousDatabaseCharacterSetsRequest() - # Establish that the response is the type that we expect. - assert isinstance(response, locations_pb2.ListLocationsResponse) + assert args[0] == request_msg -def test_cancel_operation_rest_bad_request( - request_type=operations_pb2.CancelOperationRequest, -): +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_autonomous_database_backups_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - request = request_type() - request = json_format.ParseDict( - {"name": "projects/sample1/locations/sample2/operations/sample3"}, request - ) - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.cancel_operation(request) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_autonomous_database_backups), "__call__" + ) as call: + client.list_autonomous_database_backups(request=None) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.ListAutonomousDatabaseBackupsRequest() -@pytest.mark.parametrize( - "request_type", - [ - operations_pb2.CancelOperationRequest, - dict, - ], -) -def test_cancel_operation_rest(request_type): + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_stop_autonomous_database_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} - request = request_type(**request_init) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # Designate an appropriate value for the returned response. 
- return_value = None - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = "{}" - response_value.content = json_return_value.encode("UTF-8") - - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.stop_autonomous_database), "__call__" + ) as call: + client.stop_autonomous_database(request=None) - response = client.cancel_operation(request) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.StopAutonomousDatabaseRequest() - # Establish that the response is the type that we expect. - assert response is None + assert args[0] == request_msg -def test_delete_operation_rest_bad_request( - request_type=operations_pb2.DeleteOperationRequest, -): +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_start_autonomous_database_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - request = request_type() - request = json_format.ParseDict( - {"name": "projects/sample1/locations/sample2/operations/sample3"}, request - ) - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.delete_operation(request) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.start_autonomous_database), "__call__" + ) as call: + client.start_autonomous_database(request=None) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.StartAutonomousDatabaseRequest() -@pytest.mark.parametrize( - "request_type", - [ - operations_pb2.DeleteOperationRequest, - dict, - ], -) -def test_delete_operation_rest(request_type): + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_restart_autonomous_database_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} - request = request_type(**request_init) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # Designate an appropriate value for the returned response. 
- return_value = None - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = "{}" - response_value.content = json_return_value.encode("UTF-8") - - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.restart_autonomous_database), "__call__" + ) as call: + client.restart_autonomous_database(request=None) - response = client.delete_operation(request) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.RestartAutonomousDatabaseRequest() - # Establish that the response is the type that we expect. - assert response is None + assert args[0] == request_msg -def test_get_operation_rest_bad_request( - request_type=operations_pb2.GetOperationRequest, -): +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_switchover_autonomous_database_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - request = request_type() - request = json_format.ParseDict( - {"name": "projects/sample1/locations/sample2/operations/sample3"}, request - ) - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_operation(request) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.switchover_autonomous_database), "__call__" + ) as call: + client.switchover_autonomous_database(request=None) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.SwitchoverAutonomousDatabaseRequest() -@pytest.mark.parametrize( - "request_type", - [ - operations_pb2.GetOperationRequest, - dict, - ], -) -def test_get_operation_rest(request_type): + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_failover_autonomous_database_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} - request = request_type(**request_init) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation() - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.failover_autonomous_database), "__call__" + ) as call: + client.failover_autonomous_database(request=None) - response = client.get_operation(request) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = oracledatabase.FailoverAutonomousDatabaseRequest() - # Establish that the response is the type that we expect. - assert isinstance(response, operations_pb2.Operation) + assert args[0] == request_msg -def test_list_operations_rest_bad_request( - request_type=operations_pb2.ListOperationsRequest, -): +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_odb_networks_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - request = request_type() - request = json_format.ParseDict( - {"name": "projects/sample1/locations/sample2"}, request - ) - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_operations(request) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_odb_networks), "__call__" + ) as call: + client.list_odb_networks(request=None) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = odb_network.ListOdbNetworksRequest() -@pytest.mark.parametrize( - "request_type", - [ - operations_pb2.ListOperationsRequest, - dict, - ], -) -def test_list_operations_rest(request_type): + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_odb_network_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - request_init = {"name": "projects/sample1/locations/sample2"} - request = request_type(**request_init) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.ListOperationsResponse() - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_odb_network), "__call__") as call: + client.get_odb_network(request=None) - response = client.list_operations(request) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = odb_network.GetOdbNetworkRequest() - # Establish that the response is the type that we expect. - assert isinstance(response, operations_pb2.ListOperationsResponse) + assert args[0] == request_msg -def test_initialize_client_w_rest(): +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_odb_network_empty_call_rest(): client = OracleDatabaseClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - assert client is not None + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_odb_network), "__call__" + ) as call: + client.create_odb_network(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = gco_odb_network.CreateOdbNetworkRequest() + + assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
-def test_list_cloud_exadata_infrastructures_empty_call_rest(): +def test_delete_odb_network_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -23424,65 +52141,61 @@ def test_list_cloud_exadata_infrastructures_empty_call_rest(): # Mock the actual call, and fake the request. with mock.patch.object( - type(client.transport.list_cloud_exadata_infrastructures), "__call__" + type(client.transport.delete_odb_network), "__call__" ) as call: - client.list_cloud_exadata_infrastructures(request=None) + client.delete_odb_network(request=None) # Establish that the underlying stub method was called. call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListCloudExadataInfrastructuresRequest() + request_msg = odb_network.DeleteOdbNetworkRequest() assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. -def test_get_cloud_exadata_infrastructure_empty_call_rest(): +def test_list_odb_subnets_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.get_cloud_exadata_infrastructure), "__call__" - ) as call: - client.get_cloud_exadata_infrastructure(request=None) + with mock.patch.object(type(client.transport.list_odb_subnets), "__call__") as call: + client.list_odb_subnets(request=None) # Establish that the underlying stub method was called. call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.GetCloudExadataInfrastructureRequest() + request_msg = odb_subnet.ListOdbSubnetsRequest() assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
-def test_create_cloud_exadata_infrastructure_empty_call_rest(): +def test_get_odb_subnet_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_cloud_exadata_infrastructure), "__call__" - ) as call: - client.create_cloud_exadata_infrastructure(request=None) + with mock.patch.object(type(client.transport.get_odb_subnet), "__call__") as call: + client.get_odb_subnet(request=None) # Establish that the underlying stub method was called. call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.CreateCloudExadataInfrastructureRequest() + request_msg = odb_subnet.GetOdbSubnetRequest() assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. -def test_delete_cloud_exadata_infrastructure_empty_call_rest(): +def test_create_odb_subnet_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -23490,21 +52203,21 @@ def test_delete_cloud_exadata_infrastructure_empty_call_rest(): # Mock the actual call, and fake the request. with mock.patch.object( - type(client.transport.delete_cloud_exadata_infrastructure), "__call__" + type(client.transport.create_odb_subnet), "__call__" ) as call: - client.delete_cloud_exadata_infrastructure(request=None) + client.create_odb_subnet(request=None) # Establish that the underlying stub method was called. call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.DeleteCloudExadataInfrastructureRequest() + request_msg = gco_odb_subnet.CreateOdbSubnetRequest() assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
-def test_list_cloud_vm_clusters_empty_call_rest(): +def test_delete_odb_subnet_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -23512,21 +52225,21 @@ def test_list_cloud_vm_clusters_empty_call_rest(): # Mock the actual call, and fake the request. with mock.patch.object( - type(client.transport.list_cloud_vm_clusters), "__call__" + type(client.transport.delete_odb_subnet), "__call__" ) as call: - client.list_cloud_vm_clusters(request=None) + client.delete_odb_subnet(request=None) # Establish that the underlying stub method was called. call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListCloudVmClustersRequest() + request_msg = odb_subnet.DeleteOdbSubnetRequest() assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. -def test_get_cloud_vm_cluster_empty_call_rest(): +def test_list_exadb_vm_clusters_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -23534,21 +52247,21 @@ def test_get_cloud_vm_cluster_empty_call_rest(): # Mock the actual call, and fake the request. with mock.patch.object( - type(client.transport.get_cloud_vm_cluster), "__call__" + type(client.transport.list_exadb_vm_clusters), "__call__" ) as call: - client.get_cloud_vm_cluster(request=None) + client.list_exadb_vm_clusters(request=None) # Establish that the underlying stub method was called. call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.GetCloudVmClusterRequest() + request_msg = oracledatabase.ListExadbVmClustersRequest() assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
-def test_create_cloud_vm_cluster_empty_call_rest(): +def test_get_exadb_vm_cluster_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -23556,21 +52269,21 @@ def test_create_cloud_vm_cluster_empty_call_rest(): # Mock the actual call, and fake the request. with mock.patch.object( - type(client.transport.create_cloud_vm_cluster), "__call__" + type(client.transport.get_exadb_vm_cluster), "__call__" ) as call: - client.create_cloud_vm_cluster(request=None) + client.get_exadb_vm_cluster(request=None) # Establish that the underlying stub method was called. call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.CreateCloudVmClusterRequest() + request_msg = oracledatabase.GetExadbVmClusterRequest() assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. -def test_delete_cloud_vm_cluster_empty_call_rest(): +def test_create_exadb_vm_cluster_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -23578,21 +52291,21 @@ def test_delete_cloud_vm_cluster_empty_call_rest(): # Mock the actual call, and fake the request. with mock.patch.object( - type(client.transport.delete_cloud_vm_cluster), "__call__" + type(client.transport.create_exadb_vm_cluster), "__call__" ) as call: - client.delete_cloud_vm_cluster(request=None) + client.create_exadb_vm_cluster(request=None) # Establish that the underlying stub method was called. call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.DeleteCloudVmClusterRequest() + request_msg = oracledatabase.CreateExadbVmClusterRequest() assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
-def test_list_entitlements_empty_call_rest(): +def test_delete_exadb_vm_cluster_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -23600,81 +52313,87 @@ def test_list_entitlements_empty_call_rest(): # Mock the actual call, and fake the request. with mock.patch.object( - type(client.transport.list_entitlements), "__call__" + type(client.transport.delete_exadb_vm_cluster), "__call__" ) as call: - client.list_entitlements(request=None) + client.delete_exadb_vm_cluster(request=None) # Establish that the underlying stub method was called. call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListEntitlementsRequest() + request_msg = oracledatabase.DeleteExadbVmClusterRequest() assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. -def test_list_db_servers_empty_call_rest(): +def test_update_exadb_vm_cluster_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_db_servers), "__call__") as call: - client.list_db_servers(request=None) + with mock.patch.object( + type(client.transport.update_exadb_vm_cluster), "__call__" + ) as call: + client.update_exadb_vm_cluster(request=None) # Establish that the underlying stub method was called. call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListDbServersRequest() + request_msg = oracledatabase.UpdateExadbVmClusterRequest() assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
-def test_list_db_nodes_empty_call_rest(): +def test_remove_virtual_machine_exadb_vm_cluster_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_db_nodes), "__call__") as call: - client.list_db_nodes(request=None) + with mock.patch.object( + type(client.transport.remove_virtual_machine_exadb_vm_cluster), "__call__" + ) as call: + client.remove_virtual_machine_exadb_vm_cluster(request=None) # Establish that the underlying stub method was called. call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListDbNodesRequest() + request_msg = oracledatabase.RemoveVirtualMachineExadbVmClusterRequest() assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. -def test_list_gi_versions_empty_call_rest(): +def test_list_exascale_db_storage_vaults_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_gi_versions), "__call__") as call: - client.list_gi_versions(request=None) + with mock.patch.object( + type(client.transport.list_exascale_db_storage_vaults), "__call__" + ) as call: + client.list_exascale_db_storage_vaults(request=None) # Establish that the underlying stub method was called. call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListGiVersionsRequest() + request_msg = exascale_db_storage_vault.ListExascaleDbStorageVaultsRequest() assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
-def test_list_db_system_shapes_empty_call_rest(): +def test_get_exascale_db_storage_vault_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -23682,21 +52401,21 @@ def test_list_db_system_shapes_empty_call_rest(): # Mock the actual call, and fake the request. with mock.patch.object( - type(client.transport.list_db_system_shapes), "__call__" + type(client.transport.get_exascale_db_storage_vault), "__call__" ) as call: - client.list_db_system_shapes(request=None) + client.get_exascale_db_storage_vault(request=None) # Establish that the underlying stub method was called. call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListDbSystemShapesRequest() + request_msg = exascale_db_storage_vault.GetExascaleDbStorageVaultRequest() assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. -def test_list_autonomous_databases_empty_call_rest(): +def test_create_exascale_db_storage_vault_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -23704,21 +52423,23 @@ def test_list_autonomous_databases_empty_call_rest(): # Mock the actual call, and fake the request. with mock.patch.object( - type(client.transport.list_autonomous_databases), "__call__" + type(client.transport.create_exascale_db_storage_vault), "__call__" ) as call: - client.list_autonomous_databases(request=None) + client.create_exascale_db_storage_vault(request=None) # Establish that the underlying stub method was called. call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListAutonomousDatabasesRequest() + request_msg = ( + gco_exascale_db_storage_vault.CreateExascaleDbStorageVaultRequest() + ) assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. 
request == None and no flattened fields passed, work. -def test_get_autonomous_database_empty_call_rest(): +def test_delete_exascale_db_storage_vault_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -23726,21 +52447,21 @@ def test_get_autonomous_database_empty_call_rest(): # Mock the actual call, and fake the request. with mock.patch.object( - type(client.transport.get_autonomous_database), "__call__" + type(client.transport.delete_exascale_db_storage_vault), "__call__" ) as call: - client.get_autonomous_database(request=None) + client.delete_exascale_db_storage_vault(request=None) # Establish that the underlying stub method was called. call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.GetAutonomousDatabaseRequest() + request_msg = exascale_db_storage_vault.DeleteExascaleDbStorageVaultRequest() assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. -def test_create_autonomous_database_empty_call_rest(): +def test_list_db_system_initial_storage_sizes_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -23748,65 +52469,63 @@ def test_create_autonomous_database_empty_call_rest(): # Mock the actual call, and fake the request. with mock.patch.object( - type(client.transport.create_autonomous_database), "__call__" + type(client.transport.list_db_system_initial_storage_sizes), "__call__" ) as call: - client.create_autonomous_database(request=None) + client.list_db_system_initial_storage_sizes(request=None) # Establish that the underlying stub method was called. 
call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.CreateAutonomousDatabaseRequest() + request_msg = ( + db_system_initial_storage_size.ListDbSystemInitialStorageSizesRequest() + ) assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. -def test_delete_autonomous_database_empty_call_rest(): +def test_list_databases_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.delete_autonomous_database), "__call__" - ) as call: - client.delete_autonomous_database(request=None) + with mock.patch.object(type(client.transport.list_databases), "__call__") as call: + client.list_databases(request=None) # Establish that the underlying stub method was called. call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.DeleteAutonomousDatabaseRequest() + request_msg = database.ListDatabasesRequest() assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. -def test_restore_autonomous_database_empty_call_rest(): +def test_get_database_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.restore_autonomous_database), "__call__" - ) as call: - client.restore_autonomous_database(request=None) + with mock.patch.object(type(client.transport.get_database), "__call__") as call: + client.get_database(request=None) # Establish that the underlying stub method was called. 
call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.RestoreAutonomousDatabaseRequest() + request_msg = database.GetDatabaseRequest() assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. -def test_generate_autonomous_database_wallet_empty_call_rest(): +def test_list_pluggable_databases_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -23814,21 +52533,21 @@ def test_generate_autonomous_database_wallet_empty_call_rest(): # Mock the actual call, and fake the request. with mock.patch.object( - type(client.transport.generate_autonomous_database_wallet), "__call__" + type(client.transport.list_pluggable_databases), "__call__" ) as call: - client.generate_autonomous_database_wallet(request=None) + client.list_pluggable_databases(request=None) # Establish that the underlying stub method was called. call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.GenerateAutonomousDatabaseWalletRequest() + request_msg = pluggable_database.ListPluggableDatabasesRequest() assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. -def test_list_autonomous_db_versions_empty_call_rest(): +def test_get_pluggable_database_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -23836,109 +52555,121 @@ def test_list_autonomous_db_versions_empty_call_rest(): # Mock the actual call, and fake the request. 
with mock.patch.object( - type(client.transport.list_autonomous_db_versions), "__call__" + type(client.transport.get_pluggable_database), "__call__" ) as call: - client.list_autonomous_db_versions(request=None) + client.get_pluggable_database(request=None) # Establish that the underlying stub method was called. call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListAutonomousDbVersionsRequest() + request_msg = pluggable_database.GetPluggableDatabaseRequest() assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. -def test_list_autonomous_database_character_sets_empty_call_rest(): +def test_list_db_systems_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_autonomous_database_character_sets), "__call__" - ) as call: - client.list_autonomous_database_character_sets(request=None) + with mock.patch.object(type(client.transport.list_db_systems), "__call__") as call: + client.list_db_systems(request=None) # Establish that the underlying stub method was called. call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListAutonomousDatabaseCharacterSetsRequest() + request_msg = db_system.ListDbSystemsRequest() assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. -def test_list_autonomous_database_backups_empty_call_rest(): +def test_get_db_system_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # Mock the actual call, and fake the request. 
- with mock.patch.object( - type(client.transport.list_autonomous_database_backups), "__call__" - ) as call: - client.list_autonomous_database_backups(request=None) + with mock.patch.object(type(client.transport.get_db_system), "__call__") as call: + client.get_db_system(request=None) # Establish that the underlying stub method was called. call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.ListAutonomousDatabaseBackupsRequest() + request_msg = db_system.GetDbSystemRequest() assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. -def test_stop_autonomous_database_empty_call_rest(): +def test_create_db_system_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.stop_autonomous_database), "__call__" - ) as call: - client.stop_autonomous_database(request=None) + with mock.patch.object(type(client.transport.create_db_system), "__call__") as call: + client.create_db_system(request=None) # Establish that the underlying stub method was called. call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.StopAutonomousDatabaseRequest() + request_msg = gco_db_system.CreateDbSystemRequest() assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. -def test_start_autonomous_database_empty_call_rest(): +def test_delete_db_system_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # Mock the actual call, and fake the request. 
- with mock.patch.object( - type(client.transport.start_autonomous_database), "__call__" - ) as call: - client.start_autonomous_database(request=None) + with mock.patch.object(type(client.transport.delete_db_system), "__call__") as call: + client.delete_db_system(request=None) # Establish that the underlying stub method was called. call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.StartAutonomousDatabaseRequest() + request_msg = db_system.DeleteDbSystemRequest() assert args[0] == request_msg # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. -def test_restart_autonomous_database_empty_call_rest(): +def test_list_db_versions_empty_call_rest(): + client = OracleDatabaseClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_db_versions), "__call__") as call: + client.list_db_versions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = db_version.ListDbVersionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_database_character_sets_empty_call_rest(): client = OracleDatabaseClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -23946,14 +52677,14 @@ def test_restart_autonomous_database_empty_call_rest(): # Mock the actual call, and fake the request. 
with mock.patch.object( - type(client.transport.restart_autonomous_database), "__call__" + type(client.transport.list_database_character_sets), "__call__" ) as call: - client.restart_autonomous_database(request=None) + client.list_database_character_sets(request=None) # Establish that the underlying stub method was called. call.assert_called() _, args, _ = call.mock_calls[0] - request_msg = oracledatabase.RestartAutonomousDatabaseRequest() + request_msg = database_character_set.ListDatabaseCharacterSetsRequest() assert args[0] == request_msg @@ -24020,10 +52751,12 @@ def test_oracle_database_base_transport(): "list_db_servers", "list_db_nodes", "list_gi_versions", + "list_minor_versions", "list_db_system_shapes", "list_autonomous_databases", "get_autonomous_database", "create_autonomous_database", + "update_autonomous_database", "delete_autonomous_database", "restore_autonomous_database", "generate_autonomous_database_wallet", @@ -24033,6 +52766,37 @@ def test_oracle_database_base_transport(): "stop_autonomous_database", "start_autonomous_database", "restart_autonomous_database", + "switchover_autonomous_database", + "failover_autonomous_database", + "list_odb_networks", + "get_odb_network", + "create_odb_network", + "delete_odb_network", + "list_odb_subnets", + "get_odb_subnet", + "create_odb_subnet", + "delete_odb_subnet", + "list_exadb_vm_clusters", + "get_exadb_vm_cluster", + "create_exadb_vm_cluster", + "delete_exadb_vm_cluster", + "update_exadb_vm_cluster", + "remove_virtual_machine_exadb_vm_cluster", + "list_exascale_db_storage_vaults", + "get_exascale_db_storage_vault", + "create_exascale_db_storage_vault", + "delete_exascale_db_storage_vault", + "list_db_system_initial_storage_sizes", + "list_databases", + "get_database", + "list_pluggable_databases", + "get_pluggable_database", + "list_db_systems", + "get_db_system", + "create_db_system", + "delete_db_system", + "list_db_versions", + "list_database_character_sets", "get_location", "list_locations", 
"get_operation", @@ -24338,6 +53102,9 @@ def test_oracle_database_client_transport_session_collision(transport_name): session1 = client1.transport.list_gi_versions._session session2 = client2.transport.list_gi_versions._session assert session1 != session2 + session1 = client1.transport.list_minor_versions._session + session2 = client2.transport.list_minor_versions._session + assert session1 != session2 session1 = client1.transport.list_db_system_shapes._session session2 = client2.transport.list_db_system_shapes._session assert session1 != session2 @@ -24350,6 +53117,9 @@ def test_oracle_database_client_transport_session_collision(transport_name): session1 = client1.transport.create_autonomous_database._session session2 = client2.transport.create_autonomous_database._session assert session1 != session2 + session1 = client1.transport.update_autonomous_database._session + session2 = client2.transport.update_autonomous_database._session + assert session1 != session2 session1 = client1.transport.delete_autonomous_database._session session2 = client2.transport.delete_autonomous_database._session assert session1 != session2 @@ -24377,6 +53147,99 @@ def test_oracle_database_client_transport_session_collision(transport_name): session1 = client1.transport.restart_autonomous_database._session session2 = client2.transport.restart_autonomous_database._session assert session1 != session2 + session1 = client1.transport.switchover_autonomous_database._session + session2 = client2.transport.switchover_autonomous_database._session + assert session1 != session2 + session1 = client1.transport.failover_autonomous_database._session + session2 = client2.transport.failover_autonomous_database._session + assert session1 != session2 + session1 = client1.transport.list_odb_networks._session + session2 = client2.transport.list_odb_networks._session + assert session1 != session2 + session1 = client1.transport.get_odb_network._session + session2 = client2.transport.get_odb_network._session + 
assert session1 != session2 + session1 = client1.transport.create_odb_network._session + session2 = client2.transport.create_odb_network._session + assert session1 != session2 + session1 = client1.transport.delete_odb_network._session + session2 = client2.transport.delete_odb_network._session + assert session1 != session2 + session1 = client1.transport.list_odb_subnets._session + session2 = client2.transport.list_odb_subnets._session + assert session1 != session2 + session1 = client1.transport.get_odb_subnet._session + session2 = client2.transport.get_odb_subnet._session + assert session1 != session2 + session1 = client1.transport.create_odb_subnet._session + session2 = client2.transport.create_odb_subnet._session + assert session1 != session2 + session1 = client1.transport.delete_odb_subnet._session + session2 = client2.transport.delete_odb_subnet._session + assert session1 != session2 + session1 = client1.transport.list_exadb_vm_clusters._session + session2 = client2.transport.list_exadb_vm_clusters._session + assert session1 != session2 + session1 = client1.transport.get_exadb_vm_cluster._session + session2 = client2.transport.get_exadb_vm_cluster._session + assert session1 != session2 + session1 = client1.transport.create_exadb_vm_cluster._session + session2 = client2.transport.create_exadb_vm_cluster._session + assert session1 != session2 + session1 = client1.transport.delete_exadb_vm_cluster._session + session2 = client2.transport.delete_exadb_vm_cluster._session + assert session1 != session2 + session1 = client1.transport.update_exadb_vm_cluster._session + session2 = client2.transport.update_exadb_vm_cluster._session + assert session1 != session2 + session1 = client1.transport.remove_virtual_machine_exadb_vm_cluster._session + session2 = client2.transport.remove_virtual_machine_exadb_vm_cluster._session + assert session1 != session2 + session1 = client1.transport.list_exascale_db_storage_vaults._session + session2 = 
client2.transport.list_exascale_db_storage_vaults._session + assert session1 != session2 + session1 = client1.transport.get_exascale_db_storage_vault._session + session2 = client2.transport.get_exascale_db_storage_vault._session + assert session1 != session2 + session1 = client1.transport.create_exascale_db_storage_vault._session + session2 = client2.transport.create_exascale_db_storage_vault._session + assert session1 != session2 + session1 = client1.transport.delete_exascale_db_storage_vault._session + session2 = client2.transport.delete_exascale_db_storage_vault._session + assert session1 != session2 + session1 = client1.transport.list_db_system_initial_storage_sizes._session + session2 = client2.transport.list_db_system_initial_storage_sizes._session + assert session1 != session2 + session1 = client1.transport.list_databases._session + session2 = client2.transport.list_databases._session + assert session1 != session2 + session1 = client1.transport.get_database._session + session2 = client2.transport.get_database._session + assert session1 != session2 + session1 = client1.transport.list_pluggable_databases._session + session2 = client2.transport.list_pluggable_databases._session + assert session1 != session2 + session1 = client1.transport.get_pluggable_database._session + session2 = client2.transport.get_pluggable_database._session + assert session1 != session2 + session1 = client1.transport.list_db_systems._session + session2 = client2.transport.list_db_systems._session + assert session1 != session2 + session1 = client1.transport.get_db_system._session + session2 = client2.transport.get_db_system._session + assert session1 != session2 + session1 = client1.transport.create_db_system._session + session2 = client2.transport.create_db_system._session + assert session1 != session2 + session1 = client1.transport.delete_db_system._session + session2 = client2.transport.delete_db_system._session + assert session1 != session2 + session1 = 
client1.transport.list_db_versions._session + session2 = client2.transport.list_db_versions._session + assert session1 != session2 + session1 = client1.transport.list_database_character_sets._session + session2 = client2.transport.list_database_character_sets._session + assert session1 != session2 def test_oracle_database_grpc_transport_channel(): @@ -24707,11 +53570,96 @@ def test_parse_cloud_vm_cluster_path(): assert expected == actual -def test_db_node_path(): +def test_crypto_key_path(): project = "squid" location = "clam" - cloud_vm_cluster = "whelk" - db_node = "octopus" + key_ring = "whelk" + crypto_key = "octopus" + expected = "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}".format( + project=project, + location=location, + key_ring=key_ring, + crypto_key=crypto_key, + ) + actual = OracleDatabaseClient.crypto_key_path( + project, location, key_ring, crypto_key + ) + assert expected == actual + + +def test_parse_crypto_key_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "key_ring": "cuttlefish", + "crypto_key": "mussel", + } + path = OracleDatabaseClient.crypto_key_path(**expected) + + # Check that the path construction is reversible. + actual = OracleDatabaseClient.parse_crypto_key_path(path) + assert expected == actual + + +def test_database_path(): + project = "winkle" + location = "nautilus" + database = "scallop" + expected = "projects/{project}/locations/{location}/databases/{database}".format( + project=project, + location=location, + database=database, + ) + actual = OracleDatabaseClient.database_path(project, location, database) + assert expected == actual + + +def test_parse_database_path(): + expected = { + "project": "abalone", + "location": "squid", + "database": "clam", + } + path = OracleDatabaseClient.database_path(**expected) + + # Check that the path construction is reversible. 
+ actual = OracleDatabaseClient.parse_database_path(path) + assert expected == actual + + +def test_database_character_set_path(): + project = "whelk" + location = "octopus" + database_character_set = "oyster" + expected = "projects/{project}/locations/{location}/databaseCharacterSets/{database_character_set}".format( + project=project, + location=location, + database_character_set=database_character_set, + ) + actual = OracleDatabaseClient.database_character_set_path( + project, location, database_character_set + ) + assert expected == actual + + +def test_parse_database_character_set_path(): + expected = { + "project": "nudibranch", + "location": "cuttlefish", + "database_character_set": "mussel", + } + path = OracleDatabaseClient.database_character_set_path(**expected) + + # Check that the path construction is reversible. + actual = OracleDatabaseClient.parse_database_character_set_path(path) + assert expected == actual + + +def test_db_node_path(): + project = "winkle" + location = "nautilus" + cloud_vm_cluster = "scallop" + db_node = "abalone" expected = "projects/{project}/locations/{location}/cloudVmClusters/{cloud_vm_cluster}/dbNodes/{db_node}".format( project=project, location=location, @@ -24726,10 +53674,10 @@ def test_db_node_path(): def test_parse_db_node_path(): expected = { - "project": "oyster", - "location": "nudibranch", - "cloud_vm_cluster": "cuttlefish", - "db_node": "mussel", + "project": "squid", + "location": "clam", + "cloud_vm_cluster": "whelk", + "db_node": "octopus", } path = OracleDatabaseClient.db_node_path(**expected) @@ -24739,10 +53687,10 @@ def test_parse_db_node_path(): def test_db_server_path(): - project = "winkle" - location = "nautilus" - cloud_exadata_infrastructure = "scallop" - db_server = "abalone" + project = "oyster" + location = "nudibranch" + cloud_exadata_infrastructure = "cuttlefish" + db_server = "mussel" expected = 
"projects/{project}/locations/{location}/cloudExadataInfrastructures/{cloud_exadata_infrastructure}/dbServers/{db_server}".format( project=project, location=location, @@ -24757,10 +53705,10 @@ def test_db_server_path(): def test_parse_db_server_path(): expected = { - "project": "squid", - "location": "clam", - "cloud_exadata_infrastructure": "whelk", - "db_server": "octopus", + "project": "winkle", + "location": "nautilus", + "cloud_exadata_infrastructure": "scallop", + "db_server": "abalone", } path = OracleDatabaseClient.db_server_path(**expected) @@ -24769,10 +53717,64 @@ def test_parse_db_server_path(): assert expected == actual +def test_db_system_path(): + project = "squid" + location = "clam" + db_system = "whelk" + expected = "projects/{project}/locations/{location}/dbSystems/{db_system}".format( + project=project, + location=location, + db_system=db_system, + ) + actual = OracleDatabaseClient.db_system_path(project, location, db_system) + assert expected == actual + + +def test_parse_db_system_path(): + expected = { + "project": "octopus", + "location": "oyster", + "db_system": "nudibranch", + } + path = OracleDatabaseClient.db_system_path(**expected) + + # Check that the path construction is reversible. 
+ actual = OracleDatabaseClient.parse_db_system_path(path) + assert expected == actual + + +def test_db_system_initial_storage_size_path(): + project = "cuttlefish" + location = "mussel" + db_system_initial_storage_size = "winkle" + expected = "projects/{project}/locations/{location}/dbSystemInitialStorageSizes/{db_system_initial_storage_size}".format( + project=project, + location=location, + db_system_initial_storage_size=db_system_initial_storage_size, + ) + actual = OracleDatabaseClient.db_system_initial_storage_size_path( + project, location, db_system_initial_storage_size + ) + assert expected == actual + + +def test_parse_db_system_initial_storage_size_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "db_system_initial_storage_size": "abalone", + } + path = OracleDatabaseClient.db_system_initial_storage_size_path(**expected) + + # Check that the path construction is reversible. + actual = OracleDatabaseClient.parse_db_system_initial_storage_size_path(path) + assert expected == actual + + def test_db_system_shape_path(): - project = "oyster" - location = "nudibranch" - db_system_shape = "cuttlefish" + project = "squid" + location = "clam" + db_system_shape = "whelk" expected = "projects/{project}/locations/{location}/dbSystemShapes/{db_system_shape}".format( project=project, location=location, @@ -24786,9 +53788,9 @@ def test_db_system_shape_path(): def test_parse_db_system_shape_path(): expected = { - "project": "mussel", - "location": "winkle", - "db_system_shape": "nautilus", + "project": "octopus", + "location": "oyster", + "db_system_shape": "nudibranch", } path = OracleDatabaseClient.db_system_shape_path(**expected) @@ -24797,10 +53799,36 @@ def test_parse_db_system_shape_path(): assert expected == actual +def test_db_version_path(): + project = "cuttlefish" + location = "mussel" + db_version = "winkle" + expected = "projects/{project}/locations/{location}/dbVersions/{db_version}".format( + project=project, + location=location, 
+ db_version=db_version, + ) + actual = OracleDatabaseClient.db_version_path(project, location, db_version) + assert expected == actual + + +def test_parse_db_version_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "db_version": "abalone", + } + path = OracleDatabaseClient.db_version_path(**expected) + + # Check that the path construction is reversible. + actual = OracleDatabaseClient.parse_db_version_path(path) + assert expected == actual + + def test_entitlement_path(): - project = "scallop" - location = "abalone" - entitlement = "squid" + project = "squid" + location = "clam" + entitlement = "whelk" expected = ( "projects/{project}/locations/{location}/entitlements/{entitlement}".format( project=project, @@ -24814,9 +53842,9 @@ def test_entitlement_path(): def test_parse_entitlement_path(): expected = { - "project": "clam", - "location": "whelk", - "entitlement": "octopus", + "project": "octopus", + "location": "oyster", + "entitlement": "nudibranch", } path = OracleDatabaseClient.entitlement_path(**expected) @@ -24825,10 +53853,66 @@ def test_parse_entitlement_path(): assert expected == actual +def test_exadb_vm_cluster_path(): + project = "cuttlefish" + location = "mussel" + exadb_vm_cluster = "winkle" + expected = "projects/{project}/locations/{location}/exadbVmClusters/{exadb_vm_cluster}".format( + project=project, + location=location, + exadb_vm_cluster=exadb_vm_cluster, + ) + actual = OracleDatabaseClient.exadb_vm_cluster_path( + project, location, exadb_vm_cluster + ) + assert expected == actual + + +def test_parse_exadb_vm_cluster_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "exadb_vm_cluster": "abalone", + } + path = OracleDatabaseClient.exadb_vm_cluster_path(**expected) + + # Check that the path construction is reversible. 
+ actual = OracleDatabaseClient.parse_exadb_vm_cluster_path(path) + assert expected == actual + + +def test_exascale_db_storage_vault_path(): + project = "squid" + location = "clam" + exascale_db_storage_vault = "whelk" + expected = "projects/{project}/locations/{location}/exascaleDbStorageVaults/{exascale_db_storage_vault}".format( + project=project, + location=location, + exascale_db_storage_vault=exascale_db_storage_vault, + ) + actual = OracleDatabaseClient.exascale_db_storage_vault_path( + project, location, exascale_db_storage_vault + ) + assert expected == actual + + +def test_parse_exascale_db_storage_vault_path(): + expected = { + "project": "octopus", + "location": "oyster", + "exascale_db_storage_vault": "nudibranch", + } + path = OracleDatabaseClient.exascale_db_storage_vault_path(**expected) + + # Check that the path construction is reversible. + actual = OracleDatabaseClient.parse_exascale_db_storage_vault_path(path) + assert expected == actual + + def test_gi_version_path(): - project = "oyster" - location = "nudibranch" - gi_version = "cuttlefish" + project = "cuttlefish" + location = "mussel" + gi_version = "winkle" expected = "projects/{project}/locations/{location}/giVersions/{gi_version}".format( project=project, location=location, @@ -24840,9 +53924,9 @@ def test_gi_version_path(): def test_parse_gi_version_path(): expected = { - "project": "mussel", - "location": "winkle", - "gi_version": "nautilus", + "project": "nautilus", + "location": "scallop", + "gi_version": "abalone", } path = OracleDatabaseClient.gi_version_path(**expected) @@ -24851,9 +53935,40 @@ def test_parse_gi_version_path(): assert expected == actual +def test_minor_version_path(): + project = "squid" + location = "clam" + gi_version = "whelk" + minor_version = "octopus" + expected = "projects/{project}/locations/{location}/giVersions/{gi_version}/minorVersions/{minor_version}".format( + project=project, + location=location, + gi_version=gi_version, + 
minor_version=minor_version, + ) + actual = OracleDatabaseClient.minor_version_path( + project, location, gi_version, minor_version + ) + assert expected == actual + + +def test_parse_minor_version_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "gi_version": "cuttlefish", + "minor_version": "mussel", + } + path = OracleDatabaseClient.minor_version_path(**expected) + + # Check that the path construction is reversible. + actual = OracleDatabaseClient.parse_minor_version_path(path) + assert expected == actual + + def test_network_path(): - project = "scallop" - network = "abalone" + project = "winkle" + network = "nautilus" expected = "projects/{project}/global/networks/{network}".format( project=project, network=network, @@ -24864,8 +53979,8 @@ def test_network_path(): def test_parse_network_path(): expected = { - "project": "squid", - "network": "clam", + "project": "scallop", + "network": "abalone", } path = OracleDatabaseClient.network_path(**expected) @@ -24874,8 +53989,95 @@ def test_parse_network_path(): assert expected == actual +def test_odb_network_path(): + project = "squid" + location = "clam" + odb_network = "whelk" + expected = ( + "projects/{project}/locations/{location}/odbNetworks/{odb_network}".format( + project=project, + location=location, + odb_network=odb_network, + ) + ) + actual = OracleDatabaseClient.odb_network_path(project, location, odb_network) + assert expected == actual + + +def test_parse_odb_network_path(): + expected = { + "project": "octopus", + "location": "oyster", + "odb_network": "nudibranch", + } + path = OracleDatabaseClient.odb_network_path(**expected) + + # Check that the path construction is reversible. 
+ actual = OracleDatabaseClient.parse_odb_network_path(path) + assert expected == actual + + +def test_odb_subnet_path(): + project = "cuttlefish" + location = "mussel" + odb_network = "winkle" + odb_subnet = "nautilus" + expected = "projects/{project}/locations/{location}/odbNetworks/{odb_network}/odbSubnets/{odb_subnet}".format( + project=project, + location=location, + odb_network=odb_network, + odb_subnet=odb_subnet, + ) + actual = OracleDatabaseClient.odb_subnet_path( + project, location, odb_network, odb_subnet + ) + assert expected == actual + + +def test_parse_odb_subnet_path(): + expected = { + "project": "scallop", + "location": "abalone", + "odb_network": "squid", + "odb_subnet": "clam", + } + path = OracleDatabaseClient.odb_subnet_path(**expected) + + # Check that the path construction is reversible. + actual = OracleDatabaseClient.parse_odb_subnet_path(path) + assert expected == actual + + +def test_pluggable_database_path(): + project = "whelk" + location = "octopus" + pluggable_database = "oyster" + expected = "projects/{project}/locations/{location}/pluggableDatabases/{pluggable_database}".format( + project=project, + location=location, + pluggable_database=pluggable_database, + ) + actual = OracleDatabaseClient.pluggable_database_path( + project, location, pluggable_database + ) + assert expected == actual + + +def test_parse_pluggable_database_path(): + expected = { + "project": "nudibranch", + "location": "cuttlefish", + "pluggable_database": "mussel", + } + path = OracleDatabaseClient.pluggable_database_path(**expected) + + # Check that the path construction is reversible. 
+ actual = OracleDatabaseClient.parse_pluggable_database_path(path) + assert expected == actual + + def test_common_billing_account_path(): - billing_account = "whelk" + billing_account = "winkle" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -24885,7 +54087,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "octopus", + "billing_account": "nautilus", } path = OracleDatabaseClient.common_billing_account_path(**expected) @@ -24895,7 +54097,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "oyster" + folder = "scallop" expected = "folders/{folder}".format( folder=folder, ) @@ -24905,7 +54107,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "nudibranch", + "folder": "abalone", } path = OracleDatabaseClient.common_folder_path(**expected) @@ -24915,7 +54117,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "cuttlefish" + organization = "squid" expected = "organizations/{organization}".format( organization=organization, ) @@ -24925,7 +54127,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "mussel", + "organization": "clam", } path = OracleDatabaseClient.common_organization_path(**expected) @@ -24935,7 +54137,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "winkle" + project = "whelk" expected = "projects/{project}".format( project=project, ) @@ -24945,7 +54147,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "nautilus", + "project": "octopus", } path = OracleDatabaseClient.common_project_path(**expected) @@ -24955,8 +54157,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "scallop" - location = "abalone" + project = 
"oyster" + location = "nudibranch" expected = "projects/{project}/locations/{location}".format( project=project, location=location, @@ -24967,8 +54169,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "squid", - "location": "clam", + "project": "cuttlefish", + "location": "mussel", } path = OracleDatabaseClient.common_location_path(**expected) diff --git a/packages/google-cloud-org-policy/noxfile.py b/packages/google-cloud-org-policy/noxfile.py index f760afd2d283..566ed2617bba 100644 --- a/packages/google-cloud-org-policy/noxfile.py +++ b/packages/google-cloud-org-policy/noxfile.py @@ -27,6 +27,10 @@ LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] +# Add samples to the list of directories to format if the directory exists. +if os.path.isdir("samples"): + LINT_PATHS.append("samples") + ALL_PYTHON = [ "3.7", "3.8", diff --git a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_create_custom_constraint_async.py b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_create_custom_constraint_async.py index a70975888da4..19ba7bb3d1e1 100644 --- a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_create_custom_constraint_async.py +++ b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_create_custom_constraint_async.py @@ -49,4 +49,5 @@ async def sample_create_custom_constraint(): # Handle the response print(response) + # [END orgpolicy_v2_generated_OrgPolicy_CreateCustomConstraint_async] diff --git a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_create_custom_constraint_sync.py b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_create_custom_constraint_sync.py index 407d9013fccc..c3c42e34273d 100644 --- 
a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_create_custom_constraint_sync.py +++ b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_create_custom_constraint_sync.py @@ -49,4 +49,5 @@ def sample_create_custom_constraint(): # Handle the response print(response) + # [END orgpolicy_v2_generated_OrgPolicy_CreateCustomConstraint_sync] diff --git a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_create_policy_async.py b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_create_policy_async.py index 167ef71158e4..89ad131d03d5 100644 --- a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_create_policy_async.py +++ b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_create_policy_async.py @@ -49,4 +49,5 @@ async def sample_create_policy(): # Handle the response print(response) + # [END orgpolicy_v2_generated_OrgPolicy_CreatePolicy_async] diff --git a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_create_policy_sync.py b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_create_policy_sync.py index 65f13b19c66c..03f388624f95 100644 --- a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_create_policy_sync.py +++ b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_create_policy_sync.py @@ -49,4 +49,5 @@ def sample_create_policy(): # Handle the response print(response) + # [END orgpolicy_v2_generated_OrgPolicy_CreatePolicy_sync] diff --git a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_get_custom_constraint_async.py 
b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_get_custom_constraint_async.py index 91eb16a213ef..267f7c51e872 100644 --- a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_get_custom_constraint_async.py +++ b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_get_custom_constraint_async.py @@ -49,4 +49,5 @@ async def sample_get_custom_constraint(): # Handle the response print(response) + # [END orgpolicy_v2_generated_OrgPolicy_GetCustomConstraint_async] diff --git a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_get_custom_constraint_sync.py b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_get_custom_constraint_sync.py index 8207c0108148..487305f4a33c 100644 --- a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_get_custom_constraint_sync.py +++ b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_get_custom_constraint_sync.py @@ -49,4 +49,5 @@ def sample_get_custom_constraint(): # Handle the response print(response) + # [END orgpolicy_v2_generated_OrgPolicy_GetCustomConstraint_sync] diff --git a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_get_effective_policy_async.py b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_get_effective_policy_async.py index 50863586c9b7..ce5d97fc96ff 100644 --- a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_get_effective_policy_async.py +++ b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_get_effective_policy_async.py @@ -49,4 +49,5 @@ async def sample_get_effective_policy(): # Handle the response print(response) + # [END 
orgpolicy_v2_generated_OrgPolicy_GetEffectivePolicy_async] diff --git a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_get_effective_policy_sync.py b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_get_effective_policy_sync.py index 330d9e587de8..5762f9cda72d 100644 --- a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_get_effective_policy_sync.py +++ b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_get_effective_policy_sync.py @@ -49,4 +49,5 @@ def sample_get_effective_policy(): # Handle the response print(response) + # [END orgpolicy_v2_generated_OrgPolicy_GetEffectivePolicy_sync] diff --git a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_get_policy_async.py b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_get_policy_async.py index 4afdc5fdd3ed..2861af77c3ed 100644 --- a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_get_policy_async.py +++ b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_get_policy_async.py @@ -49,4 +49,5 @@ async def sample_get_policy(): # Handle the response print(response) + # [END orgpolicy_v2_generated_OrgPolicy_GetPolicy_async] diff --git a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_get_policy_sync.py b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_get_policy_sync.py index cff61fd86f82..9def1d4d390d 100644 --- a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_get_policy_sync.py +++ b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_get_policy_sync.py @@ -49,4 +49,5 @@ def sample_get_policy(): # 
Handle the response print(response) + # [END orgpolicy_v2_generated_OrgPolicy_GetPolicy_sync] diff --git a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_list_constraints_async.py b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_list_constraints_async.py index 32635faa143c..4c2c3342b2c6 100644 --- a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_list_constraints_async.py +++ b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_list_constraints_async.py @@ -50,4 +50,5 @@ async def sample_list_constraints(): async for response in page_result: print(response) + # [END orgpolicy_v2_generated_OrgPolicy_ListConstraints_async] diff --git a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_list_constraints_sync.py b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_list_constraints_sync.py index 6c46723326ea..f4d5944f4420 100644 --- a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_list_constraints_sync.py +++ b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_list_constraints_sync.py @@ -50,4 +50,5 @@ def sample_list_constraints(): for response in page_result: print(response) + # [END orgpolicy_v2_generated_OrgPolicy_ListConstraints_sync] diff --git a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_list_custom_constraints_async.py b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_list_custom_constraints_async.py index 7b957fb47dd9..c955aadf98fb 100644 --- a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_list_custom_constraints_async.py +++ 
b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_list_custom_constraints_async.py @@ -50,4 +50,5 @@ async def sample_list_custom_constraints(): async for response in page_result: print(response) + # [END orgpolicy_v2_generated_OrgPolicy_ListCustomConstraints_async] diff --git a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_list_custom_constraints_sync.py b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_list_custom_constraints_sync.py index 738ff29522de..3890128f2abb 100644 --- a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_list_custom_constraints_sync.py +++ b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_list_custom_constraints_sync.py @@ -50,4 +50,5 @@ def sample_list_custom_constraints(): for response in page_result: print(response) + # [END orgpolicy_v2_generated_OrgPolicy_ListCustomConstraints_sync] diff --git a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_list_policies_async.py b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_list_policies_async.py index 4c789bb83c6e..2d0089009714 100644 --- a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_list_policies_async.py +++ b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_list_policies_async.py @@ -50,4 +50,5 @@ async def sample_list_policies(): async for response in page_result: print(response) + # [END orgpolicy_v2_generated_OrgPolicy_ListPolicies_async] diff --git a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_list_policies_sync.py b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_list_policies_sync.py index 
d15f052b548c..89f168b99ce1 100644 --- a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_list_policies_sync.py +++ b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_list_policies_sync.py @@ -50,4 +50,5 @@ def sample_list_policies(): for response in page_result: print(response) + # [END orgpolicy_v2_generated_OrgPolicy_ListPolicies_sync] diff --git a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_update_custom_constraint_async.py b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_update_custom_constraint_async.py index e5633f69aa14..c2bdb979e3a0 100644 --- a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_update_custom_constraint_async.py +++ b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_update_custom_constraint_async.py @@ -39,8 +39,7 @@ async def sample_update_custom_constraint(): client = orgpolicy_v2.OrgPolicyAsyncClient() # Initialize request argument(s) - request = orgpolicy_v2.UpdateCustomConstraintRequest( - ) + request = orgpolicy_v2.UpdateCustomConstraintRequest() # Make the request response = await client.update_custom_constraint(request=request) @@ -48,4 +47,5 @@ async def sample_update_custom_constraint(): # Handle the response print(response) + # [END orgpolicy_v2_generated_OrgPolicy_UpdateCustomConstraint_async] diff --git a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_update_custom_constraint_sync.py b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_update_custom_constraint_sync.py index 577451fc7ed0..d99be2914e5e 100644 --- a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_update_custom_constraint_sync.py +++ 
b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_update_custom_constraint_sync.py @@ -39,8 +39,7 @@ def sample_update_custom_constraint(): client = orgpolicy_v2.OrgPolicyClient() # Initialize request argument(s) - request = orgpolicy_v2.UpdateCustomConstraintRequest( - ) + request = orgpolicy_v2.UpdateCustomConstraintRequest() # Make the request response = client.update_custom_constraint(request=request) @@ -48,4 +47,5 @@ def sample_update_custom_constraint(): # Handle the response print(response) + # [END orgpolicy_v2_generated_OrgPolicy_UpdateCustomConstraint_sync] diff --git a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_update_policy_async.py b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_update_policy_async.py index 6f195fa56eef..4a54917db05c 100644 --- a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_update_policy_async.py +++ b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_update_policy_async.py @@ -39,8 +39,7 @@ async def sample_update_policy(): client = orgpolicy_v2.OrgPolicyAsyncClient() # Initialize request argument(s) - request = orgpolicy_v2.UpdatePolicyRequest( - ) + request = orgpolicy_v2.UpdatePolicyRequest() # Make the request response = await client.update_policy(request=request) @@ -48,4 +47,5 @@ async def sample_update_policy(): # Handle the response print(response) + # [END orgpolicy_v2_generated_OrgPolicy_UpdatePolicy_async] diff --git a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_update_policy_sync.py b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_update_policy_sync.py index 05e2ef936996..d018533497d1 100644 --- 
a/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_update_policy_sync.py +++ b/packages/google-cloud-org-policy/samples/generated_samples/orgpolicy_v2_generated_org_policy_update_policy_sync.py @@ -39,8 +39,7 @@ def sample_update_policy(): client = orgpolicy_v2.OrgPolicyClient() # Initialize request argument(s) - request = orgpolicy_v2.UpdatePolicyRequest( - ) + request = orgpolicy_v2.UpdatePolicyRequest() # Make the request response = client.update_policy(request=request) @@ -48,4 +47,5 @@ def sample_update_policy(): # Handle the response print(response) + # [END orgpolicy_v2_generated_OrgPolicy_UpdatePolicy_sync] diff --git a/packages/google-maps-places/google/maps/places_v1/types/place.py b/packages/google-maps-places/google/maps/places_v1/types/place.py index fc1d0c2e0a5b..04848238424b 100644 --- a/packages/google-maps-places/google/maps/places_v1/types/place.py +++ b/packages/google-maps-places/google/maps/places_v1/types/place.py @@ -395,6 +395,19 @@ class Place(proto.Message): neighborhood_summary (google.maps.places_v1.types.Place.NeighborhoodSummary): A summary of points of interest near the place. + moved_place (str): + If this Place is permanently closed and has moved to a new + Place, this field contains the new Place's resource name, in + ``places/{place_id}`` format. If this Place moved multiple + times, this field will represent the first moved place. This + field will not be populated if this Place has not moved. + moved_place_id (str): + If this Place is permanently closed and has + moved to a new Place, this field contains the + new Place's place ID. If this Place moved + multiple times, this field will represent the + first moved Place. This field will not be + populated if this Place has not moved. """ class BusinessStatus(proto.Enum): @@ -1008,6 +1021,9 @@ class ReviewSummary(proto.Message): Gemini" (and its localized variants). This will be in the language specified in the request if available. 
+ reviews_uri (str): + A link to show reviews of this place on + Google Maps. """ text: localized_text_pb2.LocalizedText = proto.Field( @@ -1024,6 +1040,10 @@ class ReviewSummary(proto.Message): number=3, message=localized_text_pb2.LocalizedText, ) + reviews_uri: str = proto.Field( + proto.STRING, + number=4, + ) class EvChargeAmenitySummary(proto.Message): r"""The summary of amenities near the EV charging station. This only @@ -1040,7 +1060,7 @@ class EvChargeAmenitySummary(proto.Message): restaurant (google.maps.places_v1.types.ContentBlock): A summary of the nearby restaurants. store (google.maps.places_v1.types.ContentBlock): - A summary of the nearby gas stations. + A summary of the nearby stores. flag_content_uri (str): A link where users can flag a problem with the summary. @@ -1465,6 +1485,14 @@ class NeighborhoodSummary(proto.Message): number=91, message=NeighborhoodSummary, ) + moved_place: str = proto.Field( + proto.STRING, + number=93, + ) + moved_place_id: str = proto.Field( + proto.STRING, + number=94, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-maps-places/noxfile.py b/packages/google-maps-places/noxfile.py index 4c8c53f244c7..22156afa21cf 100644 --- a/packages/google-maps-places/noxfile.py +++ b/packages/google-maps-places/noxfile.py @@ -27,6 +27,10 @@ LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] +# Add samples to the list of directories to format if the directory exists. 
+if os.path.isdir("samples"): + LINT_PATHS.append("samples") + ALL_PYTHON = [ "3.7", "3.8", diff --git a/packages/google-maps-places/tests/unit/gapic/places_v1/test_places.py b/packages/google-maps-places/tests/unit/gapic/places_v1/test_places.py index cb513d0c3067..8ccb1fbffbf5 100644 --- a/packages/google-maps-places/tests/unit/gapic/places_v1/test_places.py +++ b/packages/google-maps-places/tests/unit/gapic/places_v1/test_places.py @@ -1808,6 +1808,8 @@ def test_get_place(request_type, transport: str = "grpc"): good_for_groups=True, good_for_watching_sports=True, pure_service_area_business=True, + moved_place="moved_place_value", + moved_place_id="moved_place_id_value", ) response = client.get_place(request) @@ -1861,6 +1863,8 @@ def test_get_place(request_type, transport: str = "grpc"): assert response.good_for_groups is True assert response.good_for_watching_sports is True assert response.pure_service_area_business is True + assert response.moved_place == "moved_place_value" + assert response.moved_place_id == "moved_place_id_value" def test_get_place_non_empty_request_with_auto_populated_field(): @@ -2032,6 +2036,8 @@ async def test_get_place_async( good_for_groups=True, good_for_watching_sports=True, pure_service_area_business=True, + moved_place="moved_place_value", + moved_place_id="moved_place_id_value", ) ) response = await client.get_place(request) @@ -2086,6 +2092,8 @@ async def test_get_place_async( assert response.good_for_groups is True assert response.good_for_watching_sports is True assert response.pure_service_area_business is True + assert response.moved_place == "moved_place_value" + assert response.moved_place_id == "moved_place_id_value" @pytest.mark.asyncio @@ -3530,6 +3538,8 @@ async def test_get_place_empty_call_grpc_asyncio(): good_for_groups=True, good_for_watching_sports=True, pure_service_area_business=True, + moved_place="moved_place_value", + moved_place_id="moved_place_id_value", ) ) await client.get_place(request=None) @@ -4037,6 
+4047,8 @@ def test_get_place_rest_call_success(request_type): good_for_groups=True, good_for_watching_sports=True, pure_service_area_business=True, + moved_place="moved_place_value", + moved_place_id="moved_place_id_value", ) # Wrap the value into a proper Response obj @@ -4095,6 +4107,8 @@ def test_get_place_rest_call_success(request_type): assert response.good_for_groups is True assert response.good_for_watching_sports is True assert response.pure_service_area_business is True + assert response.moved_place == "moved_place_value" + assert response.moved_place_id == "moved_place_id_value" @pytest.mark.parametrize("null_interceptor", [True, False])