From bcc129c4a6c95556d7e498916ac07b7f46dbf4d6 Mon Sep 17 00:00:00 2001 From: Daan Hoogland Date: Wed, 8 Nov 2023 09:36:21 +0100 Subject: [PATCH] cleanup vm life cycle test file --- test/integration/smoke/test_deploy_vm.py | 264 ++++ test/integration/smoke/test_hostha_kvm.py | 9 +- .../smoke/test_kvm_lifemigration.py | 263 ++++ .../smoke/test_migrate_vm_with_volume.py | 313 +++++ .../integration/smoke/test_primary_storage.py | 46 +- .../smoke/test_secondary_storage.py | 189 ++- .../smoke/test_secured_vm_migration.py | 363 +++++ .../smoke/test_vm_deployment_planner.py | 9 +- test/integration/smoke/test_vm_life_cycle.py | 1225 +---------------- test/integration/smoke/test_vmware_vapps.py | 289 ++++ 10 files changed, 1615 insertions(+), 1355 deletions(-) create mode 100644 test/integration/smoke/test_deploy_vm.py create mode 100644 test/integration/smoke/test_kvm_lifemigration.py create mode 100644 test/integration/smoke/test_migrate_vm_with_volume.py create mode 100644 test/integration/smoke/test_secured_vm_migration.py create mode 100644 test/integration/smoke/test_vmware_vapps.py diff --git a/test/integration/smoke/test_deploy_vm.py b/test/integration/smoke/test_deploy_vm.py new file mode 100644 index 000000000000..93acaea0ca07 --- /dev/null +++ b/test/integration/smoke/test_deploy_vm.py @@ -0,0 +1,264 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" BVT tests for Virtual Machine Life Cycle +""" +# Import Local Modules +from marvin.cloudstackTestCase import cloudstackTestCase +from marvin.cloudstackAPI import (recoverVirtualMachine, + destroyVirtualMachine, + attachIso, + detachIso, + provisionCertificate, + updateConfiguration, + migrateVirtualMachine, + migrateVirtualMachineWithVolume, + listNics, + listVolumes) +from marvin.lib.utils import * + +from marvin.lib.base import (Account, + ServiceOffering, + VirtualMachine, + Host, + Iso, + Router, + Configurations, + StoragePool, + Volume, + DiskOffering, + NetworkOffering, + Network) +from marvin.lib.common import (get_domain, + get_zone, + get_suitable_test_template, + get_test_ovf_templates, + list_hosts, + get_vm_vapp_configs) +from marvin.codes import FAILED, PASS +from nose.plugins.attrib import attr +from marvin.lib.decoratorGenerators import skipTestIf +# Import System modules +import time +import json +from operator import itemgetter + +_multiprocess_shared_ = True + +class TestDeployVM(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + testClient = super(TestDeployVM, cls).getClsTestClient() + cls.apiclient = testClient.getApiClient() + cls.services = testClient.getParsedTestDataConfig() + + # Get Zone, Domain and templates + cls.domain = get_domain(cls.apiclient) + cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests()) + cls.services['mode'] = cls.zone.networktype + cls.hypervisor = testClient.getHypervisorInfo() + + # If local storage is enabled, alter the offerings to use localstorage + # this step is needed for 
devcloud + if cls.zone.localstorageenabled == True: + cls.services["service_offerings"]["tiny"]["storagetype"] = 'local' + cls.services["service_offerings"]["small"]["storagetype"] = 'local' + cls.services["service_offerings"]["medium"]["storagetype"] = 'local' + + template = get_suitable_test_template( + cls.apiclient, + cls.zone.id, + cls.services["ostype"], + cls.hypervisor + ) + if template == FAILED: + assert False, "get_suitable_test_template() failed to return template with description %s" % cls.services["ostype"] + + # Set Zones and disk offerings + cls.services["small"]["zoneid"] = cls.zone.id + cls.services["small"]["template"] = template.id + + cls.services["iso1"]["zoneid"] = cls.zone.id + + cls._cleanup = [] + + cls.account = Account.create( + cls.apiclient, + cls.services["account"], + domainid=cls.domain.id + ) + cls._cleanup.append(cls.account) + cls.debug(cls.account.id) + + cls.service_offering = ServiceOffering.create( + cls.apiclient, + cls.services["service_offerings"]["tiny"] + ) + cls._cleanup.append(cls.service_offering) + + cls.virtual_machine = VirtualMachine.create( + cls.apiclient, + cls.services["small"], + accountid=cls.account.name, + domainid=cls.account.domainid, + serviceofferingid=cls.service_offering.id, + mode=cls.services['mode'] + ) + cls._cleanup.append(cls.virtual_machine) + + @classmethod + def tearDownClass(cls): + super(TestDeployVM, cls).tearDownClass() + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + + def tearDown(self): + super(TestDeployVM, self).tearDown() + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_deploy_vm(self): + """Test Deploy Virtual Machine + """ + # Validate the following: + # 1. Virtual Machine is accessible via SSH + # 2. 
listVirtualMachines returns accurate information + list_vm_response = VirtualMachine.list( + self.apiclient, + id=self.virtual_machine.id + ) + + self.debug( + "Verify listVirtualMachines response for virtual machine: %s" \ + % self.virtual_machine.id + ) + self.assertEqual( + isinstance(list_vm_response, list), + True, + "Check list response returns a valid list" + ) + self.assertNotEqual( + len(list_vm_response), + 0, + "Check VM available in List Virtual Machines" + ) + vm_response = list_vm_response[0] + self.assertEqual( + + vm_response.id, + self.virtual_machine.id, + "Check virtual machine id in listVirtualMachines" + ) + self.assertEqual( + vm_response.name, + self.virtual_machine.name, + "Check virtual machine name in listVirtualMachines" + ) + self.assertEqual( + vm_response.state, + 'Running', + msg="VM is not in Running state" + ) + return + + @attr(tags=["advanced"], required_hardware="false") + def test_advZoneVirtualRouter(self): + # TODO: SIMENH: duplicate test, remove it + """ + Test advanced zone virtual router + 1. Is Running + 2. is in the account the VM was deployed in + 3. Has a linklocalip, publicip and a guestip + @return: + """ + routers = Router.list(self.apiclient, account=self.account.name) + self.assertTrue(len(routers) > 0, msg="No virtual router found") + router = routers[0] + + self.assertEqual(router.state, 'Running', msg="Router is not in running state") + self.assertEqual(router.account, self.account.name, msg="Router does not belong to the account") + + # Has linklocal, public and guest ips + self.assertIsNotNone(router.linklocalip, msg="Router has no linklocal ip") + self.assertIsNotNone(router.publicip, msg="Router has no public ip") + self.assertIsNotNone(router.guestipaddress, msg="Router has no guest ip") + + @attr(mode=["basic"], required_hardware="false") + def test_basicZoneVirtualRouter(self): + # TODO: SIMENH: duplicate test, remove it + """ + Tests for basic zone virtual router + 1. Is Running + 2. 
is in the account the VM was deployed in + @return: + """ + routers = Router.list(self.apiclient, account=self.account.name) + self.assertTrue(len(routers) > 0, msg="No virtual router found") + router = routers[0] + + self.assertEqual(router.state, 'Running', msg="Router is not in running state") + self.assertEqual(router.account, self.account.name, msg="Router does not belong to the account") + + @attr(tags=['advanced', 'basic', 'sg'], required_hardware="false") + def test_deploy_vm_multiple(self): + """Test Multiple Deploy Virtual Machine + + # Validate the following: + # 1. deploy 2 virtual machines + # 2. listVirtualMachines using 'ids' parameter returns accurate information + """ + account = Account.create( + self.apiclient, + self.services["account"], + domainid=self.domain.id + ) + self.cleanup.append(account) + + virtual_machine1 = VirtualMachine.create( + self.apiclient, + self.services["small"], + accountid=account.name, + domainid=account.domainid, + serviceofferingid=self.service_offering.id + ) + self.cleanup.append(virtual_machine1) + virtual_machine2 = VirtualMachine.create( + self.apiclient, + self.services["small"], + accountid=account.name, + domainid=account.domainid, + serviceofferingid=self.service_offering.id + ) + self.cleanup.append(virtual_machine2) + + list_vms = VirtualMachine.list(self.apiclient, ids=[virtual_machine1.id, virtual_machine2.id], listAll=True) + self.debug( + "Verify listVirtualMachines response for virtual machines: %s, %s" % ( + virtual_machine1.id, virtual_machine2.id) + ) + self.assertEqual( + isinstance(list_vms, list), + True, + "List VM response was not a valid list" + ) + self.assertEqual( + len(list_vms), + 2, + "List VM response was empty, expected 2 VMs" + ) diff --git a/test/integration/smoke/test_hostha_kvm.py b/test/integration/smoke/test_hostha_kvm.py index 9b131558dc24..9f9dd1d48a73 100644 --- a/test/integration/smoke/test_hostha_kvm.py +++ b/test/integration/smoke/test_hostha_kvm.py @@ -79,6 +79,7 @@ def 
setUp(self): self.apiclient, self.services["service_offerings"]["hasmall"] ) + self.cleanup = [self.service_offering] self.template = get_test_template( self.apiclient, @@ -87,7 +88,6 @@ def setUp(self): ) self.configureAndDisableHostHa() - self.cleanup = [self.service_offering] def updateConfiguration(self, name, value): cmd = updateConfiguration.updateConfigurationCmd() @@ -116,9 +116,10 @@ def tearDown(self): self.dbclient.execute("delete from mshost where runid=%s" % self.getFakeMsRunId()) self.dbclient.execute("delete from cluster_details where name='outOfBandManagementEnabled'") self.dbclient.execute("delete from data_center_details where name='outOfBandManagementEnabled'") - cleanup_resources(self.apiclient, self.cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) + finally: + super(TestHAKVM, self).tearDown() def getHostHaEnableCmd(self): cmd = enableHAForHost.enableHAForHostCmd() @@ -292,7 +293,6 @@ def test_hostha_enable_ha_when_host_in_maintenance(self): # Enable HA self.configureAndEnableHostHa() - # Prepare for maintenance Host self.setHostToMaintanance(self.host.id) @@ -341,7 +341,6 @@ def test_remove_ha_provider_not_possible(self): """ self.logger.debug("Starting test_remove_ha_provider_not_possible") - # Enable HA self.apiclient.configureHAForHost(self.getHostHaConfigCmd()) cmd = self.getHostHaEnableCmd() @@ -393,7 +392,6 @@ def test_hostha_kvm_host_degraded(self): self.startAgent() self.waitUntilHostInState("Available") - @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="true") def test_hostha_kvm_host_recovering(self): """ @@ -581,7 +579,6 @@ def getIpmiServerIp(self): return s.getsockname()[0] def get_non_configured_ha_host(self): - response = list_hosts( self.apiclient, type='Routing' diff --git a/test/integration/smoke/test_kvm_lifemigration.py b/test/integration/smoke/test_kvm_lifemigration.py new file mode 100644 index 000000000000..22dfce381444 --- 
/dev/null +++ b/test/integration/smoke/test_kvm_lifemigration.py @@ -0,0 +1,263 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" BVT tests for Virtual Machine Life Cycle +""" +# Import Local Modules +from marvin.cloudstackTestCase import cloudstackTestCase +from marvin.cloudstackAPI import (recoverVirtualMachine, + destroyVirtualMachine, + attachIso, + detachIso, + provisionCertificate, + updateConfiguration, + migrateVirtualMachine, + migrateVirtualMachineWithVolume, + listNics, + listVolumes) +from marvin.lib.utils import * + +from marvin.lib.base import (Account, + ServiceOffering, + VirtualMachine, + Host, + Iso, + Router, + Configurations, + StoragePool, + Volume, + DiskOffering, + NetworkOffering, + Network) +from marvin.lib.common import (get_domain, + get_zone, + get_suitable_test_template, + get_test_ovf_templates, + list_hosts, + get_vm_vapp_configs) +from marvin.codes import FAILED, PASS +from nose.plugins.attrib import attr +from marvin.lib.decoratorGenerators import skipTestIf +# Import System modules +import time +import json +from operator import itemgetter + +_multiprocess_shared_ = True + +class TestKVMLiveMigration(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + testClient = 
super(TestKVMLiveMigration, cls).getClsTestClient() + cls.apiclient = testClient.getApiClient() + cls.services = testClient.getParsedTestDataConfig() + cls.hypervisor = testClient.getHypervisorInfo() + cls._cleanup = [] + + # Get Zone, Domain and templates + domain = get_domain(cls.apiclient) + cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests()) + cls.services['mode'] = cls.zone.networktype + cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][ + 0].__dict__ + cls.management_ip = cls.config.__dict__["mgtSvr"][0].__dict__["mgtSvrIp"] + + cls.hypervisorNotSupported = False + if cls.hypervisor.lower() not in ["kvm"]: + cls.hypervisorNotSupported = True + + template = get_suitable_test_template( + cls.apiclient, + cls.zone.id, + cls.services["ostype"], + cls.hypervisor + ) + if template == FAILED: + assert False, "get_suitable_test_template() failed to return template with description %s" % cls.services["ostype"] + + # Set Zones and disk offerings + cls.services["small"]["zoneid"] = cls.zone.id + cls.services["small"]["template"] = template.id + + cls.services["iso1"]["zoneid"] = cls.zone.id + + # Create VMs, NAT Rules etc + cls.account = Account.create( + cls.apiclient, + cls.services["account"], + domainid=domain.id + ) + + cls.small_offering = ServiceOffering.create( + cls.apiclient, + cls.services["service_offerings"]["small"] + ) + + cls._cleanup = [ + cls.small_offering, + cls.account + ] + + @classmethod + def tearDownClass(cls): + super(TestKVMLiveMigration,cls).tearDownClass() + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + + if self.hypervisorNotSupported: + self.skipTest("VM Live Migration with Volumes is not supported on other than KVM") + + self.hosts = Host.list( + self.apiclient, + zoneid=self.zone.id, + type='Routing', + hypervisor='KVM') + + if len(self.hosts) < 2: + 
self.skipTest("Requires at least two hosts for performing migration related tests") + + for host in self.hosts: + if host.details['Host.OS'] and host.details['Host.OS'].startswith('CentOS'): + self.skipTest("live migration is not stabily supported on CentOS") + + def tearDown(self): + super(TestKVMLiveMigration,self).tearDown() + + def get_target_host(self, virtualmachineid): + target_hosts = Host.listForMigration(self.apiclient, + virtualmachineid=virtualmachineid) + if len(target_hosts) < 1: + self.skipTest("No target hosts found") + + return target_hosts[0] + + def get_target_pool(self, volid): + target_pools = StoragePool.listForMigration(self.apiclient, id=volid) + + if len(target_pools) < 1: + self.skipTest("Not enough storage pools found") + + return target_pools[0] + + def get_vm_volumes(self, id): + return Volume.list(self.apiclient, virtualmachineid=id, listall=True) + + def deploy_vm(self): + return VirtualMachine.create( + self.apiclient, + self.services["small"], + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.small_offering.id, + mode=self.services["mode"]) + + def create_volume(self): + small_disk_offering = DiskOffering.list(self.apiclient, name='Small')[0] + + return Volume.create( + self.apiclient, + self.services, + account=self.account.name, + diskofferingid=small_disk_offering.id, + domainid=self.account.domainid, + zoneid=self.zone.id + ) + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false") + def test_01_migrate_VM_and_root_volume(self): + """Test VM will be migrated with it's root volume""" + # Validates the following: + # 1. Deploys a VM + # 2. Migrates the VM and the root volume to another host and storage pool + # 3. 
Asserts migration success and checks for location + + vm = self.deploy_vm() + + root_volume = self.get_vm_volumes(vm.id)[0] + + target_pool = self.get_target_pool(root_volume.id) + + target_host = self.get_target_host(vm.id) + + cmd = migrateVirtualMachineWithVolume.migrateVirtualMachineWithVolumeCmd() + + cmd.migrateto = [{"volume": str(root_volume.id), "pool": str(target_pool.id)}] + + cmd.virtualmachineid = vm.id + cmd.hostid = target_host.id + + response = self.apiclient.migrateVirtualMachineWithVolume(cmd) + + self.assertEqual(response.hostid, target_host.id) + + self.assertEqual(Volume.list(self.apiclient, id=root_volume.id)[0].storageid, + target_pool.id, + "Pool ID was not as expected") + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false") + def test_02_migrate_VM_with_two_data_disks(self): + """Test VM will be migrated with it's root volume""" + # Validate the following + # 1. Deploys a VM and attaches 2 data disks + # 2. Finds suitable host for migration + # 3. Finds suitable storage pool for volumes + # 4. 
Migrate the VM to new host and storage pool and assert migration successful + + vm = self.deploy_vm() + + volume1 = self.create_volume() + volume2 = self.create_volume() + + vm.attach_volume(self.apiclient, volume1) + vm.attach_volume(self.apiclient, volume2) + + root_volume = self.get_vm_volumes(vm.id)[0] + + target_pool = self.get_target_pool(root_volume.id) + volume1.target_pool = self.get_target_pool(volume1.id) + volume2.target_pool = self.get_target_pool(volume2.id) + + target_host = self.get_target_host(vm.id) + + cmd = migrateVirtualMachineWithVolume.migrateVirtualMachineWithVolumeCmd() + + cmd.migrateto = [{"volume": str(root_volume.id), "pool": str(target_pool.id)}, + {"volume": str(volume1.id), "pool": str(volume1.target_pool.id)}, + {"volume": str(volume2.id), "pool": str(volume2.target_pool.id)}] + cmd.virtualmachineid = vm.id + cmd.hostid = target_host.id + + response = self.apiclient.migrateVirtualMachineWithVolume(cmd) + + self.assertEqual(Volume.list(self.apiclient, id=root_volume.id)[0].storageid, + target_pool.id, + "Pool ID not as expected") + + self.assertEqual(Volume.list(self.apiclient, id=volume1.id)[0].storageid, + volume1.target_pool.id, + "Pool ID not as expected") + + self.assertEqual(Volume.list(self.apiclient, id=volume2.id)[0].storageid, + volume2.target_pool.id, + "Pool ID not as expected") + + self.assertEqual(response.hostid, + target_host.id, + "HostID not as expected") diff --git a/test/integration/smoke/test_migrate_vm_with_volume.py b/test/integration/smoke/test_migrate_vm_with_volume.py new file mode 100644 index 000000000000..f29252ac6766 --- /dev/null +++ b/test/integration/smoke/test_migrate_vm_with_volume.py @@ -0,0 +1,313 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" BVT tests for Virtual Machine Life Cycle +""" +# Import Local Modules +from marvin.cloudstackTestCase import cloudstackTestCase +from marvin.cloudstackAPI import (recoverVirtualMachine, + destroyVirtualMachine, + attachIso, + detachIso, + provisionCertificate, + updateConfiguration, + migrateVirtualMachine, + migrateVirtualMachineWithVolume, + listNics, + listVolumes) +from marvin.lib.utils import * + +from marvin.lib.base import (Account, + ServiceOffering, + VirtualMachine, + Host, + Iso, + Router, + Configurations, + StoragePool, + Volume, + DiskOffering, + NetworkOffering, + Network) +from marvin.lib.common import (get_domain, + get_zone, + get_suitable_test_template, + get_test_ovf_templates, + list_hosts, + get_vm_vapp_configs) +from marvin.codes import FAILED, PASS +from nose.plugins.attrib import attr +from marvin.lib.decoratorGenerators import skipTestIf +# Import System modules +import time +import json +from operator import itemgetter + +_multiprocess_shared_ = True + + +class TestMigrateVMwithVolume(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + testClient = super(TestMigrateVMwithVolume, cls).getClsTestClient() + cls.apiclient = testClient.getApiClient() + cls.services = testClient.getParsedTestDataConfig() + cls.hypervisor = testClient.getHypervisorInfo() + cls._cleanup = [] + + # Get Zone, Domain and templates + domain = get_domain(cls.apiclient) + cls.zone = 
get_zone(cls.apiclient, cls.testClient.getZoneForTests()) + cls.services['mode'] = cls.zone.networktype + cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][ + 0].__dict__ + cls.management_ip = cls.config.__dict__["mgtSvr"][0].__dict__["mgtSvrIp"] + + template = get_suitable_test_template( + cls.apiclient, + cls.zone.id, + cls.services["ostype"], + cls.hypervisor + ) + if template == FAILED: + assert False, "get_suitable_test_template() failed to return template with description %s" % cls.services["ostype"] + + # Set Zones and disk offerings + cls.services["small"]["zoneid"] = cls.zone.id + cls.services["small"]["template"] = template.id + + cls.services["iso1"]["zoneid"] = cls.zone.id + + # Create VMs, NAT Rules etc + cls.account = Account.create( + cls.apiclient, + cls.services["account"], + domainid=domain.id + ) + + cls.small_offering = ServiceOffering.create( + cls.apiclient, + cls.services["service_offerings"]["small"] + ) + + cls._cleanup = [ + cls.small_offering, + cls.account + ] + + @classmethod + def tearDownClass(cls): + super(TestMigrateVMwithVolume,cls).tearDownClass() + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + + if self.hypervisor.lower() not in ["vmware"]: + self.skipTest("VM Migration with Volumes is not supported on other than VMware") + + self.hosts = Host.list( + self.apiclient, + zoneid=self.zone.id, + type='Routing', + hypervisor='VMware') + + if len(self.hosts) < 2: + self.skipTest("Requires at least two hosts for performing migration related tests") + + def tearDown(self): + super(TestMigrateVMwithVolume,self).tearDown() + + def get_target_host(self, virtualmachineid): + target_hosts = Host.listForMigration(self.apiclient, + virtualmachineid=virtualmachineid) + if len(target_hosts) < 1: + self.skipTest("No target hosts found") + + return target_hosts[0] + + def get_target_pool(self, 
volid): + target_pools = StoragePool.listForMigration(self.apiclient, id=volid) + + if len(target_pools) < 1: + self.skipTest("Not enough storage pools found") + + return target_pools[0] + + def get_vm_volumes(self, id): + return Volume.list(self.apiclient, virtualmachineid=id, listall=True) + + def deploy_vm(self): + return VirtualMachine.create( + self.apiclient, + self.services["small"], + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.small_offering.id, + mode=self.services["mode"]) + + def migrate_vm_to_pool(self, target_pool, id): + + cmd = migrateVirtualMachine.migrateVirtualMachineCmd() + + cmd.storageid = target_pool.id + cmd.virtualmachineid = id + + return self.apiclient.migrateVirtualMachine(cmd) + + def create_volume(self): + small_disk_offering = DiskOffering.list(self.apiclient, name='Small')[0] + + return Volume.create( + self.apiclient, + self.services, + account=self.account.name, + diskofferingid=small_disk_offering.id, + domainid=self.account.domainid, + zoneid=self.zone.id + ) + + """ + BVT for Vmware Offline and Live VM and Volume Migration + """ + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false") + def test_01_offline_migrate_VM_and_root_volume(self): + """Test VM will be migrated with it's root volume""" + # Validate the following + # 1. Deploys a VM + # 2. Stops the VM + # 3. Finds suitable storage pool for root volume + # 4. 
Migrate the VM to new storage pool and assert migration successful + + vm = self.deploy_vm() + + root_volume = self.get_vm_volumes(vm.id)[0] + + target_pool = self.get_target_pool(root_volume.id) + + vm.stop(self.apiclient) + + self.migrate_vm_to_pool(target_pool, vm.id) + + root_volume = self.get_vm_volumes(vm.id)[0] + self.assertEqual(root_volume.storageid, target_pool.id, "Pool ID was not as expected") + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false") + def test_02_offline_migrate_VM_with_two_data_disks(self): + """Test VM will be migrated with it's root volume""" + # Validate the following + # 1. Deploys a VM and attaches 2 data disks + # 2. Stops the VM + # 3. Finds suitable storage pool for volumes + # 4. Migrate the VM to new storage pool and assert migration successful + + vm = self.deploy_vm() + + volume1 = self.create_volume() + volume2 = self.create_volume() + + vm.attach_volume(self.apiclient, volume1) + vm.attach_volume(self.apiclient, volume2) + + root_volume = self.get_vm_volumes(vm.id)[0] + + target_pool = self.get_target_pool(root_volume.id) + + vm.stop(self.apiclient) + + self.migrate_vm_to_pool(target_pool, vm.id) + + volume1 = Volume.list(self.apiclient, id=volume1.id)[0] + volume2 = Volume.list(self.apiclient, id=volume2.id)[0] + root_volume = self.get_vm_volumes(vm.id)[0] + + self.assertEqual(root_volume.storageid, target_pool.id, "Pool ID was not as expected") + self.assertEqual(volume1.storageid, target_pool.id, "Pool ID was not as expected") + self.assertEqual(volume2.storageid, target_pool.id, "Pool ID was not as expected") + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false") + def test_03_live_migrate_VM_with_two_data_disks(self): + """Test VM will be migrated with it's root volume""" + # Validate the following + # 1. Deploys a VM and attaches 2 data disks + # 2. Finds suitable host for migration + # 3. 
Finds suitable storage pool for volumes + # 4. Migrate the VM to new host and storage pool and assert migration successful + + vm = self.deploy_vm() + + root_volume = self.get_vm_volumes(vm.id)[0] + volume1 = self.create_volume() + volume2 = self.create_volume() + vm.attach_volume(self.apiclient, volume1) + vm.attach_volume(self.apiclient, volume2) + + target_host = self.get_target_host(vm.id) + target_pool = self.get_target_pool(root_volume.id) + volume1.target_pool = self.get_target_pool(volume1.id) + volume2.target_pool = self.get_target_pool(volume2.id) + + cmd = migrateVirtualMachineWithVolume.migrateVirtualMachineWithVolumeCmd() + cmd.migrateto = [{"volume": str(root_volume.id), "pool": str(target_pool.id)}, + {"volume": str(volume1.id), "pool": str(volume1.target_pool.id)}, + {"volume": str(volume2.id), "pool": str(volume2.target_pool.id)}] + cmd.virtualmachineid = vm.id + cmd.hostid = target_host.id + + response = self.apiclient.migrateVirtualMachineWithVolume(cmd) + + self.assertEqual(Volume.list(self.apiclient, id=root_volume.id)[0].storageid, + target_pool.id, + "Pool ID not as expected") + + self.assertEqual(Volume.list(self.apiclient, id=volume1.id)[0].storageid, + volume1.target_pool.id, + "Pool ID not as expected") + + self.assertEqual(Volume.list(self.apiclient, id=volume2.id)[0].storageid, + volume2.target_pool.id, + "Pool ID not as expected") + + self.assertEqual(response.hostid, + target_host.id, + "HostID not as expected") + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false") + def test_04_migrate_detached_volume(self): + """Test VM will be migrated with it's root volume""" + # Validate the following + # 1. Deploys a VM and attaches 1 data disk + # 2. Detaches the Disk + # 3. Finds suitable storage pool for the Disk + # 4. 
Migrate the storage pool and assert migration successful + + vm = self.deploy_vm() + + volume1 = self.create_volume() + + vm.attach_volume(self.apiclient, volume1) + vm.detach_volume(self.apiclient, volume1) + + target_pool = self.get_target_pool(volume1.id) + + Volume.migrate(self.apiclient, storageid=target_pool.id, volumeid=volume1.id) + + vol = Volume.list(self.apiclient, id=volume1.id)[0] + + self.assertEqual(vol.storageid, target_pool.id, "Storage pool was not the same as expected") diff --git a/test/integration/smoke/test_primary_storage.py b/test/integration/smoke/test_primary_storage.py index 477d3317ad69..96d3321a05c1 100644 --- a/test/integration/smoke/test_primary_storage.py +++ b/test/integration/smoke/test_primary_storage.py @@ -60,28 +60,22 @@ def setUp(self): return def tearDown(self): - try: - # Clean up, terminate the created templates - cleanup_resources(self.apiclient, self.cleanup) - - except Exception as e: - raise Exception("Warning: Exception during cleanup : %s" % e) - return + super(TestPrimaryStorageServices, self).tearDown() @attr(tags=["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") def test_01_primary_storage_nfs(self): """Test primary storage pools - XEN, KVM, VMWare. Not Supported for hyperv + + Validate the following: + 1. List Clusters + 2. verify that the cluster is in 'Enabled' allocation state + 3. verify that the host is added successfully and + in Up state with listHosts api response """ if self.hypervisor.lower() in ["hyperv"]: raise self.skipTest("NFS primary storage not supported for Hyper-V") - # Validate the following: - # 1. List Clusters - # 2. verify that the cluster is in 'Enabled' allocation state - # 3. 
verify that the host is added successfully and - # in Up state with listHosts api response - # Create NFS storage pools with on XEN/KVM/VMWare clusters clusters = list_clusters( @@ -156,14 +150,17 @@ def test_01_primary_storage_nfs(self): storage_response.type, "Check storage pool type " ) - # Call cleanup for reusing primary storage - cleanup_resources(self.apiclient, self.cleanup) - self.cleanup = [] return @attr(tags=["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="true") def test_01_primary_storage_iscsi(self): """Test primary storage pools - XEN. Not Supported for kvm,hyperv,vmware + + Validate the following: + 1. List Clusters + 2. verify that the cluster is in 'Enabled' allocation state + 3. verify that the host is added successfully and + in Up state with listHosts api response """ if self.hypervisor.lower() in ["kvm", "hyperv", "vmware", "lxc"]: @@ -172,12 +169,6 @@ def test_01_primary_storage_iscsi(self): if not self.services["configurableData"]["iscsi"]["url"]: raise self.skipTest("iscsi test storage url not setup, skipping") - # Validate the following: - # 1. List Clusters - # 2. verify that the cluster is in 'Enabled' allocation state - # 3. 
verify that the host is added successfully and - # in Up state with listHosts api response - # Create iSCSI storage pools with on XEN/KVM clusters clusters = list_clusters( self.apiclient, @@ -251,10 +242,6 @@ def test_01_primary_storage_iscsi(self): storage_response.type, "Check storage pool type " ) - # Call cleanup for reusing primary storage - cleanup_resources(self.apiclient, self.cleanup) - self.cleanup = [] - return @attr(tags=["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") @@ -293,7 +280,7 @@ def test_01_add_primary_storage_disabled_host(self): zoneid=self.zone.id, podid=self.pod.id ) - # self.cleanup.append(storage_pool_2) + self.cleanup.append(storage_pool_2) # Enable host and disable others Host.update(self.apiclient, id=selected_host.id, allocationstate="Enable") @@ -319,6 +306,7 @@ def test_01_add_primary_storage_disabled_host(self): self.services["account"], domainid=self.domain.id ) + self.cleanup.append(account) service_offering = ServiceOffering.create( self.apiclient, @@ -336,7 +324,6 @@ def test_01_add_primary_storage_disabled_host(self): serviceofferingid=service_offering.id ) self.cleanup.append(self.virtual_machine) - self.cleanup.append(account) finally: # cancel maintenance for pool in storage_pool_list: @@ -351,14 +338,13 @@ def test_01_add_primary_storage_disabled_host(self): continue Host.update(self.apiclient, id=host.id, allocationstate="Enable") - cleanup_resources(self.apiclient, self.cleanup) - self.cleanup = [] StoragePool.enableMaintenance(self.apiclient, storage_pool_2.id) time.sleep(30); cmd = deleteStoragePool.deleteStoragePoolCmd() cmd.id = storage_pool_2.id cmd.forced = True self.apiclient.deleteStoragePool(cmd) + self.cleanup.remove(storage_pool_2) return diff --git a/test/integration/smoke/test_secondary_storage.py b/test/integration/smoke/test_secondary_storage.py index 4b26950ea646..ad34c8f1fa16 100644 --- a/test/integration/smoke/test_secondary_storage.py +++ 
b/test/integration/smoke/test_secondary_storage.py @@ -41,12 +41,7 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - try: - #Cleanup resources used - cleanup_resources(cls.apiclient, cls._cleanup) - except Exception as e: - raise Exception("Warning: Exception during cleanup : %s" % e) - return + super(TestSecStorageServices, cls).tearDownClass() def setUp(self): self.apiclient = self.testClient.getApiClient() @@ -76,83 +71,75 @@ def setUp(self): return def tearDown(self): - try: - #Clean up, terminate the created templates - cleanup_resources(self.apiclient, self.cleanup) - except Exception as e: - raise Exception("Warning: Exception during cleanup : %s" % e) - return + super(TestSecStorageServices, self).tearDown() @attr(tags = ["advanced", "advancedns", "smoke", "basic", "eip", "sg"], required_hardware="false") def test_01_sys_vm_start(self): """Test system VM start - """ - # 1. verify listHosts has all 'routing' hosts in UP state - # 2. verify listStoragePools shows all primary storage pools - # in UP state - # 3. verify that secondary storage was added successfully + 1. verify listHosts has all 'routing' hosts in UP state + 2. verify listStoragePools shows all primary storage pools in UP state + 3. 
verify that secondary storage was added successfully + """ list_hosts_response = list_hosts( - self.apiclient, - type='Routing', - ) + self.apiclient, + type='Routing', + ) self.assertEqual( - isinstance(list_hosts_response, list), - True, - "Check list response returns a valid list" - ) + isinstance(list_hosts_response, list), + True, + "Check list response returns a valid list" + ) # ListHosts has all 'routing' hosts in UP state self.assertNotEqual( - len(list_hosts_response), - 0, - "Check list host response" - ) + len(list_hosts_response), + 0, + "Check list host response" + ) for host in list_hosts_response: self.assertEqual( - host.state, - 'Up', - "Check state of routing hosts is Up or not" - ) + host.state, + 'Up', + "Check state of routing hosts is Up or not" + ) # ListStoragePools shows all primary storage pools in UP state - list_storage_response = list_storage_pools( - self.apiclient, - ) + list_storage_response = list_storage_pools(self.apiclient) self.assertEqual( - isinstance(list_storage_response, list), - True, - "Check list response returns a valid list" - ) + isinstance(list_storage_response, list), + True, + "Check list response returns a valid list" + ) self.assertNotEqual( - len(list_storage_response), - 0, - "Check list storage pools response" - ) + len(list_storage_response), + 0, + "Check list storage pools response" + ) for primary_storage in list_hosts_response: self.assertEqual( - primary_storage.state, - 'Up', - "Check state of primary storage pools is Up or not" - ) + primary_storage.state, + 'Up', + "Check state of primary storage pools is Up or not" + ) for _ in range(2): list_ssvm_response = list_ssvms( - self.apiclient, - systemvmtype='secondarystoragevm', - ) + self.apiclient, + systemvmtype='secondarystoragevm', + ) self.assertEqual( - isinstance(list_ssvm_response, list), - True, - "Check list response returns a valid list" - ) + isinstance(list_ssvm_response, list), + True, + "Check list response returns a valid list" + ) 
#Verify SSVM response self.assertNotEqual( - len(list_ssvm_response), - 0, - "Check list System VMs response" - ) + len(list_ssvm_response), + 0, + "Check list System VMs response" + ) for ssvm in list_ssvm_response: if ssvm.state != 'Running': @@ -160,22 +147,21 @@ def test_01_sys_vm_start(self): continue for ssvm in list_ssvm_response: self.assertEqual( - ssvm.state, - 'Running', - "Check whether state of SSVM is running" - ) + ssvm.state, + 'Running', + "Check whether state of SSVM is running" + ) return @attr(tags = ["advanced", "advancedns", "smoke", "basic", "eip", "sg"], required_hardware="false") def test_02_sys_template_ready(self): """Test system templates are ready - """ - # Validate the following - # If SSVM is in UP state and running - # 1. wait for listTemplates to show all builtin templates downloaded and - # in Ready state + Validate the following + If SSVM is in UP state and running + 1. wait for listTemplates to show all builtin templates downloaded and in Ready state + """ hypervisors = {} for zone in self.config.zones: @@ -187,15 +173,14 @@ def test_02_sys_template_ready(self): for k, v in list(hypervisors.items()): self.debug("Checking BUILTIN templates in zone: %s" %zid) list_template_response = list_templates( - self.apiclient, - hypervisor=k, - zoneid=zid, - templatefilter=v, - listall=True, - account='system' - ) - self.assertEqual(validateList(list_template_response)[0], PASS,\ - "templates list validation failed") + self.apiclient, + hypervisor=k, + zoneid=zid, + templatefilter=v, + listall=True, + account='system' + ) + self.assertEqual(validateList(list_template_response)[0], PASS, "templates list validation failed") # Ensure all BUILTIN templates are downloaded templateid = None @@ -204,13 +189,13 @@ def test_02_sys_template_ready(self): templateid = template.id template_response = list_templates( - self.apiclient, - id=templateid, - zoneid=zid, - templatefilter=v, - listall=True, - account='system' - ) + self.apiclient, + 
id=templateid, + zoneid=zid, + templatefilter=v, + listall=True, + account='system' + ) if isinstance(template_response, list): template = template_response[0] else: @@ -230,13 +215,13 @@ def test_02_sys_template_ready(self): @attr(tags = ["advanced", "advancedns", "smoke", "basic", "eip", "sg"], required_hardware="false") def test_03_check_read_only_flag(self): """Test the secondary storage read-only flag - """ - # Validate the following - # It is possible to enable/disable the read-only flag on a secondary storage and filter by it - # 1. Make the first secondary storage as read-only and verify its state has been changed - # 2. Search for the read-only storages and make sure ours is in the list - # 3. Make it again read/write and verify it has been set properly + Validate the following + It is possible to enable/disable the read-only flag on a secondary storage and filter by it + 1. Make the first secondary storage as read-only and verify its state has been changed + 2. Search for the read-only storages and make sure ours is in the list + 3. Make it again read/write and verify it has been set properly + """ first_storage = self.list_secondary_storages(self.apiclient)[0] first_storage_id = first_storage['id'] @@ -275,15 +260,15 @@ def test_03_check_read_only_flag(self): @attr(tags = ["advanced", "advancedns", "smoke", "basic", "eip", "sg"], required_hardware="false") def test_04_migrate_to_read_only_storage(self): """Test migrations to a read-only secondary storage - """ - # Validate the following - # It is not possible to migrate a storage to a read-only one - # NOTE: This test requires more than one secondary storage in the system - # 1. Make the first storage read-only - # 2. Try complete migration from the second to the first storage - it should fail - # 3. Try balanced migration from the second to the first storage - it should fail - # 4. 
Make the first storage read-write again + Validate the following + It is not possible to migrate a storage to a read-only one + NOTE: This test requires more than one secondary storage in the system + 1. Make the first storage read-only + 2. Try complete migration from the second to the first storage - it should fail + 3. Try balanced migration from the second to the first storage - it should fail + 4. Make the first storage read-write again + """ storages = self.list_secondary_storages(self.apiclient) if (len(storages)) < 2: @@ -332,12 +317,12 @@ def test_04_migrate_to_read_only_storage(self): @attr(tags = ["advanced", "advancedns", "smoke", "basic", "eip", "sg"], required_hardware="false") def test_05_migrate_to_less_free_space(self): """Test migrations when the destination storage has less space - """ - # Validate the following - # Migration to a secondary storage with less space should be refused - # NOTE: This test requires more than one secondary storage in the system - # 1. Try complete migration from a storage with more (or equal) free space - migration should be refused + Validate the following + Migration to a secondary storage with less space should be refused + NOTE: This test requires more than one secondary storage in the system + 1. Try complete migration from a storage with more (or equal) free space - migration should be refused + """ storages = self.list_secondary_storages(self.apiclient) if (len(storages)) < 2 or (storages[0]['zoneid'] != storages[1]['zoneid']): diff --git a/test/integration/smoke/test_secured_vm_migration.py b/test/integration/smoke/test_secured_vm_migration.py new file mode 100644 index 000000000000..3e1669f41dcd --- /dev/null +++ b/test/integration/smoke/test_secured_vm_migration.py @@ -0,0 +1,363 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" BVT tests for Virtual Machine Life Cycle +""" +# Import Local Modules +from marvin.cloudstackTestCase import cloudstackTestCase +from marvin.cloudstackAPI import (recoverVirtualMachine, + destroyVirtualMachine, + attachIso, + detachIso, + provisionCertificate, + updateConfiguration, + migrateVirtualMachine, + migrateVirtualMachineWithVolume, + listNics, + listVolumes) +from marvin.lib.utils import * + +from marvin.lib.base import (Account, + ServiceOffering, + VirtualMachine, + Host, + Iso, + Router, + Configurations, + StoragePool, + Volume, + DiskOffering, + NetworkOffering, + Network) +from marvin.lib.common import (get_domain, + get_zone, + get_suitable_test_template, + get_test_ovf_templates, + list_hosts, + get_vm_vapp_configs) +from marvin.codes import FAILED, PASS +from nose.plugins.attrib import attr +from marvin.lib.decoratorGenerators import skipTestIf +# Import System modules +import time +import json +from operator import itemgetter + +_multiprocess_shared_ = True + + +class TestSecuredVmMigration(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + testClient = super(TestSecuredVmMigration, cls).getClsTestClient() + cls.apiclient = testClient.getApiClient() + cls.services = testClient.getParsedTestDataConfig() + cls.hypervisor = testClient.getHypervisorInfo() + cls._cleanup = [] + + cls.hypervisorNotSupported = False + if cls.hypervisor.lower() not in ["kvm"]: + 
cls.hypervisorNotSupported = True + + # Get Zone, Domain and templates + domain = get_domain(cls.apiclient) + cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests()) + cls.services['mode'] = cls.zone.networktype + cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][ + 0].__dict__ + cls.management_ip = cls.config.__dict__["mgtSvr"][0].__dict__["mgtSvrIp"] + + template = get_suitable_test_template( + cls.apiclient, + cls.zone.id, + cls.services["ostype"], + cls.hypervisor + ) + if template == FAILED: + assert False, "get_suitable_test_template() failed to return template with description %s" % cls.services["ostype"] + + # Set Zones and disk offerings + cls.services["small"]["zoneid"] = cls.zone.id + cls.services["small"]["template"] = template.id + + cls.services["iso1"]["zoneid"] = cls.zone.id + + # Create VMs, NAT Rules etc + cls.account = Account.create( + cls.apiclient, + cls.services["account"], + domainid=domain.id + ) + + cls.small_offering = ServiceOffering.create( + cls.apiclient, + cls.services["service_offerings"]["small"] + ) + + cls._cleanup = [ + cls.small_offering, + cls.account + ] + + @classmethod + def tearDownClass(cls): + if cls.hypervisor.lower() in ["kvm"]: + cls.ensure_all_hosts_are_up() + super(TestSecuredVmMigration, cls).tearDownClass() + + @classmethod + def ensure_all_hosts_are_up(cls): + hosts = Host.list( + cls.apiclient, + zoneid=cls.zone.id, + type='Routing', + hypervisor='KVM' + ) + for host in hosts: + if host.state != "Up": + SshClient(host.ipaddress, port=22, user=cls.hostConfig["username"], passwd=cls.hostConfig["password"]) \ + .execute("service cloudstack-agent stop ; \ + sleep 10 ; \ + service cloudstack-agent start") + interval = 5 + retries = 10 + while retries > -1: + time.sleep(interval) + restarted_host = Host.list( + cls.apiclient, + id=host.id, + type='Routing' + )[0] + if restarted_host.state == "Up": + break + retries = retries - 1 + + def 
setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + + if self.hypervisorNotSupported: + self.skipTest("Secured migration is not supported on other than KVM") + + self.hosts = Host.list( + self.apiclient, + zoneid=self.zone.id, + type='Routing', + hypervisor='KVM', + state='Up') + + if len(self.hosts) < 2: + self.skipTest("Requires at least two hosts for performing migration related tests") + + self.secure_all_hosts() + self.updateConfiguration("ca.plugin.root.auth.strictness", "false") + + def tearDown(self): + self.secure_all_hosts() + self.updateConfiguration("ca.plugin.root.auth.strictness", "true") + super(TestSecuredVmMigration, self).tearDown() + + def get_target_host(self, secured, virtualmachineid): + target_hosts = Host.listForMigration(self.apiclient, + virtualmachineid=virtualmachineid) + for host in target_hosts: + h = list_hosts(self.apiclient, type='Routing', id=host.id)[0] + if h.details.secured == secured: + return h + + cloudstackTestCase.skipTest(self, "No target hosts available, skipping test.") + + def check_migration_protocol(self, protocol, host): + resp = SshClient(host.ipaddress, port=22, user=self.hostConfig["username"], passwd=self.hostConfig["password"]) \ + .execute("grep -a listen_%s=1 /etc/libvirt/libvirtd.conf | tail -1" % protocol) + + if protocol not in resp[0]: + cloudstackTestCase.fail(self, "Libvirt listen protocol expected: '" + protocol + "\n" + "does not match actual: " + resp[0]) + + def migrate_and_check(self, vm, src_host, dest_host, proto='tls'): + """ + Migrates a VM from source host to destination host and checks status + """ + self.check_migration_protocol(protocol=proto, host=src_host) + vm.migrate(self.apiclient, hostid=dest_host.id) + vm_response = VirtualMachine.list(self.apiclient, id=vm.id)[0] + self.assertEqual(vm_response.hostid, dest_host.id, "Check destination host ID of migrated VM") + + def waitUntilHostInState(self, hostId, 
state="Up", interval=5, retries=20): + while retries > -1: + time.sleep(interval) + host = Host.list( + self.apiclient, + hostid=hostId, + type='Routing' + )[0] + if host.state != state: + if retries >= 0: + retries = retries - 1 + continue + else: + print("Host %s now showing as %s" % (hostId, state)) + return + + def unsecure_host(self, host): + SshClient(host.ipaddress, port=22, user=self.hostConfig["username"], passwd=self.hostConfig["password"]) \ + .execute("rm -f /etc/cloudstack/agent/cloud* && \ + service cloudstack-agent stop ; \ + service libvirtd stop ; \ + service libvirt-bin stop ; \ + sed -i 's/listen_tls.*/listen_tls=0/g' /etc/libvirt/libvirtd.conf && \ + sed -i 's/listen_tcp.*/listen_tcp=1/g' /etc/libvirt/libvirtd.conf && \ + sed -i '/.*_file=.*/d' /etc/libvirt/libvirtd.conf && \ + sed -i 's/vnc_tls.*/vnc_tls=0/g' /etc/libvirt/qemu.conf && \ + service libvirtd start ; \ + service libvirt-bin start ; \ + sleep 30 ; \ + service cloudstack-agent start") + time.sleep(30) + print("Unsecuring Host: %s" % (host.name)) + self.waitUntilHostInState(hostId=host.id, state="Up") + self.check_connection(host=host, secured='false') + return host + + def secure_all_hosts(self): + for host in self.hosts: + cmd = provisionCertificate.provisionCertificateCmd() + cmd.hostid = host.id + cmd.reconnect = True + self.apiclient.provisionCertificate(cmd) + + for host in self.hosts: + print("Securing Host %s" % host.name) + self.waitUntilHostInState(hostId=host.id, state="Up") + self.check_connection(secured='true', host=host) + + def deploy_vm(self, origin_host): + return VirtualMachine.create( + self.apiclient, + self.services["small"], + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.small_offering.id, + mode=self.services["mode"], + hostid=origin_host.id) + + def check_connection(self, secured, host, retries=20, interval=6): + + while retries > -1: + time.sleep(interval) + host = Host.list( + self.apiclient, + zoneid=self.zone.id, 
+ hostid=host.id, + type='Routing' + )[0] + if host.details.secured != secured: + if retries >= 0: + retries = retries - 1 + continue + else: + return + + raise Exception("Host detail 'secured' was expected: " + secured + + ", actual is: " + host.details.secured) + + def updateConfiguration(self, name, value): + cmd = updateConfiguration.updateConfigurationCmd() + cmd.name = name + cmd.value = value + self.apiclient.updateConfiguration(cmd) + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false") + def test_01_secure_vm_migration(self): + """Test secure VM migration""" + # Validate the following + # 1. Environment has enough hosts for migration + # 2. DeployVM on suitable host (with another host in the cluster) + # 3. Migrate the VM and assert migration successful + + src_host = self.hosts[0] + vm = self.deploy_vm(src_host) + self.cleanup.append(vm) + + self.debug("Securing Host(s)") + dest_host = self.get_target_host(secured='true', virtualmachineid=vm.id) + self.migrate_and_check(vm, src_host, dest_host) + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false") + def test_02_unsecure_vm_migration(self): + """Test Non-secured VM Migration + """ + # Validate the following + # 1. Prepare 2 hosts to run in non-secured mode + # 2. DeployVM on suitable host (with another host in the cluster) + # 3. 
Migrate the VM and assert migration successful + + for host in self.hosts: + self.unsecure_host(host) + + src_host = self.hosts[0] + vm = self.deploy_vm(src_host) + self.cleanup.append(vm) + + dest_host = self.get_target_host(secured='false', virtualmachineid=vm.id) + self.migrate_and_check(vm, src_host, dest_host, proto='tcp') + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false") + def test_03_secured_to_nonsecured_vm_migration(self): + """Test secured to non-secured VM migration + """ + # Validate the following + # 1. Makes one of the hosts non-secured + # 2. Deploys a VM to a Secured host + # 3. Migrates the VM to the non-secured host via TLS, and ensure exception + + unsecure_host = self.unsecure_host(self.hosts[0]) + secure_host = self.hosts[1] + + vm = self.deploy_vm(secure_host) + self.cleanup.append(vm) + + try: + self.migrate_and_check(vm, secure_host, unsecure_host, proto='tls') + except Exception: + pass + else: + self.fail("Migration succeeded, instead it should fail") + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false") + def test_04_nonsecured_to_secured_vm_migration(self): + """Test non-secured to secured VM migration + """ + # Validate the following + # 1. Makes one of the hosts non-secured + # 2. Deploys a VM to the non-secured host + # 3. 
Migrates the VM to the secured host via TCP, and ensure exception + + unsecure_host = self.unsecure_host(self.hosts[0]) + secure_host = self.hosts[1] + + vm = self.deploy_vm(unsecure_host) + self.cleanup.append(vm) + + try: + self.migrate_and_check(vm, unsecure_host, secure_host, proto='tcp') + except Exception: + pass + else: + self.fail("Migration succeeded, instead it should fail") diff --git a/test/integration/smoke/test_vm_deployment_planner.py b/test/integration/smoke/test_vm_deployment_planner.py index e8d24cbf31e5..50aeb212901e 100644 --- a/test/integration/smoke/test_vm_deployment_planner.py +++ b/test/integration/smoke/test_vm_deployment_planner.py @@ -42,8 +42,8 @@ def setUpClass(cls): cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests()) cls.hypervisor = testClient.getHypervisorInfo() cls.services['mode'] = cls.zone.networktype - cls.services["virtual_machine"]["zoneid"] = cls.zone.id + cls._cleanup = [] # Create an account, network, VM and IP addresses cls.account = Account.create( @@ -51,15 +51,14 @@ def setUpClass(cls): cls.services["account"], domainid=cls.domain.id ) + cls._cleanup.append(cls.account) + cls.service_offering = ServiceOffering.create( cls.apiclient, cls.services["service_offerings"]["tiny"] ) + cls._cleanup.append(cls.service_offering) - cls._cleanup = [ - cls.account, - cls.service_offering - ] @classmethod def tearDownClass(cls): diff --git a/test/integration/smoke/test_vm_life_cycle.py b/test/integration/smoke/test_vm_life_cycle.py index c7c9a01bd32c..43c67bf4604e 100644 --- a/test/integration/smoke/test_vm_life_cycle.py +++ b/test/integration/smoke/test_vm_life_cycle.py @@ -59,208 +59,6 @@ _multiprocess_shared_ = True -class TestDeployVM(cloudstackTestCase): - - @classmethod - def setUpClass(cls): - testClient = super(TestDeployVM, cls).getClsTestClient() - cls.apiclient = testClient.getApiClient() - cls.services = testClient.getParsedTestDataConfig() - - # Get Zone, Domain and templates - cls.domain = 
get_domain(cls.apiclient) - cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests()) - cls.services['mode'] = cls.zone.networktype - cls.hypervisor = testClient.getHypervisorInfo() - - # If local storage is enabled, alter the offerings to use localstorage - # this step is needed for devcloud - if cls.zone.localstorageenabled == True: - cls.services["service_offerings"]["tiny"]["storagetype"] = 'local' - cls.services["service_offerings"]["small"]["storagetype"] = 'local' - cls.services["service_offerings"]["medium"]["storagetype"] = 'local' - - template = get_suitable_test_template( - cls.apiclient, - cls.zone.id, - cls.services["ostype"], - cls.hypervisor - ) - if template == FAILED: - assert False, "get_suitable_test_template() failed to return template with description %s" % cls.services["ostype"] - - # Set Zones and disk offerings - cls.services["small"]["zoneid"] = cls.zone.id - cls.services["small"]["template"] = template.id - - cls.services["iso1"]["zoneid"] = cls.zone.id - - cls._cleanup = [] - - cls.account = Account.create( - cls.apiclient, - cls.services["account"], - domainid=cls.domain.id - ) - cls._cleanup.append(cls.account) - cls.debug(cls.account.id) - - cls.service_offering = ServiceOffering.create( - cls.apiclient, - cls.services["service_offerings"]["tiny"] - ) - cls._cleanup.append(cls.service_offering) - - cls.virtual_machine = VirtualMachine.create( - cls.apiclient, - cls.services["small"], - accountid=cls.account.name, - domainid=cls.account.domainid, - serviceofferingid=cls.service_offering.id, - mode=cls.services['mode'] - ) - - @classmethod - def tearDownClass(cls): - super(TestDeployVM, cls).tearDownClass() - - def setUp(self): - self.apiclient = self.testClient.getApiClient() - self.dbclient = self.testClient.getDbConnection() - self.cleanup = [] - - @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") - def test_deploy_vm(self): - """Test Deploy Virtual Machine - """ - # Validate 
the following: - # 1. Virtual Machine is accessible via SSH - # 2. listVirtualMachines returns accurate information - list_vm_response = VirtualMachine.list( - self.apiclient, - id=self.virtual_machine.id - ) - - self.debug( - "Verify listVirtualMachines response for virtual machine: %s" \ - % self.virtual_machine.id - ) - self.assertEqual( - isinstance(list_vm_response, list), - True, - "Check list response returns a valid list" - ) - self.assertNotEqual( - len(list_vm_response), - 0, - "Check VM available in List Virtual Machines" - ) - vm_response = list_vm_response[0] - self.assertEqual( - - vm_response.id, - self.virtual_machine.id, - "Check virtual machine id in listVirtualMachines" - ) - self.assertEqual( - vm_response.name, - self.virtual_machine.name, - "Check virtual machine name in listVirtualMachines" - ) - self.assertEqual( - vm_response.state, - 'Running', - msg="VM is not in Running state" - ) - return - - @attr(tags=["advanced"], required_hardware="false") - def test_advZoneVirtualRouter(self): - # TODO: SIMENH: duplicate test, remove it - """ - Test advanced zone virtual router - 1. Is Running - 2. is in the account the VM was deployed in - 3. 
Has a linklocalip, publicip and a guestip - @return: - """ - routers = Router.list(self.apiclient, account=self.account.name) - self.assertTrue(len(routers) > 0, msg="No virtual router found") - router = routers[0] - - self.assertEqual(router.state, 'Running', msg="Router is not in running state") - self.assertEqual(router.account, self.account.name, msg="Router does not belong to the account") - - # Has linklocal, public and guest ips - self.assertIsNotNone(router.linklocalip, msg="Router has no linklocal ip") - self.assertIsNotNone(router.publicip, msg="Router has no public ip") - self.assertIsNotNone(router.guestipaddress, msg="Router has no guest ip") - - @attr(mode=["basic"], required_hardware="false") - def test_basicZoneVirtualRouter(self): - # TODO: SIMENH: duplicate test, remove it - """ - Tests for basic zone virtual router - 1. Is Running - 2. is in the account the VM was deployed in - @return: - """ - routers = Router.list(self.apiclient, account=self.account.name) - self.assertTrue(len(routers) > 0, msg="No virtual router found") - router = routers[0] - - self.assertEqual(router.state, 'Running', msg="Router is not in running state") - self.assertEqual(router.account, self.account.name, msg="Router does not belong to the account") - - @attr(tags=['advanced', 'basic', 'sg'], required_hardware="false") - def test_deploy_vm_multiple(self): - """Test Multiple Deploy Virtual Machine - - # Validate the following: - # 1. deploy 2 virtual machines - # 2. 
listVirtualMachines using 'ids' parameter returns accurate information - """ - account = Account.create( - self.apiclient, - self.services["account"], - domainid=self.domain.id - ) - self.cleanup.append(account) - - virtual_machine1 = VirtualMachine.create( - self.apiclient, - self.services["small"], - accountid=account.name, - domainid=account.domainid, - serviceofferingid=self.service_offering.id - ) - virtual_machine2 = VirtualMachine.create( - self.apiclient, - self.services["small"], - accountid=account.name, - domainid=account.domainid, - serviceofferingid=self.service_offering.id - ) - - list_vms = VirtualMachine.list(self.apiclient, ids=[virtual_machine1.id, virtual_machine2.id], listAll=True) - self.debug( - "Verify listVirtualMachines response for virtual machines: %s, %s" % ( - virtual_machine1.id, virtual_machine2.id) - ) - self.assertEqual( - isinstance(list_vms, list), - True, - "List VM response was not a valid list" - ) - self.assertEqual( - len(list_vms), - 2, - "List VM response was empty, expected 2 VMs" - ) - - def tearDown(self): - super(TestDeployVM, self).tearDown() - class TestVMLifeCycle(cloudstackTestCase): @@ -297,6 +95,7 @@ def setUpClass(cls): cls.services["small"]["template"] = template.id cls.services["iso1"]["zoneid"] = cls.zone.id + cls._cleanup = [] # Create VMs, NAT Rules etc cls.account = Account.create( @@ -304,16 +103,20 @@ def setUpClass(cls): cls.services["account"], domainid=cls.domain.id ) + cls._cleanup.append(cls.account) cls.small_offering = ServiceOffering.create( cls.apiclient, cls.services["service_offerings"]["small"] ) + cls._cleanup.append(cls.small_offering) cls.medium_offering = ServiceOffering.create( cls.apiclient, cls.services["service_offerings"]["medium"] ) + cls._cleanup.append(cls.medium_offering) + # create small and large virtual machines cls.small_virtual_machine = VirtualMachine.create( cls.apiclient, @@ -323,6 +126,8 @@ def setUpClass(cls): serviceofferingid=cls.small_offering.id, 
mode=cls.services["mode"] ) + # we don't `cls._cleanup.append(cls.small_virtual_machine)`, as this will be destroyed either during the tests or as a part of the account. + cls.medium_virtual_machine = VirtualMachine.create( cls.apiclient, cls.services["small"], @@ -331,6 +136,7 @@ def setUpClass(cls): serviceofferingid=cls.medium_offering.id, mode=cls.services["mode"] ) + cls._cleanup.append(cls.medium_virtual_machine) cls.virtual_machine = VirtualMachine.create( cls.apiclient, cls.services["small"], @@ -339,11 +145,7 @@ def setUpClass(cls): serviceofferingid=cls.small_offering.id, mode=cls.services["mode"] ) - cls._cleanup = [ - cls.small_offering, - cls.medium_offering, - cls.account - ] + cls._cleanup.append(cls.virtual_machine) @classmethod def tearDownClass(cls): @@ -355,13 +157,7 @@ def setUp(self): self.cleanup = [] def tearDown(self): - # This should be a super call instead (like tearDownClass), which reverses cleanup order. Kept for now since fixing requires adjusting test 12. - try: - # Clean up, terminate the created ISOs - cleanup_resources(self.apiclient, self.cleanup) - except Exception as e: - raise Exception("Warning: Exception during cleanup : %s" % e) - return + super(TestVMLifeCycle, self).tearDown() @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") def test_01_stop_vm(self): @@ -418,7 +214,7 @@ def test_02_start_vm(self): # 1. listVM command should return this VM.State # of this VM should be Running". - self.debug("Starting VM - ID: %s" % self.virtual_machine.id) + self.debug("Starting VM - ID: %s" % self.small_virtual_machine.id) self.small_virtual_machine.start(self.apiclient) list_vm_response = VirtualMachine.list( @@ -458,7 +254,7 @@ def test_03_reboot_vm(self): # 2. listVM command should return the deployed VM. 
# State of this VM should be "Running" - self.debug("Rebooting VM - ID: %s" % self.virtual_machine.id) + self.debug("Rebooting VM - ID: %s" % self.small_virtual_machine.id) self.small_virtual_machine.reboot(self.apiclient) list_vm_response = VirtualMachine.list( @@ -925,6 +721,7 @@ def test_12_start_vm_multiple_volumes_allocated(self): ) self.cleanup.append(volume) # Needs adjusting when changing tearDown to a super call, since it will try to delete an attached volume. VirtualMachine.attach_volume(vm, self.apiclient, volume) + self.cleanup.remove(volume) # Start the VM self.debug("Starting VM - ID: %s" % vm.id) @@ -1063,999 +860,3 @@ def test_14_destroy_vm_delete_protection(self): vm.delete(self.apiclient) return - -class TestSecuredVmMigration(cloudstackTestCase): - - @classmethod - def setUpClass(cls): - testClient = super(TestSecuredVmMigration, cls).getClsTestClient() - cls.apiclient = testClient.getApiClient() - cls.services = testClient.getParsedTestDataConfig() - cls.hypervisor = testClient.getHypervisorInfo() - cls._cleanup = [] - - if cls.hypervisor.lower() not in ["kvm"]: - return - - # Get Zone, Domain and templates - domain = get_domain(cls.apiclient) - cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests()) - cls.services['mode'] = cls.zone.networktype - cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][ - 0].__dict__ - cls.management_ip = cls.config.__dict__["mgtSvr"][0].__dict__["mgtSvrIp"] - - template = get_suitable_test_template( - cls.apiclient, - cls.zone.id, - cls.services["ostype"], - cls.hypervisor - ) - if template == FAILED: - assert False, "get_suitable_test_template() failed to return template with description %s" % cls.services["ostype"] - - # Set Zones and disk offerings - cls.services["small"]["zoneid"] = cls.zone.id - cls.services["small"]["template"] = template.id - - cls.services["iso1"]["zoneid"] = cls.zone.id - - # Create VMs, NAT Rules etc - cls.account = 
Account.create( - cls.apiclient, - cls.services["account"], - domainid=domain.id - ) - - cls.small_offering = ServiceOffering.create( - cls.apiclient, - cls.services["service_offerings"]["small"] - ) - - cls._cleanup = [ - cls.small_offering, - cls.account - ] - - @classmethod - def tearDownClass(cls): - if cls.hypervisor.lower() in ["kvm"]: - cls.ensure_all_hosts_are_up() - super(TestSecuredVmMigration, cls).tearDownClass() - - @classmethod - def ensure_all_hosts_are_up(cls): - hosts = Host.list( - cls.apiclient, - zoneid=cls.zone.id, - type='Routing', - hypervisor='KVM' - ) - for host in hosts: - if host.state != "Up": - SshClient(host.ipaddress, port=22, user=cls.hostConfig["username"], passwd=cls.hostConfig["password"]) \ - .execute("service cloudstack-agent stop ; \ - sleep 10 ; \ - service cloudstack-agent start") - interval = 5 - retries = 10 - while retries > -1: - time.sleep(interval) - restarted_host = Host.list( - cls.apiclient, - id=host.id, - type='Routing' - )[0] - if restarted_host.state == "Up": - break - retries = retries - 1 - - def setUp(self): - self.apiclient = self.testClient.getApiClient() - self.dbclient = self.testClient.getDbConnection() - self.cleanup = [] - - if self.hypervisor.lower() not in ["kvm"]: - self.skipTest("Secured migration is not supported on other than KVM") - - self.hosts = Host.list( - self.apiclient, - zoneid=self.zone.id, - type='Routing', - hypervisor='KVM', - state='Up') - - if len(self.hosts) < 2: - self.skipTest("Requires at least two hosts for performing migration related tests") - - self.secure_all_hosts() - self.updateConfiguration("ca.plugin.root.auth.strictness", "false") - - def tearDown(self): - self.secure_all_hosts() - self.updateConfiguration("ca.plugin.root.auth.strictness", "true") - super(TestSecuredVmMigration, self).tearDown() - - def get_target_host(self, secured, virtualmachineid): - target_hosts = Host.listForMigration(self.apiclient, - virtualmachineid=virtualmachineid) - for host in target_hosts: 
- h = list_hosts(self.apiclient, type='Routing', id=host.id)[0] - if h.details.secured == secured: - return h - - cloudstackTestCase.skipTest(self, "No target hosts available, skipping test.") - - def check_migration_protocol(self, protocol, host): - resp = SshClient(host.ipaddress, port=22, user=self.hostConfig["username"], passwd=self.hostConfig["password"]) \ - .execute("grep -a listen_%s=1 /etc/libvirt/libvirtd.conf | tail -1" % protocol) - - if protocol not in resp[0]: - cloudstackTestCase.fail(self, "Libvirt listen protocol expected: '" + protocol + "\n" - "does not match actual: " + resp[0]) - - def migrate_and_check(self, vm, src_host, dest_host, proto='tls'): - """ - Migrates a VM from source host to destination host and checks status - """ - self.check_migration_protocol(protocol=proto, host=src_host) - vm.migrate(self.apiclient, hostid=dest_host.id) - vm_response = VirtualMachine.list(self.apiclient, id=vm.id)[0] - self.assertEqual(vm_response.hostid, dest_host.id, "Check destination host ID of migrated VM") - - def waitUntilHostInState(self, hostId, state="Up", interval=5, retries=20): - while retries > -1: - time.sleep(interval) - host = Host.list( - self.apiclient, - id=hostId, - type='Routing' - )[0] - if host.state != state: - if retries >= 0: - retries = retries - 1 - continue - else: - print("Host %s now showing as %s" % (hostId, state)) - return - - def unsecure_host(self, host): - SshClient(host.ipaddress, port=22, user=self.hostConfig["username"], passwd=self.hostConfig["password"]) \ - .execute("rm -f /etc/cloudstack/agent/cloud* && \ - service cloudstack-agent stop ; \ - service libvirtd stop ; \ - service libvirt-bin stop ; \ - sed -i 's/listen_tls.*/listen_tls=0/g' /etc/libvirt/libvirtd.conf && \ - sed -i 's/listen_tcp.*/listen_tcp=1/g' /etc/libvirt/libvirtd.conf && \ - sed -i '/.*_file=.*/d' /etc/libvirt/libvirtd.conf && \ - sed -i 's/vnc_tls.*/vnc_tls=0/g' /etc/libvirt/qemu.conf && \ - service libvirtd start ; \ - service libvirt-bin 
start ; \ - sleep 30 ; \ - service cloudstack-agent start") - time.sleep(30) - print("Unsecuring Host: %s" % (host.name)) - self.waitUntilHostInState(hostId=host.id, state="Up") - self.check_connection(host=host, secured='false') - return host - - def secure_all_hosts(self): - for host in self.hosts: - cmd = provisionCertificate.provisionCertificateCmd() - cmd.hostid = host.id - cmd.reconnect = True - self.apiclient.provisionCertificate(cmd) - - for host in self.hosts: - print("Securing Host %s" % host.name) - self.waitUntilHostInState(hostId=host.id, state="Up") - self.check_connection(secured='true', host=host) - - def deploy_vm(self, origin_host): - return VirtualMachine.create( - self.apiclient, - self.services["small"], - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.small_offering.id, - mode=self.services["mode"], - hostid=origin_host.id) - - def check_connection(self, secured, host, retries=20, interval=6): - - while retries > -1: - time.sleep(interval) - host = Host.list( - self.apiclient, - zoneid=self.zone.id, - id=host.id, - type='Routing' - )[0] - if host.details.secured != secured: - if retries >= 0: - retries = retries - 1 - continue - else: - return - - raise Exception("Host detail 'secured' was expected: " + secured + - ", actual is: " + host.details.secured) - - def updateConfiguration(self, name, value): - cmd = updateConfiguration.updateConfigurationCmd() - cmd.name = name - cmd.value = value - self.apiclient.updateConfiguration(cmd) - - @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false") - def test_01_secure_vm_migration(self): - """Test secure VM migration""" - # Validate the following - # 1. Environment has enough hosts for migration - # 2. DeployVM on suitable host (with another host in the cluster) - # 3. 
Migrate the VM and assert migration successful - - src_host = self.hosts[0] - vm = self.deploy_vm(src_host) - self.cleanup.append(vm) - - self.debug("Securing Host(s)") - dest_host = self.get_target_host(secured='true', virtualmachineid=vm.id) - self.migrate_and_check(vm, src_host, dest_host) - - @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false") - def test_02_unsecure_vm_migration(self): - """Test Non-secured VM Migration - """ - # Validate the following - # 1. Prepare 2 hosts to run in non-secured more - # 2. DeployVM on suitable host (with another host in the cluster) - # 3. Migrate the VM and assert migration successful - - for host in self.hosts: - self.unsecure_host(host) - - src_host = self.hosts[0] - vm = self.deploy_vm(src_host) - self.cleanup.append(vm) - - dest_host = self.get_target_host(secured='false', virtualmachineid=vm.id) - self.migrate_and_check(vm, src_host, dest_host, proto='tcp') - - @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false") - def test_03_secured_to_nonsecured_vm_migration(self): - """Test destroy Virtual Machine - """ - # Validate the following - # 1. Makes one of the hosts non-secured - # 2. Deploys a VM to a Secured host - # 3. Migrates the VM to the non-secured host via TLS, and ensure exception - - unsecure_host = self.unsecure_host(self.hosts[0]) - secure_host = self.hosts[1] - - vm = self.deploy_vm(secure_host) - self.cleanup.append(vm) - - try: - self.migrate_and_check(vm, secure_host, unsecure_host, proto='tls') - except Exception: - pass - else: - self.fail("Migration succeeded, instead it should fail") - - @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false") - def test_04_nonsecured_to_secured_vm_migration(self): - """Test Non-secured VM Migration - """ - # Validate the following - # 1. Makes one of the hosts non-secured - # 2. 
Deploys a VM to the non-secured host - # 3. Migrates the VM to the non-secured host via TCP, and ensure exception - - unsecure_host = self.unsecure_host(self.hosts[0]) - secure_host = self.hosts[1] - - vm = self.deploy_vm(unsecure_host) - self.cleanup.append(vm) - - try: - self.migrate_and_check(vm, unsecure_host, secure_host, proto='tcp') - except Exception: - pass - else: - self.fail("Migration succeeded, instead it should fail") - - -class TestMigrateVMwithVolume(cloudstackTestCase): - - @classmethod - def setUpClass(cls): - testClient = super(TestMigrateVMwithVolume, cls).getClsTestClient() - cls.apiclient = testClient.getApiClient() - cls.services = testClient.getParsedTestDataConfig() - cls.hypervisor = testClient.getHypervisorInfo() - cls._cleanup = [] - - # Get Zone, Domain and templates - domain = get_domain(cls.apiclient) - cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests()) - cls.services['mode'] = cls.zone.networktype - cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][ - 0].__dict__ - cls.management_ip = cls.config.__dict__["mgtSvr"][0].__dict__["mgtSvrIp"] - - template = get_suitable_test_template( - cls.apiclient, - cls.zone.id, - cls.services["ostype"], - cls.hypervisor - ) - if template == FAILED: - assert False, "get_suitable_test_template() failed to return template with description %s" % cls.services["ostype"] - - # Set Zones and disk offerings - cls.services["small"]["zoneid"] = cls.zone.id - cls.services["small"]["template"] = template.id - - cls.services["iso1"]["zoneid"] = cls.zone.id - - # Create VMs, NAT Rules etc - cls.account = Account.create( - cls.apiclient, - cls.services["account"], - domainid=domain.id - ) - - cls.small_offering = ServiceOffering.create( - cls.apiclient, - cls.services["service_offerings"]["small"] - ) - - cls._cleanup = [ - cls.small_offering, - cls.account - ] - - @classmethod - def tearDownClass(cls): - 
super(TestMigrateVMwithVolume,cls).tearDownClass() - - def setUp(self): - self.apiclient = self.testClient.getApiClient() - self.dbclient = self.testClient.getDbConnection() - self.cleanup = [] - - if self.hypervisor.lower() not in ["vmware"]: - self.skipTest("VM Migration with Volumes is not supported on other than VMware") - - self.hosts = Host.list( - self.apiclient, - zoneid=self.zone.id, - type='Routing', - hypervisor='VMware') - - if len(self.hosts) < 2: - self.skipTest("Requires at least two hosts for performing migration related tests") - - def tearDown(self): - super(TestMigrateVMwithVolume,self).tearDown() - - def get_target_host(self, virtualmachineid): - target_hosts = Host.listForMigration(self.apiclient, - virtualmachineid=virtualmachineid) - if len(target_hosts) < 1: - self.skipTest("No target hosts found") - - return target_hosts[0] - - def get_target_pool(self, volid): - target_pools = StoragePool.listForMigration(self.apiclient, id=volid) - - if len(target_pools) < 1: - self.skipTest("Not enough storage pools found") - - return target_pools[0] - - def get_vm_volumes(self, id): - return Volume.list(self.apiclient, virtualmachineid=id, listall=True) - - def deploy_vm(self): - return VirtualMachine.create( - self.apiclient, - self.services["small"], - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.small_offering.id, - mode=self.services["mode"]) - - def migrate_vm_to_pool(self, target_pool, id): - - cmd = migrateVirtualMachine.migrateVirtualMachineCmd() - - cmd.storageid = target_pool.id - cmd.virtualmachineid = id - - return self.apiclient.migrateVirtualMachine(cmd) - - def create_volume(self): - small_disk_offering = DiskOffering.list(self.apiclient, name='Small')[0] - - return Volume.create( - self.apiclient, - self.services, - account=self.account.name, - diskofferingid=small_disk_offering.id, - domainid=self.account.domainid, - zoneid=self.zone.id - ) - - """ - BVT for Vmware Offline and Live VM and 
Volume Migration - """ - - @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false") - def test_01_offline_migrate_VM_and_root_volume(self): - """Test VM will be migrated with it's root volume""" - # Validate the following - # 1. Deploys a VM - # 2. Stops the VM - # 3. Finds suitable storage pool for root volume - # 4. Migrate the VM to new storage pool and assert migration successful - - vm = self.deploy_vm() - - root_volume = self.get_vm_volumes(vm.id)[0] - - target_pool = self.get_target_pool(root_volume.id) - - vm.stop(self.apiclient) - - self.migrate_vm_to_pool(target_pool, vm.id) - - root_volume = self.get_vm_volumes(vm.id)[0] - self.assertEqual(root_volume.storageid, target_pool.id, "Pool ID was not as expected") - - @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false") - def test_02_offline_migrate_VM_with_two_data_disks(self): - """Test VM will be migrated with it's root volume""" - # Validate the following - # 1. Deploys a VM and attaches 2 data disks - # 2. Stops the VM - # 3. Finds suitable storage pool for volumes - # 4. 
Migrate the VM to new storage pool and assert migration successful - - vm = self.deploy_vm() - - volume1 = self.create_volume() - volume2 = self.create_volume() - - vm.attach_volume(self.apiclient, volume1) - vm.attach_volume(self.apiclient, volume2) - - root_volume = self.get_vm_volumes(vm.id)[0] - - target_pool = self.get_target_pool(root_volume.id) - - vm.stop(self.apiclient) - - self.migrate_vm_to_pool(target_pool, vm.id) - - volume1 = Volume.list(self.apiclient, id=volume1.id)[0] - volume2 = Volume.list(self.apiclient, id=volume2.id)[0] - root_volume = self.get_vm_volumes(vm.id)[0] - - self.assertEqual(root_volume.storageid, target_pool.id, "Pool ID was not as expected") - self.assertEqual(volume1.storageid, target_pool.id, "Pool ID was not as expected") - self.assertEqual(volume2.storageid, target_pool.id, "Pool ID was not as expected") - - @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false") - def test_03_live_migrate_VM_with_two_data_disks(self): - """Test VM will be migrated with it's root volume""" - # Validate the following - # 1. Deploys a VM and attaches 2 data disks - # 2. Finds suitable host for migration - # 3. Finds suitable storage pool for volumes - # 4. 
Migrate the VM to new host and storage pool and assert migration successful - - vm = self.deploy_vm() - - root_volume = self.get_vm_volumes(vm.id)[0] - volume1 = self.create_volume() - volume2 = self.create_volume() - vm.attach_volume(self.apiclient, volume1) - vm.attach_volume(self.apiclient, volume2) - - target_host = self.get_target_host(vm.id) - target_pool = self.get_target_pool(root_volume.id) - volume1.target_pool = self.get_target_pool(volume1.id) - volume2.target_pool = self.get_target_pool(volume2.id) - - cmd = migrateVirtualMachineWithVolume.migrateVirtualMachineWithVolumeCmd() - cmd.migrateto = [{"volume": str(root_volume.id), "pool": str(target_pool.id)}, - {"volume": str(volume1.id), "pool": str(volume1.target_pool.id)}, - {"volume": str(volume2.id), "pool": str(volume2.target_pool.id)}] - cmd.virtualmachineid = vm.id - cmd.hostid = target_host.id - - response = self.apiclient.migrateVirtualMachineWithVolume(cmd) - - self.assertEqual(Volume.list(self.apiclient, id=root_volume.id)[0].storageid, - target_pool.id, - "Pool ID not as expected") - - self.assertEqual(Volume.list(self.apiclient, id=volume1.id)[0].storageid, - volume1.target_pool.id, - "Pool ID not as expected") - - self.assertEqual(Volume.list(self.apiclient, id=volume2.id)[0].storageid, - volume2.target_pool.id, - "Pool ID not as expected") - - self.assertEqual(response.hostid, - target_host.id, - "HostID not as expected") - - @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false") - def test_04_migrate_detached_volume(self): - """Test VM will be migrated with it's root volume""" - # Validate the following - # 1. Deploys a VM and attaches 1 data disk - # 2. Detaches the Disk - # 3. Finds suitable storage pool for the Disk - # 4. 
Migrate the storage pool and assert migration successful - - vm = self.deploy_vm() - - volume1 = self.create_volume() - - vm.attach_volume(self.apiclient, volume1) - vm.detach_volume(self.apiclient, volume1) - - target_pool = self.get_target_pool(volume1.id) - - Volume.migrate(self.apiclient, storageid=target_pool.id, volumeid=volume1.id) - - vol = Volume.list(self.apiclient, id=volume1.id)[0] - - self.assertEqual(vol.storageid, target_pool.id, "Storage pool was not the same as expected") - - -class TestKVMLiveMigration(cloudstackTestCase): - - @classmethod - def setUpClass(cls): - testClient = super(TestKVMLiveMigration, cls).getClsTestClient() - cls.apiclient = testClient.getApiClient() - cls.services = testClient.getParsedTestDataConfig() - cls.hypervisor = testClient.getHypervisorInfo() - cls._cleanup = [] - - # Get Zone, Domain and templates - domain = get_domain(cls.apiclient) - cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests()) - cls.services['mode'] = cls.zone.networktype - cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][ - 0].__dict__ - cls.management_ip = cls.config.__dict__["mgtSvr"][0].__dict__["mgtSvrIp"] - - template = get_suitable_test_template( - cls.apiclient, - cls.zone.id, - cls.services["ostype"], - cls.hypervisor - ) - if template == FAILED: - assert False, "get_suitable_test_template() failed to return template with description %s" % cls.services["ostype"] - - # Set Zones and disk offerings - cls.services["small"]["zoneid"] = cls.zone.id - cls.services["small"]["template"] = template.id - - cls.services["iso1"]["zoneid"] = cls.zone.id - - # Create VMs, NAT Rules etc - cls.account = Account.create( - cls.apiclient, - cls.services["account"], - domainid=domain.id - ) - - cls.small_offering = ServiceOffering.create( - cls.apiclient, - cls.services["service_offerings"]["small"] - ) - - cls._cleanup = [ - cls.small_offering, - cls.account - ] - - @classmethod - def 
tearDownClass(cls): - super(TestKVMLiveMigration,cls).tearDownClass() - - def setUp(self): - self.apiclient = self.testClient.getApiClient() - self.dbclient = self.testClient.getDbConnection() - self.cleanup = [] - - if self.hypervisor.lower() not in ["kvm"]: - self.skipTest("VM Live Migration with Volumes is not supported on other than KVM") - - self.hosts = Host.list( - self.apiclient, - zoneid=self.zone.id, - type='Routing', - hypervisor='KVM') - - if len(self.hosts) < 2: - self.skipTest("Requires at least two hosts for performing migration related tests") - - for host in self.hosts: - if host.details['Host.OS'] and host.details['Host.OS'].startswith('CentOS'): - self.skipTest("live migration is not stabily supported on CentOS") - - def tearDown(self): - super(TestKVMLiveMigration,self).tearDown() - - def get_target_host(self, virtualmachineid): - target_hosts = Host.listForMigration(self.apiclient, - virtualmachineid=virtualmachineid) - if len(target_hosts) < 1: - self.skipTest("No target hosts found") - - return target_hosts[0] - - def get_target_pool(self, volid): - target_pools = StoragePool.listForMigration(self.apiclient, id=volid) - - if len(target_pools) < 1: - self.skipTest("Not enough storage pools found") - - return target_pools[0] - - def get_vm_volumes(self, id): - return Volume.list(self.apiclient, virtualmachineid=id, listall=True) - - def deploy_vm(self): - return VirtualMachine.create( - self.apiclient, - self.services["small"], - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.small_offering.id, - mode=self.services["mode"]) - - def create_volume(self): - small_disk_offering = DiskOffering.list(self.apiclient, name='Small')[0] - - return Volume.create( - self.apiclient, - self.services, - account=self.account.name, - diskofferingid=small_disk_offering.id, - domainid=self.account.domainid, - zoneid=self.zone.id - ) - - @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], 
required_hardware="false") - def test_01_migrate_VM_and_root_volume(self): - """Test VM will be migrated with it's root volume""" - # Validates the following: - # 1. Deploys a VM - # 2. Migrates the VM and the root volume to another host and storage pool - # 3. Asserts migration success and checks for location - - vm = self.deploy_vm() - - root_volume = self.get_vm_volumes(vm.id)[0] - - target_pool = self.get_target_pool(root_volume.id) - - target_host = self.get_target_host(vm.id) - - cmd = migrateVirtualMachineWithVolume.migrateVirtualMachineWithVolumeCmd() - - cmd.migrateto = [{"volume": str(root_volume.id), "pool": str(target_pool.id)}] - - cmd.virtualmachineid = vm.id - cmd.hostid = target_host.id - - response = self.apiclient.migrateVirtualMachineWithVolume(cmd) - - self.assertEqual(response.hostid, target_host.id) - - self.assertEqual(Volume.list(self.apiclient, id=root_volume.id)[0].storageid, - target_pool.id, - "Pool ID was not as expected") - - @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false") - def test_02_migrate_VM_with_two_data_disks(self): - """Test VM will be migrated with it's root volume""" - # Validate the following - # 1. Deploys a VM and attaches 2 data disks - # 2. Finds suitable host for migration - # 3. Finds suitable storage pool for volumes - # 4. 
Migrate the VM to new host and storage pool and assert migration successful - - vm = self.deploy_vm() - - volume1 = self.create_volume() - volume2 = self.create_volume() - - vm.attach_volume(self.apiclient, volume1) - vm.attach_volume(self.apiclient, volume2) - - root_volume = self.get_vm_volumes(vm.id)[0] - - target_pool = self.get_target_pool(root_volume.id) - volume1.target_pool = self.get_target_pool(volume1.id) - volume2.target_pool = self.get_target_pool(volume2.id) - - target_host = self.get_target_host(vm.id) - - cmd = migrateVirtualMachineWithVolume.migrateVirtualMachineWithVolumeCmd() - - cmd.migrateto = [{"volume": str(root_volume.id), "pool": str(target_pool.id)}, - {"volume": str(volume1.id), "pool": str(volume1.target_pool.id)}, - {"volume": str(volume2.id), "pool": str(volume2.target_pool.id)}] - cmd.virtualmachineid = vm.id - cmd.hostid = target_host.id - - response = self.apiclient.migrateVirtualMachineWithVolume(cmd) - - self.assertEqual(Volume.list(self.apiclient, id=root_volume.id)[0].storageid, - target_pool.id, - "Pool ID not as expected") - - self.assertEqual(Volume.list(self.apiclient, id=volume1.id)[0].storageid, - volume1.target_pool.id, - "Pool ID not as expected") - - self.assertEqual(Volume.list(self.apiclient, id=volume2.id)[0].storageid, - volume2.target_pool.id, - "Pool ID not as expected") - - self.assertEqual(response.hostid, - target_host.id, - "HostID not as expected") - - -class TestVAppsVM(cloudstackTestCase): - - @classmethod - def setUpClass(cls): - testClient = super(TestVAppsVM, cls).getClsTestClient() - cls.apiclient = testClient.getApiClient() - cls.services = testClient.getParsedTestDataConfig() - cls.hypervisor = testClient.getHypervisorInfo() - cls._cleanup = [] - - # Get Zone, Domain and templates - cls.domain = get_domain(cls.apiclient) - cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests()) - cls.services['mode'] = cls.zone.networktype - - cls.hypervisorNotSupported = cls.hypervisor.lower() != 
"vmware" - - if cls.hypervisorNotSupported == False: - - cls.account = Account.create( - cls.apiclient, - cls.services["account"], - domainid=cls.domain.id - ) - - cls.templates = get_test_ovf_templates( - cls.apiclient, - cls.zone.id, - cls.services['test_ovf_templates'], - cls.hypervisor - ) - if len(cls.templates) == 0: - assert False, "get_test_ovf_templates() failed to return templates" - - cls.custom_offering = ServiceOffering.create( - cls.apiclient, - cls.services["custom_service_offering"] - ) - - cls.isolated_network_offering = NetworkOffering.create( - cls.apiclient, - cls.services["isolated_network_offering"], - ) - cls.isolated_network_offering.update(cls.apiclient, state='Enabled') - - cls.l2_network_offering = NetworkOffering.create( - cls.apiclient, - cls.services["l2-network_offering"], - ) - cls.l2_network_offering.update(cls.apiclient, state='Enabled') - - cls._cleanup = [ - cls.account, - cls.custom_offering, - cls.isolated_network_offering, - cls.l2_network_offering - ] - - - @classmethod - def tearDownClass(cls): - try: - cleanup_resources(cls.apiclient, cls._cleanup) - except Exception as e: - raise Exception("Warning: Exception during class cleanup : %s" % e) - - def setUp(self): - self.apiclient = self.testClient.getApiClient() - self.cleanup = [] - - def tearDown(self): - try: - cleanup_resources(self.apiclient, self.cleanup) - except Exception as e: - raise Exception("Warning: Exception during cleanup : %s" % e) - - def get_ova_parsed_information_from_template(self, template): - if not template: - return None - details = template.deployasisdetails.__dict__ - configurations = [] - disks = [] - isos = [] - networks = [] - for propKey in details: - if propKey.startswith('configuration'): - configurations.append(json.loads(details[propKey])) - elif propKey.startswith('disk'): - detail = json.loads(details[propKey]) - if detail['isIso'] == True: - isos.append(detail) - else: - disks.append(detail) - elif propKey.startswith('network'): - 
networks.append(json.loads(details[propKey])) - - return configurations, disks, isos, networks - - def verify_nics(self, nic_networks, vm_id): - cmd = listNics.listNicsCmd() - cmd.virtualmachineid = vm_id - vm_nics = self.apiclient.listNics(cmd) - self.assertEqual( - isinstance(vm_nics, list), - True, - "Check listNics response returns a valid list" - ) - self.assertEqual( - len(nic_networks), - len(vm_nics), - msg="VM NIC count is different, expected = {}, result = {}".format(len(nic_networks), len(vm_nics)) - ) - nic_networks.sort(key=itemgetter('nic')) # CS will create NIC in order of InstanceID. Check network order - vm_nics.sort(key=itemgetter('deviceid')) - for i in range(len(vm_nics)): - nic = vm_nics[i] - nic_network = nic_networks[i] - self.assertEqual( - nic.networkid, - nic_network["network"], - msg="VM NIC(InstanceID: {}) network mismatch, expected = {}, result = {}".format(nic_network["nic"], nic_network["network"], nic.networkid) - ) - - @attr(tags=["advanced", "advancedns", "smoke", "sg", "dev"], required_hardware="false") - @skipTestIf("hypervisorNotSupported") - def test_01_vapps_vm_cycle(self): - """ - Test the following for all found ovf templates: - 1. Deploy VM - 2. Verify VM has correct properties - 3. Verify VM has correct disks - 4. Verify VM has correct nics - 5. 
Destroy VM - """ - - for template in self.templates: - configurations, disks, isos, network = self.get_ova_parsed_information_from_template(template) - - if configurations: - conf = configurations[0] - items = conf['hardwareItems'] - cpu_speed = 1000 - cpu_number = 0 - memory = 0 - for item in items: - if item['resourceType'] == 'Memory': - memory = item['virtualQuantity'] - elif item['resourceType'] == 'Processor': - cpu_number = item['virtualQuantity'] - - nicnetworklist = [] - networks = [] - vm_service = self.services["virtual_machine_vapps"][template.name] - network_mappings = vm_service["nicnetworklist"] - for network_mapping in network_mappings: - network_service = self.services["isolated_network"] - network_offering_id = self.isolated_network_offering.id - if network_mapping["network"] == 'l2': - network_service = self.services["l2-network"] - network_offering_id = self.l2_network_offering.id - network = Network.create( - self.apiclient, - network_service, - networkofferingid=network_offering_id, - accountid=self.account.name, - domainid=self.account.domainid, - zoneid=self.zone.id) - networks.append(network) - for interface in network_mapping["nic"]: - nicnetworklist.append({"nic": interface, "network": network.id}) - - vm = VirtualMachine.create( - self.apiclient, - vm_service, - accountid=self.account.name, - domainid=self.account.domainid, - templateid=template.id, - serviceofferingid=self.custom_offering.id, - zoneid=self.zone.id, - customcpunumber=cpu_number, - customcpuspeed=cpu_speed, - custommemory=memory, - properties=vm_service['properties'], - nicnetworklist=nicnetworklist - ) - - list_vm_response = VirtualMachine.list( - self.apiclient, - id=vm.id - ) - self.debug( - "Verify listVirtualMachines response for virtual machine: %s" \ - % vm.id - ) - self.assertEqual( - isinstance(list_vm_response, list), - True, - "Check list response returns a valid list" - ) - self.assertNotEqual( - len(list_vm_response), - 0, - "Check VM available in List 
Virtual Machines" - ) - vm_response = list_vm_response[0] - self.assertEqual( - vm_response.id, - vm.id, - "Check virtual machine id in listVirtualMachines" - ) - self.assertEqual( - vm_response.name, - vm.name, - "Check virtual machine name in listVirtualMachines" - ) - self.assertEqual( - vm_response.state, - 'Running', - msg="VM is not in Running state" - ) - - # Verify nics - self.verify_nics(nicnetworklist, vm.id) - # Verify properties - original_properties = vm_service['properties'] - vm_properties = get_vm_vapp_configs(self.apiclient, self.config, self.zone, vm.instancename) - for property in original_properties: - if property["key"] in vm_properties: - self.assertEqual( - vm_properties[property["key"]], - property["value"], - "Check VM property %s with original value" % property["key"] - ) - - cmd = destroyVirtualMachine.destroyVirtualMachineCmd() - cmd.id = vm.id - self.apiclient.destroyVirtualMachine(cmd) diff --git a/test/integration/smoke/test_vmware_vapps.py b/test/integration/smoke/test_vmware_vapps.py new file mode 100644 index 000000000000..f2e8f092c8ff --- /dev/null +++ b/test/integration/smoke/test_vmware_vapps.py @@ -0,0 +1,289 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+""" BVT tests for Virtual Machine Life Cycle +""" +# Import Local Modules +from marvin.cloudstackTestCase import cloudstackTestCase +from marvin.cloudstackAPI import (recoverVirtualMachine, + destroyVirtualMachine, + attachIso, + detachIso, + provisionCertificate, + updateConfiguration, + migrateVirtualMachine, + migrateVirtualMachineWithVolume, + listNics, + listVolumes) +from marvin.lib.utils import * + +from marvin.lib.base import (Account, + ServiceOffering, + VirtualMachine, + Host, + Iso, + Router, + Configurations, + StoragePool, + Volume, + DiskOffering, + NetworkOffering, + Network) +from marvin.lib.common import (get_domain, + get_zone, + get_suitable_test_template, + get_test_ovf_templates, + list_hosts, + get_vm_vapp_configs) +from marvin.codes import FAILED, PASS +from nose.plugins.attrib import attr +from marvin.lib.decoratorGenerators import skipTestIf +# Import System modules +import time +import json +from operator import itemgetter + +_multiprocess_shared_ = True + + +class TestVAppsVM(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + testClient = super(TestVAppsVM, cls).getClsTestClient() + cls.apiclient = testClient.getApiClient() + cls.services = testClient.getParsedTestDataConfig() + cls.hypervisor = testClient.getHypervisorInfo() + cls._cleanup = [] + + # Get Zone, Domain and templates + cls.domain = get_domain(cls.apiclient) + cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests()) + cls.services['mode'] = cls.zone.networktype + + cls.hypervisorNotSupported = cls.hypervisor.lower() != "vmware" + cls._cleanup = [] + + if cls.hypervisorNotSupported == False: + + cls.templates = get_test_ovf_templates( + cls.apiclient, + cls.zone.id, + cls.services['test_ovf_templates'], + cls.hypervisor + ) + if len(cls.templates) == 0: + assert False, "get_test_ovf_templates() failed to return templates" + + cls.custom_offering = ServiceOffering.create( + cls.apiclient, + cls.services["custom_service_offering"] + ) + 
cls._cleanup.append(cls.custom_offering) + + cls.isolated_network_offering = NetworkOffering.create( + cls.apiclient, + cls.services["isolated_network_offering"], + ) + cls._cleanup.append(cls.isolated_network_offering) + cls.isolated_network_offering.update(cls.apiclient, state='Enabled') + + cls.l2_network_offering = NetworkOffering.create( + cls.apiclient, + cls.services["l2-network_offering"], + ) + cls._cleanup.append(cls.l2_network_offering) + cls.l2_network_offering.update(cls.apiclient, state='Enabled') + + cls.account = Account.create( + cls.apiclient, + cls.services["account"], + domainid=cls.domain.id + ) + cls._cleanup.append(cls.account) + + @classmethod + def tearDownClass(cls): + super(TestVAppsVM, cls).tearDownClass() + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.cleanup = [] + + def tearDown(self): + super(TestVAppsVM, self).tearDown() + + def get_ova_parsed_information_from_template(self, template): + if not template: + return None + details = template.deployasisdetails.__dict__ + configurations = [] + disks = [] + isos = [] + networks = [] + for propKey in details: + if propKey.startswith('configuration'): + configurations.append(json.loads(details[propKey])) + elif propKey.startswith('disk'): + detail = json.loads(details[propKey]) + if detail['isIso'] == True: + isos.append(detail) + else: + disks.append(detail) + elif propKey.startswith('network'): + networks.append(json.loads(details[propKey])) + + return configurations, disks, isos, networks + + def verify_nics(self, nic_networks, vm_id): + cmd = listNics.listNicsCmd() + cmd.virtualmachineid = vm_id + vm_nics = self.apiclient.listNics(cmd) + self.assertEqual( + isinstance(vm_nics, list), + True, + "Check listNics response returns a valid list" + ) + self.assertEqual( + len(nic_networks), + len(vm_nics), + msg="VM NIC count is different, expected = {}, result = {}".format(len(nic_networks), len(vm_nics)) + ) + nic_networks.sort(key=itemgetter('nic')) # CS will 
create NIC in order of InstanceID. Check network order + vm_nics.sort(key=itemgetter('deviceid')) + for i in range(len(vm_nics)): + nic = vm_nics[i] + nic_network = nic_networks[i] + self.assertEqual( + nic.networkid, + nic_network["network"], + msg="VM NIC(InstanceID: {}) network mismatch, expected = {}, result = {}".format(nic_network["nic"], nic_network["network"], nic.networkid) + ) + + @attr(tags=["advanced", "advancedns", "smoke", "sg", "dev"], required_hardware="false") + @skipTestIf("hypervisorNotSupported") + def test_01_vapps_vm_cycle(self): + """ + Test the following for all found ovf templates: + 1. Deploy VM + 2. Verify VM has correct properties + 3. Verify VM has correct disks + 4. Verify VM has correct nics + 5. Destroy VM + """ + + for template in self.templates: + configurations, disks, isos, network = self.get_ova_parsed_information_from_template(template) + + if configurations: + conf = configurations[0] + items = conf['hardwareItems'] + cpu_speed = 1000 + cpu_number = 0 + memory = 0 + for item in items: + if item['resourceType'] == 'Memory': + memory = item['virtualQuantity'] + elif item['resourceType'] == 'Processor': + cpu_number = item['virtualQuantity'] + + nicnetworklist = [] + networks = [] + vm_service = self.services["virtual_machine_vapps"][template.name] + network_mappings = vm_service["nicnetworklist"] + for network_mapping in network_mappings: + network_service = self.services["isolated_network"] + network_offering_id = self.isolated_network_offering.id + if network_mapping["network"] == 'l2': + network_service = self.services["l2-network"] + network_offering_id = self.l2_network_offering.id + network = Network.create( + self.apiclient, + network_service, + networkofferingid=network_offering_id, + accountid=self.account.name, + domainid=self.account.domainid, + zoneid=self.zone.id) + networks.append(network) + for interface in network_mapping["nic"]: + nicnetworklist.append({"nic": interface, "network": network.id}) + + vm = 
VirtualMachine.create( + self.apiclient, + vm_service, + accountid=self.account.name, + domainid=self.account.domainid, + templateid=template.id, + serviceofferingid=self.custom_offering.id, + zoneid=self.zone.id, + customcpunumber=cpu_number, + customcpuspeed=cpu_speed, + custommemory=memory, + properties=vm_service['properties'], + nicnetworklist=nicnetworklist + ) + + list_vm_response = VirtualMachine.list( + self.apiclient, + id=vm.id + ) + self.debug( + "Verify listVirtualMachines response for virtual machine: %s" \ + % vm.id + ) + self.assertEqual( + isinstance(list_vm_response, list), + True, + "Check list response returns a valid list" + ) + self.assertNotEqual( + len(list_vm_response), + 0, + "Check VM available in List Virtual Machines" + ) + vm_response = list_vm_response[0] + self.assertEqual( + vm_response.id, + vm.id, + "Check virtual machine id in listVirtualMachines" + ) + self.assertEqual( + vm_response.name, + vm.name, + "Check virtual machine name in listVirtualMachines" + ) + self.assertEqual( + vm_response.state, + 'Running', + msg="VM is not in Running state" + ) + + # Verify nics + self.verify_nics(nicnetworklist, vm.id) + # Verify properties + original_properties = vm_service['properties'] + vm_properties = get_vm_vapp_configs(self.apiclient, self.config, self.zone, vm.instancename) + for property in original_properties: + if property["key"] in vm_properties: + self.assertEqual( + vm_properties[property["key"]], + property["value"], + "Check VM property %s with original value" % property["key"] + ) + + cmd = destroyVirtualMachine.destroyVirtualMachineCmd() + cmd.id = vm.id + self.apiclient.destroyVirtualMachine(cmd)