 from nova.compute import manager as compute_manager
 from nova import context
 from nova import objects
+from nova import test
 from nova.tests.functional import integrated_helpers
 from nova.tests.functional.libvirt import base
 from nova.tests.unit.virt.libvirt import fake_os_brick_connector
@@ -66,37 +67,35 @@ def setUp(self):
             'nova.virt.libvirt.driver.connector',
             fake_os_brick_connector))
 
+    def _migrate_stub(self, domain, destination, params, flags):
+        raise test.TestingException('_migrate_stub() must be implemented in '
+                                    ' tests that expect the live migration '
+                                    ' to start.')
+
     def get_host(self, server_id):
         server = self.api.get_server(server_id)
         return server['OS-EXT-SRV-ATTR:host']
 
-    def _get_host_numa_topology(self, host):
-        ctxt = context.get_admin_context()
-        return objects.NUMATopology.obj_from_db_obj(
-            objects.ComputeNode.get_by_nodename(ctxt, host).numa_topology)
-
-    def _assert_no_migration_context(self, instance_uuid):
-        ctxt = context.get_admin_context()
-        self.assertFalse(
-            objects.MigrationContext.get_by_instance_uuid(ctxt, instance_uuid))
-
-    def _assert_has_migration_context(self, instance_uuid):
+    def _get_migration_context(self, instance_uuid):
         ctxt = context.get_admin_context()
-        self.assertTrue(
-            objects.MigrationContext.get_by_instance_uuid(ctxt, instance_uuid))
+        return objects.MigrationContext.get_by_instance_uuid(ctxt,
+                                                             instance_uuid)
 
     def _assert_instance_pinned_cpus(self, uuid, instance_cpus, host_cpus):
         ctxt = context.get_admin_context()
         topology = objects.InstanceNUMATopology.get_by_instance_uuid(
             ctxt, uuid)
         self.assertEqual(1, len(topology.cells))
-        self.assertItemsEqual(instance_cpus,
+        # NOTE(artom) DictOfIntegersField has strings as keys, need to convert
+        self.assertItemsEqual([str(cpu) for cpu in instance_cpus],
                               topology.cells[0].cpu_pinning_raw.keys())
         self.assertItemsEqual(host_cpus,
                               topology.cells[0].cpu_pinning_raw.values())
 
     def _assert_host_consumed_cpus(self, host, cpus):
-        topology = self._get_host_numa_topology(host)
+        ctxt = context.get_admin_context()
+        topology = objects.NUMATopology.obj_from_db_obj(
+            objects.ComputeNode.get_by_nodename(ctxt, host).numa_topology)
         self.assertItemsEqual(cpus, topology.cells[0].pinned_cpus)
 
 
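The NOTE(artom) conversion in _assert_instance_pinned_cpus exists because
DictOfIntegersField, like other oslo.versionedobjects dict fields, keeps its
keys as strings while the values stay integers. A minimal sketch of the shape
the assertion has to match (values are illustrative, not taken from this
commit):

    # cpu_pinning_raw maps guest CPU -> host CPU; keys come back as strings
    cpu_pinning_raw = {'0': 2, '1': 3}
    assert sorted(cpu_pinning_raw.keys()) == ['0', '1']   # string keys
    assert sorted(cpu_pinning_raw.values()) == [2, 3]     # integer values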
@@ -132,17 +131,12 @@ def start_computes_and_servers(self):
         # CPUs 0,1.
         for server_name, host in [('server_a', 'host_a'),
                                   ('server_b', 'host_b')]:
-            server = self._build_server(
-                flavor_id=flavor,
-                image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6')
-            server.update({'networks': 'none',
-                           'host': host})
-            post = {'server': server}
-            server = self.api.post_server(post)
+            server = self._create_server(flavor_id=flavor, host=host,
+                                         networks='none')
             setattr(self, server_name,
                     self._wait_for_state_change(server, 'ACTIVE'))
             self.assertEqual(host, self.get_host(server['id']))
-            self._assert_instance_pinned_cpus(server['id'], ['0', '1'], [0, 1])
+            self._assert_instance_pinned_cpus(server['id'], [0, 1], [0, 1])
 
     def _rpc_pin_host(self, hostname):
         ctxt = context.get_admin_context()
@@ -153,14 +147,6 @@ def _rpc_pin_host(self, hostname):
             dest_mgr.compute_rpcapi.router.client(
                 ctxt).can_send_version('5.3'))
 
-    def _live_migrate(self, server, migration_final_status):
-        self.api.post_server_action(
-            server['id'],
-            {'os-migrateLive': {'host': None,
-                                'block_migration': 'auto'}})
-        self._wait_for_state_change(server, 'ACTIVE')
-        self._wait_for_migration_status(server, [migration_final_status])
-
 
 class NUMALiveMigrationPositiveTests(NUMALiveMigrationPositiveBase):
     """Tests that expect the live migration to succeed. Stubs out fakelibvirt's
@@ -177,14 +163,23 @@ def _migrate_stub(self, domain, destination, params, flags):
         until this method is done, the last thing we do is make fakelibvirt's
         Domain.jobStats() return VIR_DOMAIN_JOB_COMPLETED.
         """
-        self._assert_has_migration_context(self.server_a['id'])
+        self.assertIsInstance(
+            self._get_migration_context(self.server_a['id']),
+            objects.MigrationContext)
 
         # During the migration, server_a is consuming CPUs 0,1 on host_a, while
         # all 4 of host_b's CPUs are consumed by server_b and the incoming
         # migration.
         self._assert_host_consumed_cpus('host_a', [0, 1])
         self._assert_host_consumed_cpus('host_b', [0, 1, 2, 3])
 
+        host_a_rp = self._get_provider_uuid_by_name('host_a')
+        host_b_rp = self._get_provider_uuid_by_name('host_b')
+        usages_a = self._get_provider_usages(host_a_rp)
+        usages_b = self._get_provider_usages(host_b_rp)
+        self.assertEqual(2, usages_a['PCPU'])
+        self.assertEqual(4, usages_b['PCPU'])
+
         # In a real live migration, libvirt and QEMU on the source and
         # destination talk it out, resulting in the instance starting to exist
         # on the destination. Fakelibvirt cannot do that, so we have to
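The hunk cuts off just before the completion signal, but the docstring above
describes the mechanism: once the stub has finished its assertions, it flips
fakelibvirt's job stats so that Domain.jobStats() reports
VIR_DOMAIN_JOB_COMPLETED and the migration monitoring loop finishes. A rough
sketch of the idea (the helper name on the fake Domain is assumed here, not
shown in this diff):

    # signal fakelibvirt that the "migration" job has completed, so that
    # Domain.jobStats() starts returning VIR_DOMAIN_JOB_COMPLETED
    conn = self.computes['host_a'].driver._host.get_connection()
    dom = conn.lookupByUUIDString(self.server_a['id'])
    dom.complete_job()  # assumed fakelibvirt helper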
@@ -230,7 +225,7 @@ def _test(self, pin_dest=False):
             self._rpc_pin_host('host_b')
         self._live_migrate(self.server_a, 'completed')
         self.assertEqual('host_b', self.get_host(self.server_a['id']))
-        self._assert_no_migration_context(self.server_a['id'])
+        self.assertIsNone(self._get_migration_context(self.server_a['id']))
 
         # At this point host_a should have no CPUs consumed (server_a has moved
         # to host_b), and host_b should have all of its CPUs consumed. In
@@ -241,14 +236,14 @@ def _test(self, pin_dest=False):
         self._assert_host_consumed_cpus('host_a', [])
         self._assert_host_consumed_cpus('host_b', [0, 1, 2, 3])
         self._assert_instance_pinned_cpus(self.server_a['id'],
-                                          ['0', '1'], [2, 3])
+                                          [0, 1], [2, 3])
 
         self._run_periodics()
 
         self._assert_host_consumed_cpus('host_a', [])
         self._assert_host_consumed_cpus('host_b', [0, 1, 2, 3])
         self._assert_instance_pinned_cpus(self.server_a['id'],
-                                          ['0', '1'], [2, 3])
+                                          [0, 1], [2, 3])
 
         self.assertTrue(self.migrate_stub_ran)
 
@@ -262,8 +257,22 @@ def test_numa_live_migration_dest_pinned(self):
         self._test(pin_dest=True)
 
     def test_bug_1843639(self):
-        orig_live_migration = \
-            compute_manager.ComputeManager.live_migration
+        """Live migrations in 'accepted' status were not considered in progress
+        before the fix for 1845146 merged, and were ignored by the update
+        available resources periodic task. From the task's POV, live-migrating
+        instances with migration status 'accepted' were considered to be on the
+        source, and any resource claims on the destination would get
+        erroneously removed. For that to happen, the task had to run at just
+        the "right" time, when the migration was in 'accepted' and had not yet
+        been moved to 'queued' by live_migration() in the compute manager.
+
+        This test triggers this race by wrapping around live_migration() and
+        running the update available resources periodic task while the
+        migration is still in 'accepted'.
+        """
+
+        self.live_migration_ran = False
+        orig_live_migration = compute_manager.ComputeManager.live_migration
 
         def live_migration(*args, **kwargs):
             self._run_periodics()
@@ -272,12 +281,22 @@ def live_migration(*args, **kwargs):
             # incoming migration.
             self._assert_host_consumed_cpus('host_a', [0, 1])
             self._assert_host_consumed_cpus('host_b', [0, 1, 2, 3])
+
+            # The migration should also be in 'accepted' at this point in time.
+            ctxt = context.get_admin_context()
+            self.assertIsInstance(
+                objects.Migration.get_by_instance_and_status(
+                    ctxt, self.server_a['id'], 'accepted'),
+                objects.Migration)
+
+            self.live_migration_ran = True
             return orig_live_migration(*args, **kwargs)
 
         self.useFixture(fixtures.MonkeyPatch(
             'nova.compute.manager.ComputeManager.live_migration',
             live_migration))
         self._test()
+        self.assertTrue(self.live_migration_ran)
 
 
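A detail worth noting in test_bug_1843639: the original method is captured
before patching, and the final assertTrue(self.live_migration_ran) guards
against the wrapper silently never running (if the monkeypatch target string
were wrong, the test would otherwise pass vacuously). A self-contained sketch
of the same wrap-and-delegate pattern, with toy names that are not from the
commit:

    import fixtures
    import testtools


    class Widget(object):
        def activate(self):
            return 'activated'


    class WrapAndDelegateTest(testtools.TestCase):
        def test_wrap_runs_checks_then_delegates(self):
            # Capture the original before patching, as with
            # orig_live_migration above.
            original = Widget.activate
            self.wrapper_ran = False

            def activate(*args, **kwargs):
                # Mid-operation checks would go here.
                self.wrapper_ran = True
                return original(*args, **kwargs)

            self.useFixture(fixtures.MonkeyPatch(
                __name__ + '.Widget.activate', activate))
            self.assertEqual('activated', Widget().activate())
            # Guard against the wrapper silently never running.
            self.assertTrue(self.wrapper_ran)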
 class NUMALiveMigrationRollbackTests(NUMALiveMigrationPositiveBase):
@@ -290,7 +309,9 @@ def _migrate_stub(self, domain, destination, params, flags):
290309 """Designed to stub fakelibvirt's migrateToURI3 and "fail" the
291310 live migration by monkeypatching jobStats() to return an error.
292311 """
293- self ._assert_has_migration_context (self .server_a ['id' ])
312+ self .assertIsInstance (
313+ self ._get_migration_context (self .server_a ['id' ]),
314+ objects .MigrationContext )
294315
295316 # During the migration, server_a is consuming CPUs 0,1 on host_a, while
296317 # all 4 of host_b's CPU are consumed by server_b and the incoming
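The rollback stub is the converse of the positive one: instead of signalling
completion, it makes the fake domain's job stats report an error so that the
migration monitoring loop rolls back. Roughly (again assuming a fail-side
helper on fakelibvirt's Domain, mirroring the completion helper; the exact
name is not shown in this diff):

    # make Domain.jobStats() report a failed job, triggering rollback on
    # the source
    conn = self.computes['host_a'].driver._host.get_connection()
    dom = conn.lookupByUUIDString(self.server_a['id'])
    dom.fail_job()  # assumed counterpart to complete_job()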
@@ -332,15 +353,15 @@ def _test(self, pin_dest=False):
             self._rpc_pin_host('host_b')
         self._live_migrate(self.server_a, 'error')
         self.assertEqual('host_a', self.get_host(self.server_a['id']))
-        self._assert_no_migration_context(self.server_a['id'])
+        self.assertIsNone(self._get_migration_context(self.server_a['id']))
 
         # Check consumed and pinned CPUs. Things should be as they were before
         # the live migration, with CPUs 0,1 consumed on both hosts by the 2
         # servers.
         self._assert_host_consumed_cpus('host_a', [0, 1])
         self._assert_host_consumed_cpus('host_b', [0, 1])
         self._assert_instance_pinned_cpus(self.server_a['id'],
-                                          ['0', '1'], [0, 1])
+                                          [0, 1], [0, 1])
 
     def test_rollback(self):
         self._test()
@@ -403,15 +424,8 @@ def _test(self, pin_source, pin_cond, expect_success=True):
         extra_spec = {'hw:numa_nodes': 1,
                       'hw:cpu_policy': 'dedicated'}
         flavor = self._create_flavor(vcpu=2, extra_spec=extra_spec)
-        server = self._build_server(
-            flavor_id=flavor,
-            image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6')
-        server['networks'] = 'none'
-        post = {'server': server}
-        server1 = self.api.post_server(post)
-        server2 = self.api.post_server(post)
-        self._wait_for_state_change(server1, 'ACTIVE')
-        self._wait_for_state_change(server2, 'ACTIVE')
+        server1 = self._create_server(flavor_id=flavor, networks='none')
+        server2 = self._create_server(flavor_id=flavor, networks='none')
         if self.get_host(server1['id']) == 'source':
             self.migrating_server = server1
         else:
@@ -445,7 +459,8 @@ def _migrate_stub(self, domain, destination, params, flags):
         # NOTE(artom) This is the crucial bit: by asserting that the migrating
         # instance has no migration context, we're making sure that we're
         # hitting the old, pre-claims code paths.
-        self._assert_no_migration_context(self.migrating_server['id'])
+        self.assertIsNone(
+            self._get_migration_context(self.migrating_server['id']))
         dest = self.computes['dest']
         dest.driver._host.get_connection().createXML(
             params['destination_xml'],
@@ -475,7 +490,8 @@ def _migrate_stub(self, domain, destination, params, flags):
         # NOTE(artom) This is the crucial bit: by asserting that the migrating
         # instance has no migration context, we're making sure that we're
         # hitting the old, pre-claims code paths.
-        self._assert_no_migration_context(self.migrating_server['id'])
+        self.assertIsNone(
+            self._get_migration_context(self.migrating_server['id']))
         source = self.computes['source']
         conn = source.driver._host.get_connection()
         dom = conn.lookupByUUIDString(self.migrating_server['id'])
@@ -531,7 +547,7 @@ def test_insufficient_resources(self):
             check_response_status=[500])
         self._wait_for_state_change(server, 'ACTIVE')
         self._wait_for_migration_status(server, ['error'])
-        self._assert_no_migration_context(server['id'])
+        self.assertIsNone(self._get_migration_context(server['id']))
         self.assertEqual('host_a', self.get_host(server['id']))
         log_out = self.stdlog.logger.output
         self.assertIn('Migration pre-check error: '
@@ -577,7 +593,7 @@ def test_different_page_sizes(self):
         self._wait_for_state_change(server, 'ACTIVE')
         self._wait_for_migration_status(server, ['error'])
         self.assertEqual(initial_host, self.get_host(server['id']))
-        self._assert_no_migration_context(server['id'])
+        self.assertIsNone(self._get_migration_context(server['id']))
         log_out = self.stdlog.logger.output
         self.assertIn('Migration pre-check error: '
                       'Insufficient compute resources: '