@@ -929,6 +929,87 @@ def test_resize_revert_bug_1944759(self):
929
929
self ._assert_pinned_cpus (src_host , 2 )
930
930
self ._assert_pinned_cpus (dst_host , 0 )
931
931
932
    def test_resize_dedicated_policy_race_on_dest_bug_1953359(self):
        """Reproduce bug 1953359: a resize of a pinned-CPU instance racing
        with the update_available_resource periodic on the destination host.

        Two single-NUMA computes are configured with the same dedicated CPU
        set.  An instance pinned to pCPU 0 runs on each host.  The instance
        on compute1 is then migrated to compute2; the migration is paused at
        the start of finish_resize, where a racing periodic is triggered.
        The resize claim correctly repins the incoming instance to pCPU 1,
        but the periodic applies the *source* host topology (pCPU 0) on the
        destination, which collides with the instance already pinned there —
        the test asserts that failure as the bug's signature.
        """

        # Three dedicated pCPUs per host, no shared set, legacy pinning
        # option cleared.
        self.flags(cpu_dedicated_set='0-2', cpu_shared_set=None,
                   group='compute')
        self.flags(vcpu_pin_set=None)

        # Single NUMA node, 2 cores, no SMT — NOTE(review): only pCPUs 0-1
        # of the configured 0-2 set actually exist on this fake host.
        host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
                                         cpu_cores=2, cpu_threads=1)
        self.start_compute(host_info=host_info, hostname='compute1')

        extra_spec = {
            'hw:cpu_policy': 'dedicated',
        }
        flavor_id = self._create_flavor(vcpu=1, extra_spec=extra_spec)
        expected_usage = {'DISK_GB': 20, 'MEMORY_MB': 2048, 'PCPU': 1}

        server = self._run_build_test(flavor_id, expected_usage=expected_usage)

        inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
        self.assertEqual(1, len(inst.numa_topology.cells))
        # assert that the pcpu 0 is used on compute1
        self.assertEqual({'0': 0}, inst.numa_topology.cells[0].cpu_pinning_raw)

        # start another compute with the same config
        self.start_compute(host_info=host_info, hostname='compute2')

        # boot another instance but now on compute2 so that it occupies the
        # pcpu 0 on compute2
        # NOTE(gibi): _run_build_test cannot be used here as it assumes only
        # compute1 exists
        server2 = self._create_server(
            flavor_id=flavor_id,
            host='compute2',
        )
        inst2 = objects.Instance.get_by_uuid(self.ctxt, server2['id'])
        self.assertEqual(1, len(inst2.numa_topology.cells))
        # assert that the pcpu 0 is used
        self.assertEqual(
            {'0': 0}, inst2.numa_topology.cells[0].cpu_pinning_raw)

        # migrate the first instance from compute1 to compute2 but stop
        # migrating at the start of finish_resize. Then start a racing periodic
        # update_available_resources.

        def fake_finish_resize(*args, **kwargs):
            # start a racing update_available_resource periodic
            self._run_periodics()
            # we expect it that CPU pinning fails on the destination node
            # as the resource_tracker will use the source node numa_topology
            # and that does not fit to the dest node as pcpu 0 in the dest
            # is already occupied.

        # TODO(stephenfin): The mock of 'migrate_disk_and_power_off' should
        # probably be less...dumb
        with mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                        '.migrate_disk_and_power_off', return_value='{}'):
            # Replacing finish_resize entirely halts the migration right
            # before the destination would normally apply the new topology,
            # leaving the race window open for the periodic above.
            with mock.patch(
                'nova.compute.manager.ComputeManager.finish_resize'
            ) as mock_finish_resize:
                mock_finish_resize.side_effect = fake_finish_resize
                post = {'migrate': None}
                self.admin_api.post_server_action(server['id'], post)

        log = self.stdlog.logger.output
        # The resize_claim correctly calculates that the inst1 should be pinned
        # to pcpu id 1 instead of 0
        self.assertIn(
            'Computed NUMA topology CPU pinning: usable pCPUs: [[1]], '
            'vCPUs mapping: [(0, 1)]',
            log,
        )
        # But the periodic fails as it tries to apply the source topology on
        # the dest. This is bug 1953359.
        log = self.stdlog.logger.output
        self.assertIn('Error updating resources for node compute2', log)
        self.assertIn(
            'nova.exception.CPUPinningInvalid: CPU set to pin [0] must be '
            'a subset of free CPU set [1]',
            log,
        )
932
1013
933
1014
class NUMAServerTestWithCountingQuotaFromPlacement (NUMAServersTest ):
934
1015
0 commit comments