 from nova.scheduler.client import report
 from nova import utils
 from nova.virt import hardware
+from nova.virt import node


 CONF = nova.conf.CONF
@@ -668,50 +669,6 @@ def disabled(self, nodename):
         return (nodename not in self.compute_nodes or
                 not self.driver.node_is_available(nodename))

-    def _check_for_nodes_rebalance(self, context, resources, nodename):
-        """Check if nodes rebalance has happened.
-
-        The ironic driver maintains a hash ring mapping bare metal nodes
-        to compute nodes. If a compute dies, the hash ring is rebuilt, and
-        some of its bare metal nodes (more precisely, those not in ACTIVE
-        state) are assigned to other computes.
-
-        This method checks for this condition and adjusts the database
-        accordingly.
-
-        :param context: security context
-        :param resources: initial values
-        :param nodename: node name
-        :returns: True if a suitable compute node record was found, else False
-        """
-        if not self.driver.rebalances_nodes:
-            return False
-
-        # Its possible ironic just did a node re-balance, so let's
-        # check if there is a compute node that already has the correct
-        # hypervisor_hostname. We can re-use that rather than create a
-        # new one and have to move existing placement allocations
-        cn_candidates = objects.ComputeNodeList.get_by_hypervisor(
-            context, nodename)
-
-        if len(cn_candidates) == 1:
-            cn = cn_candidates[0]
-            LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s",
-                     {"name": nodename, "old": cn.host, "new": self.host})
-            cn.host = self.host
-            self.compute_nodes[nodename] = cn
-            self._copy_resources(cn, resources)
-            self._setup_pci_tracker(context, cn, resources)
-            self._update(context, cn)
-            return True
-        elif len(cn_candidates) > 1:
-            LOG.error(
-                "Found more than one ComputeNode for nodename %s. "
-                "Please clean up the orphaned ComputeNode records in your DB.",
-                nodename)
-
-        return False
-

     def _init_compute_node(self, context, resources):
         """Initialize the compute node if it does not already exist.
@@ -729,6 +686,7 @@ def _init_compute_node(self, context, resources):
         False otherwise
         """
         nodename = resources['hypervisor_hostname']
+        node_uuid = resources['uuid']

         # if there is already a compute node just use resources
         # to initialize
@@ -740,16 +698,30 @@ def _init_compute_node(self, context, resources):

         # now try to get the compute node record from the
         # database. If we get one we use resources to initialize
-        cn = self._get_compute_node(context, nodename)
+
+        # We use read_deleted=True so that we will find and recover a deleted
+        # node object, if necessary.
+        with utils.temporary_mutation(context, read_deleted='yes'):
+            cn = self._get_compute_node(context, node_uuid)
+            if cn and cn.deleted:
+                # Undelete and save this right now so that everything below
+                # can continue without read_deleted=yes
+                LOG.info('Undeleting compute node %s', cn.uuid)
+                cn.deleted = False
+                cn.deleted_at = None
+                cn.save()
         if cn:
+            if cn.host != self.host:
+                LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s",
+                         {"name": nodename, "old": cn.host, "new": self.host})
+                cn.host = self.host
+                self._update(context, cn)
+
             self.compute_nodes[nodename] = cn
             self._copy_resources(cn, resources)
             self._setup_pci_tracker(context, cn, resources)
             return False

-        if self._check_for_nodes_rebalance(context, resources, nodename):
-            return False
-
         # there was no local copy and none in the database
         # so we need to create a new compute node. This needs
         # to be initialized with resource values.
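
Note: the recovery path above leans on nova.utils.temporary_mutation to widen the
DB query without leaking read_deleted='yes' into the rest of the update. A minimal
sketch of the idea behind that helper (illustrative only; the real implementation
in nova/utils.py also handles dict items and attributes that are initially unset):

    from contextlib import contextmanager

    @contextmanager
    def temporary_mutation(obj, **kwargs):
        # Remember the current values, apply the temporary overrides,
        # and restore the originals on exit, even if the block raises.
        originals = {attr: getattr(obj, attr) for attr in kwargs}
        for attr, value in kwargs.items():
            setattr(obj, attr, value)
        try:
            yield
        finally:
            for attr, value in originals.items():
                setattr(obj, attr, value)
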
@@ -889,6 +861,14 @@ def update_available_resource(self, context, nodename, startup=False):
         # contains a non-None value, even for non-Ironic nova-compute hosts. It
         # is this value that will be populated in the compute_nodes table.
         resources['host_ip'] = CONF.my_ip
+        if 'uuid' not in resources:
+            # NOTE(danms): Any driver that does not provide a uuid per
+            # node gets the locally-persistent compute_id. Only ironic
+            # should be setting the per-node uuid (and returning
+            # multiple nodes in general). If this is the first time we
+            # are creating a compute node on this host, we will
+            # generate and persist this uuid for the future.
+            resources['uuid'] = node.get_local_node_uuid()

         # We want the 'cpu_info' to be None from the POV of the
         # virt driver, but the DB requires it to be non-null so
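
Note: the uuid default above is why nova.virt.node is newly imported at the top of
the file. A hedged sketch of the read-or-create behavior the comment describes for
get_local_node_uuid() (the file name and path here are assumptions for
illustration, not necessarily nova's actual layout):

    import os
    import uuid

    COMPUTE_ID_FILE = '/var/lib/nova/compute_id'  # assumed path, for illustration

    def get_local_node_uuid():
        # Reuse the persisted identity so the node uuid stays stable
        # across nova-compute restarts and re-creations of the DB record.
        if os.path.exists(COMPUTE_ID_FILE):
            with open(COMPUTE_ID_FILE) as f:
                return f.read().strip()
        # First start on this host: generate an identity and persist it.
        new_uuid = str(uuid.uuid4())
        with open(COMPUTE_ID_FILE, 'w') as f:
            f.write(new_uuid)
        return new_uuid
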
@@ -1014,14 +994,13 @@ def _update_available_resource(self, context, resources, startup=False):
         if startup:
             self._check_resources(context)

-    def _get_compute_node(self, context, nodename):
+    def _get_compute_node(self, context, node_uuid):
         """Returns compute node for the host and nodename."""
         try:
-            return objects.ComputeNode.get_by_host_and_nodename(
-                context, self.host, nodename)
+            return objects.ComputeNode.get_by_uuid(context, node_uuid)
         except exception.NotFound:
             LOG.warning("No compute node record for %(host)s:%(node)s",
-                        {'host': self.host, 'node': nodename})
+                        {'host': self.host, 'node': node_uuid})

     def _report_hypervisor_resource_view(self, resources):
         """Log the hypervisor's view of free resources.