
Commit 0318016

Zuul authored and openstack-gerrit committed
Merge "Add nova-manage ironic-compute-node-move"
2 parents 68b2131 + 9068db0 commit 0318016


4 files changed: 141 additions, 1 deletion


doc/source/cli/nova-manage.rst

Lines changed: 41 additions & 0 deletions
@@ -465,6 +465,47 @@ command completed successfully with exit code 0.
    * - 127
      - Invalid input was provided.
 
+db ironic_compute_node_move
+---------------------------
+
+.. program:: nova-manage db ironic_compute_node_move
+
+.. code-block:: shell
+
+   nova-manage db ironic_compute_node_move --ironic-node-uuid <uuid> --destination-host <host>
+
+Move ironic nodes, along with any associated instances,
+between nova-compute services.
+
+This is useful when migrating away from using peer_list and multiple
+hash ring balanced nova-compute servers to the new ironic shard system.
+
+First you must turn off the nova-compute service that currently manages
+the Ironic host. Second you mark that nova-compute service as forced down
+via the Nova API. Third, you ensure the new nova-compute service is
+correctly configured to target the appropriate shard (and optionally
+also a conductor group). Finally, most Ironic nodes should now move to
+the new service, but any Ironic nodes with instances on them
+will need to be manually moved to their new nova-compute service
+by using this nova-manage command.
+
+.. versionadded:: 28.0.0 (2023.2 Bobcat)
+
+.. rubric:: Options
+
+.. option:: --ironic-node-uuid <uuid>
+
+   Ironic node uuid to be moved (which is also the Nova compute node uuid
+   and the uuid of the corresponding resource provider in Placement).
+
+   The Nova compute service that currently manages this Ironic node
+   must first be marked as "forced down" via the Nova API, in a similar
+   way to a down hypervisor that is about to have its VMs evacuated to
+   a replacement hypervisor.
+
+.. option:: --destination-host <host>
+
+   Destination ironic nova-compute service CONF.host.
 
 API Database Commands
 =====================
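
The workflow described in the new documentation can be sketched as a short shell session. This is illustrative only: the host names, the node UUID, and the service unit name below are placeholders, and marking a service as forced down through the openstack client assumes compute API microversion 2.11 or later is available.

    # 1. Stop the nova-compute service that currently manages the Ironic node
    #    (the systemd unit name varies by distribution; "nova-compute" is assumed).
    ssh old-ironic-compute sudo systemctl stop nova-compute

    # 2. Mark that service as forced down via the Nova API.
    openstack compute service set --down old-ironic-compute nova-compute

    # 3. Configure and start the replacement nova-compute service with the
    #    desired [ironic]shard (and optionally [ironic]conductor_group).

    # 4. Move each Ironic node that still has an instance on it to the new
    #    service (UUID and destination host are placeholders).
    nova-manage db ironic_compute_node_move \
        --ironic-node-uuid 3a8a86b5-9442-4a44-8f2f-74efbc2d2b3a \
        --destination-host new-ironic-compute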

nova/cmd/manage.py

Lines changed: 41 additions & 0 deletions
@@ -623,6 +623,47 @@ def online_data_migrations(self, max_count=None):
         # "there are more migrations, but not completable right now"
         return ran and 1 or 0
 
+    @args('--ironic-node-uuid', metavar='<uuid>', dest='compute_node_uuid',
+          help='UUID of Ironic node to be moved between services')
+    @args('--destination-host', metavar='<host>',
+          dest='destination_service_host',
+          help='Destination ironic nova-compute service CONF.host')
+    def ironic_compute_node_move(self, compute_node_uuid,
+                                 destination_service_host):
+        ctxt = context.get_admin_context()
+
+        destination_service = objects.Service.get_by_compute_host(
+            ctxt, destination_service_host)
+        if destination_service.forced_down:
+            raise exception.NovaException(
+                "Destination compute is forced down!")
+
+        target_compute_node = objects.ComputeNode.get_by_uuid(
+            ctxt, compute_node_uuid)
+        source_service = objects.Service.get_by_id(
+            ctxt, target_compute_node.service_id)
+        if not source_service.forced_down:
+            raise exception.NovaException(
+                "Source service is not yet forced down!")
+
+        instances = objects.InstanceList.get_by_host_and_node(
+            ctxt, target_compute_node.host,
+            target_compute_node.hypervisor_hostname)
+        if len(instances) > 1:
+            raise exception.NovaException(
+                "Found an ironic host with more than one instance! "
+                "Please delete all Nova instances that do not match "
+                "the instance uuid recorded on the Ironic node.")
+
+        target_compute_node.service_id = destination_service.id
+        target_compute_node.host = destination_service.host
+        target_compute_node.save()
+
+        for instance in instances:
+            # this is a bit like evacuate, except no need to rebuild
+            instance.host = destination_service.host
+            instance.save()
+
 
 class ApiDbCommands(object):
     """Class for managing the api database."""

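The command above only rewrites records in the cell database: the compute node's service_id and host, plus the host of any instance found on the node; as the inline comment notes, nothing is rebuilt on the Ironic side. A rough post-move check could look like the following (the server name and hosts are placeholders, and the exact field names depend on the client version):

    # The instance's host attribute (OS-EXT-SRV-ATTR:host or host, depending on
    # the client) should now point at the destination service's CONF.host.
    openstack server show my-baremetal-server

    # The old service should still report as down, the new one as up.
    openstack compute service list --service nova-compute
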
nova/tests/functional/test_nova_manage.py

Lines changed: 54 additions & 1 deletion
@@ -163,7 +163,9 @@ def setUp(self):
                 user_id=self.context.user_id,
                 project_id=self.context.project_id,
                 flavor=flavor,
-                node=cn.hypervisor_hostname)
+                node=cn.hypervisor_hostname,
+                host=cn.host,
+                compute_id=cn.id)
             inst.create()
             self.insts.append(inst)
 

@@ -173,6 +175,57 @@ def setUp(self):
                       if i.node == self.cn4.hypervisor_hostname]
 
 
+class TestIronicComputeNodeMove(NovaManageDBIronicTest):
+    """Functional tests for "nova-manage db ironic_compute_node_move" CLI."""
+    api_major_version = 'v2.1'
+
+    def setUp(self):
+        super(TestIronicComputeNodeMove, self).setUp()
+        self.enforce_fk_constraints()
+        self.cli = manage.DbCommands()
+        self.output = StringIO()
+        self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
+
+    def test_ironic_compute_node_move_success(self):
+        self.service1.forced_down = True
+        self.service1.save()
+        self.assertEqual(self.service1.id, self.cn1.service_id)
+        # move cn1 on service1 to service2
+        node_uuid = self.cn1.uuid
+        dest_host = self.service2.host
+
+        self.commands.ironic_compute_node_move(node_uuid, dest_host)
+
+        # check the compute node got moved to service 2
+        updated_cn1 = objects.ComputeNode.get_by_id(self.context, self.cn1.id)
+        self.assertEqual(self.service2.id, updated_cn1.service_id)
+        self.assertEqual(self.service2.host, updated_cn1.host)
+        # check the instance got moved too
+        updated_instance = objects.Instance.get_by_id(
+            self.context, self.insts[0].id)
+        self.assertEqual(self.service2.host, updated_instance.host)
+
+    def test_ironic_compute_node_move_raise_not_forced_down(self):
+        node_uuid = self.cn1.uuid
+        dest_host = self.service2.host
+
+        self.assertRaises(exception.NovaException,
+                          self.commands.ironic_compute_node_move,
+                          node_uuid, dest_host)
+
+    def test_ironic_compute_node_move_raise_forced_down(self):
+        self.service1.forced_down = True
+        self.service1.save()
+        self.service2.forced_down = True
+        self.service2.save()
+        node_uuid = self.cn1.uuid
+        dest_host = self.service2.host
+
+        self.assertRaises(exception.NovaException,
+                          self.commands.ironic_compute_node_move,
+                          node_uuid, dest_host)
+
+
 class NovaManageCellV2Test(test.TestCase):
     def setUp(self):
         super(NovaManageCellV2Test, self).setUp()
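
To exercise the new functional tests locally, something along these lines should work from a nova source checkout, assuming the usual tox setup (the functional environment name can differ between branches and Python versions):

    tox -e functional -- nova.tests.functional.test_nova_manage.TestIronicComputeNodeMove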

releasenotes/notes/ironic-shards-5641e4b1ab5bb7aa.yaml

Lines changed: 5 additions & 0 deletions
@@ -10,3 +10,8 @@ features:
     which ironic nodes are managed by each nova-compute service.
     Note that when you use ``[ironic]shard`` the ``[ironic]peer_list``
     is hard coded to a single nova-compute service.
+
+    There is a new nova-manage command ``db ironic_compute_node_move`` that
+    can be used to move ironic nodes, and the associated instances, between
+    nova-compute services. This is useful when migrating from the legacy
+    hash ring based HA towards the new sharding approach.
