diff --git a/dev-tools/reconfigurator-cli/src/lib.rs b/dev-tools/reconfigurator-cli/src/lib.rs
index 7d67b5fc83..69a6e21415 100644
--- a/dev-tools/reconfigurator-cli/src/lib.rs
+++ b/dev-tools/reconfigurator-cli/src/lib.rs
@@ -237,6 +237,12 @@ fn process_command(
         }
         Commands::SledUpdateRot(args) => cmd_sled_update_rot(sim, args),
         Commands::SledUpdateSp(args) => cmd_sled_update_sp(sim, args),
+        Commands::SledUpdateHostPhase1(args) => {
+            cmd_sled_update_host_phase_1(sim, args)
+        }
+        Commands::SledUpdateHostPhase2(args) => {
+            cmd_sled_update_host_phase_2(sim, args)
+        }
         Commands::SledUpdateRotBootloader(args) => {
             cmd_sled_update_rot_bootlaoder(sim, args)
         }
@@ -300,6 +306,10 @@ enum Commands {
     SledUpdateRotBootloader(SledUpdateRotBootloaderArgs),
     /// simulate updating the sled's SP versions
     SledUpdateSp(SledUpdateSpArgs),
+    /// simulate updating the sled's host OS phase 1 artifacts
+    SledUpdateHostPhase1(SledUpdateHostPhase1Args),
+    /// simulate updating the sled's host OS phase 2 artifacts
+    SledUpdateHostPhase2(SledUpdateHostPhase2Args),
 
     /// list silos
     SiloList,
@@ -545,6 +555,42 @@ struct SledUpdateRotArgs {
     slot_b: Option,
 }
 
+#[derive(Debug, Args)]
+struct SledUpdateHostPhase1Args {
+    /// id of the sled
+    sled_id: SledOpt,
+
+    /// sets which phase 1 slot is active
+    #[clap(long, value_parser = parse_m2_slot)]
+    active: Option<M2Slot>,
+
+    /// sets the artifact hash reported for host OS phase 1 slot A
+    #[clap(long)]
+    slot_a: Option<ArtifactHash>,
+
+    /// sets the artifact hash reported for host OS phase 1 slot B
+    #[clap(long)]
+    slot_b: Option<ArtifactHash>,
+}
+
+#[derive(Debug, Args)]
+struct SledUpdateHostPhase2Args {
+    /// id of the sled
+    sled_id: SledOpt,
+
+    /// sets which phase 2 slot is the boot disk
+    #[clap(long, value_parser = parse_m2_slot)]
+    boot_disk: Option<M2Slot>,
+
+    /// sets the artifact hash reported for host OS phase 2 slot A
+    #[clap(long)]
+    slot_a: Option<ArtifactHash>,
+
+    /// sets the artifact hash reported for host OS phase 2 slot B
+    #[clap(long)]
+    slot_b: Option<ArtifactHash>,
+}
+
 #[derive(Debug, Args)]
 struct SledSetMupdateOverrideArgs {
     #[clap(flatten)]
@@ -1689,6 +1735,90 @@ fn cmd_sled_update_rot(
     )))
 }
 
+fn cmd_sled_update_host_phase_1(
+    sim: &mut ReconfiguratorSim,
+    args: SledUpdateHostPhase1Args,
+) -> anyhow::Result<Option<String>> {
+    let SledUpdateHostPhase1Args { sled_id, active, slot_a, slot_b } = args;
+
+    let mut labels = Vec::new();
+    if let Some(active) = active {
+        labels.push(format!("active -> {active:?}"));
+    }
+    if let Some(slot_a) = slot_a {
+        labels.push(format!("A -> {slot_a}"));
+    }
+    if let Some(slot_b) = slot_b {
+        labels.push(format!("B -> {slot_b}"));
+    }
+    if labels.is_empty() {
+        bail!("sled-update-host-phase1 called with no changes");
+    }
+
+    let mut state = sim.current_state().to_mut();
+    let system = state.system_mut();
+    let sled_id = sled_id.to_sled_id(system.description())?;
+    system
+        .description_mut()
+        .sled_update_host_phase_1_artifacts(sled_id, active, slot_a, slot_b)?;
+
+    sim.commit_and_bump(
+        format!(
+            "reconfigurator-cli sled-update-host-phase1: {sled_id}: {}",
+            labels.join(", "),
+        ),
+        state,
+    );
+
+    Ok(Some(format!(
+        "set sled {} host phase 1 details: {}",
+        sled_id,
+        labels.join(", ")
+    )))
+}
+
+fn cmd_sled_update_host_phase_2(
+    sim: &mut ReconfiguratorSim,
+    args: SledUpdateHostPhase2Args,
+) -> anyhow::Result<Option<String>> {
+    let SledUpdateHostPhase2Args { sled_id, boot_disk, slot_a, slot_b } = args;
+
+    let mut labels = Vec::new();
+    if let Some(boot_disk) = boot_disk {
+        labels.push(format!("boot_disk -> {boot_disk:?}"));
+    }
+    if let Some(slot_a) = slot_a {
+        labels.push(format!("A -> 
{slot_a}")); + } + if let Some(slot_b) = slot_b { + labels.push(format!("B -> {slot_b}")); + } + if labels.is_empty() { + bail!("sled-update-host-phase2 called with no changes"); + } + + let mut state = sim.current_state().to_mut(); + let system = state.system_mut(); + let sled_id = sled_id.to_sled_id(system.description())?; + system.description_mut().sled_update_host_phase_2_artifacts( + sled_id, boot_disk, slot_a, slot_b, + )?; + + sim.commit_and_bump( + format!( + "reconfigurator-cli sled-update-host-phase2: {sled_id}: {}", + labels.join(", "), + ), + state, + ); + + Ok(Some(format!( + "set sled {} host phase 2 details: {}", + sled_id, + labels.join(", ") + ))) +} + fn cmd_inventory_list( sim: &mut ReconfiguratorSim, ) -> anyhow::Result> { diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds-mupdate-update-flow.txt b/dev-tools/reconfigurator-cli/tests/input/cmds-mupdate-update-flow.txt index 910db0cfce..5c4585aa2f 100644 --- a/dev-tools/reconfigurator-cli/tests/input/cmds-mupdate-update-flow.txt +++ b/dev-tools/reconfigurator-cli/tests/input/cmds-mupdate-update-flow.txt @@ -123,9 +123,33 @@ blueprint-diff latest # planner starts working. sled-set serial1 mupdate-override unset inventory-generate + +# This will attempt to update the first sled's host OS. Walk through that update +# and the host OS of the two other sleds. blueprint-plan latest latest blueprint-show latest blueprint-diff latest +sled-update-host-phase2 serial0 --boot-disk B --slot-b 0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3 +sled-update-host-phase1 serial0 --active B --slot-b 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6 +inventory-generate + +# Second sled host OS +blueprint-plan latest latest +blueprint-diff latest +sled-update-host-phase2 serial1 --boot-disk B --slot-b 0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3 +sled-update-host-phase1 serial1 --active B --slot-b 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6 +inventory-generate + +# Third sled host OS +blueprint-plan latest latest +blueprint-diff latest +sled-update-host-phase2 serial2 --boot-disk B --slot-b 0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3 +sled-update-host-phase1 serial2 --active B --slot-b 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6 +inventory-generate + +# All host OS updates complete +blueprint-plan latest latest +blueprint-diff latest # Test that the add-zones-with-mupdate-override chicken switch works as # expected. We do this by: diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt b/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt index 13b7e15cee..7cf40d7261 100644 --- a/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt +++ b/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt @@ -42,18 +42,16 @@ blueprint-diff latest # Now, update the simulated RoT bootloader to reflect that the update completed. # Collect inventory from it and use that collection for another planning step. # This should report that the update completed, remove that update, and add one -# for an SP on the same sled. +# for an RoT on the same sled. sled-update-rot-bootloader 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 --stage0 1.0.0 inventory-generate blueprint-plan latest latest blueprint-diff latest # After the RoT bootloader update has completed, we update the simulated RoT to -# reflect that update has completed as well. 
-# Like before, collect inventory from it and use that collection for the next -# step. -# This should report that the update completed, remove that update, and add one -# for another sled. +# reflect that update has completed as well. Like before, collect inventory from +# it and use that collection for the next step. This should report that the +# update completed, remove that update, and add one for another sled. sled-update-rot 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 --slot-a 1.0.0 inventory-generate blueprint-plan latest latest @@ -61,7 +59,43 @@ blueprint-diff latest # We repeat the same procedure with the SP sled-update-sp 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 --active 1.0.0 + +# Collect inventory from it and use that collection for another planning step. +# This should report that the update completed, remove that update, and add a +# host OS update for this same sled. +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +# If we generate another plan, there should be no change. +blueprint-plan latest latest +blueprint-diff latest + +# Update only the simulated host phase 2; this is a prerequisite for updating +# the phase 1, and must be done first. +sled-update-host-phase2 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 --slot-b f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008 +inventory-generate + +# Planning after only phase 2 has changed should make no changes. We're still +# waiting on phase 1 to change. +blueprint-plan latest latest +blueprint-diff latest + +# Now update the simulated SP to reflect that the phase 1 update is done. +sled-update-host-phase1 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 --active B --slot-b 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89 inventory-generate + +# Planning _still_ shouldn't make any new changes; the OS update as a whole +# isn't done until sled-agent reports it has booted from the new image. +blueprint-plan latest latest +blueprint-diff latest + +# Update the sled's boot disk; this finishes the host OS update. +sled-update-host-phase2 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 --boot-disk B +inventory-generate + +# Planning should now remove the host OS update and plan the next RoT bootloader +# update. blueprint-plan latest latest blueprint-diff latest @@ -107,9 +141,30 @@ blueprint-plan latest latest blueprint-diff latest # Let's simulate the successful SP update as well. -# A few more planning steps should try to update the last sled. sled-update-sp 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c --active 1.0.0 inventory-generate + +# Planning should remove this update and add an OS update for this sled. +blueprint-plan latest latest +blueprint-diff latest + +# Try a host OS impossible update replacement: write an unknown artifact to the +# sled's phase 1. The planner should realize the update is impossible and +# replace it. As with the impossible SP update test above, we have to bump the +# "ignore impossible MGS updates" timestamp.) +set ignore-impossible-mgs-updates-since now +sled-update-host-phase1 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c --slot-b ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +# Now simulate the update completing successfully. 
+sled-update-host-phase2 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c --boot-disk B --slot-b f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008 +sled-update-host-phase1 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c --active B --slot-b 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89 +inventory-generate + +# Another planning step should try to update the last sled, starting with the +# RoT bootloader. blueprint-plan latest latest blueprint-diff latest @@ -127,13 +182,22 @@ inventory-generate blueprint-plan latest latest blueprint-diff latest -# Finish updating the last sled and do one more planning run. -# This should update one control plane zone. +# Finish updating the last sled's SP. +# There should be a pending host phase 1 update. sled-update-sp d81c6a84-79b8-4958-ae41-ea46c9b19763 --active 1.0.0 inventory-generate blueprint-plan latest latest blueprint-diff latest +# Finish updating the last sled's host OS. +sled-update-host-phase2 d81c6a84-79b8-4958-ae41-ea46c9b19763 --boot-disk B --slot-b f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008 +sled-update-host-phase1 d81c6a84-79b8-4958-ae41-ea46c9b19763 --active B --slot-b 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89 +inventory-generate + +# Do one more planning run. This should update one control plane zone. +blueprint-plan latest latest +blueprint-diff latest + # We should continue walking through the update. We need to build out a # reconfigurator-cli subcommand to simulate updated zone image sources (just -# like we have sled-update-sp for simulated SP updates). \ No newline at end of file +# like we have sled-update-sp for simulated SP updates). diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout index 77969fd7d1..9634b16e63 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout @@ -2178,6 +2178,9 @@ set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c mupdate override: error -> unset > inventory-generate generated inventory collection 0b5efbb3-0b1b-4bbf-b7d8-a2d6fca074c6 from configured sleds + +> # This will attempt to update the first sled's host OS. Walk through that update +> # and the host OS of the two other sleds. 
> blueprint-plan latest latest INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 7, num_already_artifact: 0, num_eligible: 0, num_ineligible: 7 INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 6, num_already_artifact: 6, num_eligible: 0, num_ineligible: 0 @@ -2185,17 +2188,8 @@ INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae4 WARN cannot configure RoT bootloader update for board (no matching artifact), serial_number: serial0, part_number: model0 WARN cannot configure RoT update for board (no matching artifact), serial_number: serial0, part_number: model0 WARN cannot configure SP update for board (no matching artifact), serial_number: serial0, part_number: model0 -INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure RoT bootloader update for board (no matching artifact), serial_number: serial1, part_number: model1 -WARN cannot configure RoT update for board (no matching artifact), serial_number: serial1, part_number: model1 -WARN cannot configure SP update for board (no matching artifact), serial_number: serial1, part_number: model1 -INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 -WARN cannot configure RoT bootloader update for board (no matching artifact), serial_number: serial2, part_number: model2 -WARN cannot configure RoT update for board (no matching artifact), serial_number: serial2, part_number: model2 -WARN cannot configure SP update for board (no matching artifact), serial_number: serial2, part_number: model2 -INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 -INFO ran out of boards for MGS-driven update -INFO some zones not yet up-to-date, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zones_currently_updating: [ZoneCurrentlyUpdating { zone_id: 0c71b3b2-6ceb-4e8f-b020-b08675e83038 (service), zone_kind: Nexus, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("0e32b4a3e5d3668bb1d6a16fb06b74dc60b973fa479dcee0aae3adbb52bf1388") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: 427ec88f-f467-42fa-9bbb-66a91a36103c (service), zone_kind: InternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("ffbf1373f7ee08dddd74c53ed2a94e7c4c572a982d3a9bc94000c6956b700c6a") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: 5199c033-4cf9-4ab6-8ae7-566bd7606363 (service), zone_kind: Crucible, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("6f17cf65fb5a5bec5542dd07c03cd0acc01e59130f02c532c8d848ecae810047") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: 6444f8a5-6465-4f0b-a549-1993c113569c (service), zone_kind: InternalNtp, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: 803bfb63-c246-41db-b0da-d3b87ddfc63d (service), zone_kind: ExternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: 
ArtifactHash("ccca13ed19b8731f9adaf0d6203b02ea3b9ede4fa426b9fac0a07ce95440046d") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: ba4994a8-23f9-4b1a-a84f-a08d74591389 (service), zone_kind: CruciblePantry, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("21f0ada306859c23917361f2e0b9235806c32607ec689c7e8cf16bb898bc5a02") }, inv_image_source: InstallDataset } }] +INFO configuring MGS-driven update, artifact_version: 2.0.0, artifact_hash: 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6, sled_agent_address: [fd00:1122:3344:101::1]:12345, expected_inactive_phase_2_hash: 0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 +INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 based on parent blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 planning report for blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1: chicken switches: @@ -2203,6 +2197,9 @@ chicken switches: * skipping noop image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 6 zones are already from artifacts * skipping noop image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 6 zones are already from artifacts +* 1 pending MGS update: + * model0:serial0: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3"), sled_agent_address: [fd00:1122:3344:101::1]:12345 }) +* zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
> blueprint-show latest @@ -2259,7 +2256,54 @@ parent: ce365dff-2cdb-4f35-a186-b15e20e1e700 - sled: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 7) + sled: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 8) + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 2.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/external_dns a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/internal_dns 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 ad41be71-6c15-4428-b510-20ceacde4fa6 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 1bca7f71-5e42-4749-91ec-fa40793a3a9a in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 3ac089c9-9dec-465b-863a-188e80d71fb4 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 686c19cf-a0d7-45f6-866f-c564612b2664 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + + + omicron zones: + ----------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ----------------------------------------------------------------------------------------------------------------------- + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 artifact: version 1.0.0 in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d artifact: version 1.0.0 in service fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c artifact: version 1.0.0 in service 
fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c artifact: version 1.0.0 in service fd00:1122:3344:101::21 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact: version 1.0.0 in service fd00:1122:3344:101::22 + + + + sled: d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 6) host phase 2 contents: ------------------------ @@ -2269,6 +2313,95 @@ parent: ce365dff-2cdb-4f35-a186-b15e20e1e700 B current contents + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/external_dns 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/internal_dns 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 1cb0a47a-59ac-4892-8e92-cf87b4290f96 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 b1deff4b-51df-4a37-9043-afbd7c70a1cb in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 21fd4f3a-ec31-469b-87b1-087c343a2422 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + + + omicron zones: + ----------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ----------------------------------------------------------------------------------------------------------------------- + crucible f55647d4-5500-4ad3-893a-df45bd50d622 artifact: version 2.0.0 in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 artifact: version 2.0.0 in service fd00:1122:3344:103::24 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 artifact: version 2.0.0 in service fd00:1122:3344:103::23 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 
artifact: version 2.0.0 in service fd00:1122:3344:3::1 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 artifact: version 2.0.0 in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 artifact: version 2.0.0 in service fd00:1122:3344:103::22 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) + cluster.preserve_downgrade_option: (do not modify) + + OXIMETER SETTINGS: + generation: 1 + read from:: SingleNode + + METADATA: + created by::::::::::::: reconfigurator-sim + created at::::::::::::: + comment:::::::::::::::: update Sled 0 (serial0) host phase 1 to 2.0.0 + internal DNS version::: 1 + external DNS version::: 1 + target release min gen: 4 + + PENDING MGS-MANAGED UPDATES: 1 + Pending MGS-managed updates (all baseboards): + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sled 0 model0 serial0 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6 2.0.0 HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3"), sled_agent_address: [fd00:1122:3344:101::1]:12345 }) + + +planning report for blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1: +chicken switches: + add zones with mupdate override: false + +* skipping noop image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 6 zones are already from artifacts +* skipping noop image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 6 zones are already from artifacts +* 1 pending MGS update: + * model0:serial0: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, 
expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3"), sled_agent_address: [fd00:1122:3344:101::1]:12345 }) +* zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) + + + +> blueprint-diff latest +from: blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 +to: blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 + + MODIFIED SLEDS: + + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 7 -> 8): + + host phase 2 contents: + -------------------------------- + slot boot image source + -------------------------------- + A current contents +* B - current contents + └─ + artifact: version 2.0.0 + + physical disks: ------------------------------------------------------------------------------------ vendor model serial disposition @@ -2276,44 +2409,457 @@ parent: ce365dff-2cdb-4f35-a186-b15e20e1e700 fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service - datasets: - ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - dataset name dataset id disposition quota reservation compression - ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off - oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/external_dns a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off - oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/internal_dns 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off - oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off - oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 ad41be71-6c15-4428-b510-20ceacde4fa6 in service none none off - oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 1bca7f71-5e42-4749-91ec-fa40793a3a9a in service none none off - oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 3ac089c9-9dec-465b-863a-188e80d71fb4 in service none none off - oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 686c19cf-a0d7-45f6-866f-c564612b2664 in service none none off - oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off - oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service none none off - oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + datasets: + 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/external_dns a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/internal_dns 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 ad41be71-6c15-4428-b510-20ceacde4fa6 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 1bca7f71-5e42-4749-91ec-fa40793a3a9a in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 3ac089c9-9dec-465b-863a-188e80d71fb4 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 686c19cf-a0d7-45f6-866f-c564612b2664 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + + + omicron zones: + ----------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ----------------------------------------------------------------------------------------------------------------------- + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 artifact: version 1.0.0 in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d artifact: version 1.0.0 in service fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c artifact: version 1.0.0 in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c artifact: version 1.0.0 in service fd00:1122:3344:101::21 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact: version 1.0.0 in service fd00:1122:3344:101::22 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 4 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + PENDING MGS UPDATES: + + Pending MGS-managed updates (all baseboards): + 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ++ sled 0 model0 serial0 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6 2.0.0 HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3"), sled_agent_address: [fd00:1122:3344:101::1]:12345 }) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) + AAAA fd00:1122:3344:101::22 + name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) + AAAA fd00:1122:3344:103::22 + name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) + AAAA fd00:1122:3344:2::1 + name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) + AAAA fd00:1122:3344:102::22 + name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) + AAAA fd00:1122:3344:101::25 + name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) + AAAA fd00:1122:3344:102::21 + name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) + AAAA fd00:1122:3344:103::24 + name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) + AAAA fd00:1122:3344:101::23 + name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) + AAAA fd00:1122:3344:101::1 + name: 
99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) + AAAA fd00:1122:3344:1::1 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + name: _clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse._tcp (records: 1) + SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal + name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) + SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal + name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) + SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal + name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) + SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + name: _internal-ntp._tcp (records: 3) + SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal + name: _nexus._tcp (records: 3) + SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 3) + SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal + SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal + SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) + AAAA fd00:1122:3344:102::25 + name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) + AAAA fd00:1122:3344:101::24 + name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) + AAAA fd00:1122:3344:102::26 + name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) + AAAA fd00:1122:3344:103::1 + name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) + AAAA fd00:1122:3344:3::1 + name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host 
(records: 1) + AAAA fd00:1122:3344:103::21 + name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) + AAAA fd00:1122:3344:103::25 + name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) + AAAA fd00:1122:3344:103::23 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.2 + A 192.0.2.3 + A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + +> sled-update-host-phase2 serial0 --boot-disk B --slot-b 0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3 +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 host phase 2 details: boot_disk -> B, B -> 0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3 + +> sled-update-host-phase1 serial0 --active B --slot-b 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6 +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 host phase 1 details: active -> B, B -> 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6 + +> inventory-generate +generated inventory collection 78f72e8d-46a9-40a9-8618-602f54454d80 from configured sleds + + +> # Second sled host OS +> blueprint-plan latest latest +INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 7, num_already_artifact: 0, num_eligible: 0, num_ineligible: 7 +INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 6, num_already_artifact: 6, num_eligible: 0, num_ineligible: 0 +INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 6, num_already_artifact: 6, num_eligible: 0, num_ineligible: 0 +INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 2.0.0, artifact_hash: 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6, sled_agent_address: [fd00:1122:3344:101::1]:12345, expected_inactive_phase_2_hash: 0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 +WARN cannot configure RoT bootloader update for board (no matching artifact), serial_number: serial0, part_number: model0 +WARN cannot configure RoT update for board (no matching artifact), serial_number: serial0, part_number: model0 +WARN cannot configure SP update for board (no matching artifact), serial_number: serial0, part_number: model0 +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure RoT bootloader update for board (no matching artifact), serial_number: serial1, part_number: model1 +WARN cannot configure RoT update for board (no matching artifact), serial_number: serial1, part_number: model1 +WARN cannot configure SP update for board (no matching artifact), serial_number: serial1, part_number: model1 +INFO 
configuring MGS-driven update, artifact_version: 2.0.0, artifact_hash: 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6, sled_agent_address: [fd00:1122:3344:102::1]:12345, expected_inactive_phase_2_hash: 0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 +INFO reached maximum number of pending MGS-driven updates, max: 1 +generated blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 based on parent blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 +planning report for blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300: +chicken switches: + add zones with mupdate override: false + +* skipping noop image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 6 zones are already from artifacts +* skipping noop image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 6 zones are already from artifacts +* 1 pending MGS update: + * model1:serial1: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3"), sled_agent_address: [fd00:1122:3344:102::1]:12345 }) +* zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
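The log lines in these planning passes ("MGS-driven update completed (will remove it and re-evaluate board)", "reached maximum number of pending MGS-driven updates, max: 1", "ran out of boards for MGS-driven update") show the pattern this flow exercises: each pass drops pending updates that inventory shows as complete, then configures at most one new MGS-driven update. A rough sketch of that loop shape, with stand-in types rather than the planner's real API:

// Stand-in for a pending MGS-driven update entry in the blueprint.
#[derive(Clone, Debug, PartialEq)]
struct PendingUpdate {
    serial: String,
    artifact_hash: String,
}

// Illustrative scheduling shape only: remove completed updates, then add new
// ones until the cap (max: 1 in these logs) is reached or no board needs one.
fn plan_mgs_updates(
    mut pending: Vec<PendingUpdate>,
    completed: impl Fn(&PendingUpdate) -> bool,
    mut next_needed_update: impl FnMut() -> Option<PendingUpdate>,
    max: usize,
) -> Vec<PendingUpdate> {
    // "MGS-driven update completed (will remove it and re-evaluate board)"
    pending.retain(|u| !completed(u));
    // "reached maximum number of pending MGS-driven updates" / "ran out of
    // boards for MGS-driven update"
    while pending.len() < max {
        match next_needed_update() {
            Some(update) => pending.push(update),
            None => break,
        }
    }
    pending
}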
+ + +> blueprint-diff latest +from: blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 +to: blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 + + MODIFIED SLEDS: + + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 5 -> 6): + + host phase 2 contents: + -------------------------------- + slot boot image source + -------------------------------- + A current contents +* B - current contents + └─ + artifact: version 2.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 8c4fa711-1d5d-4e93-85f0-d17bff47b063 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/clickhouse 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/external_dns 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/internal_dns 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 2ad1875a-92ac-472f-8c26-593309f0e4da in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 c31623de-c19b-4615-9f1d-5e1daa5d3bda in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca 09b9cc9b-3426-470b-a7bc-538f82dede03 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 775f9207-c42d-4af2-9186-27ffef67735e in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 2db6b7c1-0f46-4ced-a3ad-48872793360e in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 + + + omicron zones: + --------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + --------------------------------------------------------------------------------------------------------------- + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 install dataset in service fd00:1122:3344:102::23 + crucible 
bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset in service fd00:1122:3344:102::24 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset in service fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset in service fd00:1122:3344:102::21 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 4 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + PENDING MGS UPDATES: + + Pending MGS-managed updates (all baseboards): + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +- sled 0 model0 serial0 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6 2.0.0 HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3"), sled_agent_address: [fd00:1122:3344:101::1]:12345 }) ++ sled 1 model1 serial1 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6 2.0.0 HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: 
ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3"), sled_agent_address: [fd00:1122:3344:102::1]:12345 }) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) + AAAA fd00:1122:3344:101::22 + name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) + AAAA fd00:1122:3344:103::22 + name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) + AAAA fd00:1122:3344:2::1 + name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) + AAAA fd00:1122:3344:102::22 + name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) + AAAA fd00:1122:3344:101::25 + name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) + AAAA fd00:1122:3344:102::21 + name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) + AAAA fd00:1122:3344:103::24 + name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) + AAAA fd00:1122:3344:101::23 + name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) + AAAA fd00:1122:3344:101::1 + name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) + AAAA fd00:1122:3344:1::1 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + name: _clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse._tcp (records: 1) + SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal + name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) + SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal + name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) + SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal + name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) + SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + name: _internal-ntp._tcp (records: 3) + SRV port 123 
62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal + name: _nexus._tcp (records: 3) + SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 3) + SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal + SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal + SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) + AAAA fd00:1122:3344:102::25 + name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) + AAAA fd00:1122:3344:101::24 + name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) + AAAA fd00:1122:3344:102::26 + name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) + AAAA fd00:1122:3344:103::1 + name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) + AAAA fd00:1122:3344:3::1 + name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) + AAAA fd00:1122:3344:103::21 + name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) + AAAA fd00:1122:3344:103::25 + name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) + AAAA fd00:1122:3344:103::23 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.2 + A 192.0.2.3 + A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + +> sled-update-host-phase2 serial1 --boot-disk B --slot-b 0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3 +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c host phase 2 details: boot_disk -> B, B -> 0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3 + +> sled-update-host-phase1 serial1 --active B --slot-b 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6 +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c host phase 1 details: active -> B, B -> 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6 + +> inventory-generate +generated inventory collection 39363465-89ae-4ac2-9be1-099068da9d45 from configured sleds + + +> # Third sled host OS +> blueprint-plan latest latest +INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 7, num_already_artifact: 0, num_eligible: 0, num_ineligible: 7 +INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 6, 
num_already_artifact: 6, num_eligible: 0, num_ineligible: 0 +INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 6, num_already_artifact: 6, num_eligible: 0, num_ineligible: 0 +INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 2.0.0, artifact_hash: 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6, sled_agent_address: [fd00:1122:3344:102::1]:12345, expected_inactive_phase_2_hash: 0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 +WARN cannot configure RoT bootloader update for board (no matching artifact), serial_number: serial1, part_number: model1 +WARN cannot configure RoT update for board (no matching artifact), serial_number: serial1, part_number: model1 +WARN cannot configure SP update for board (no matching artifact), serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +WARN cannot configure RoT bootloader update for board (no matching artifact), serial_number: serial0, part_number: model0 +WARN cannot configure RoT update for board (no matching artifact), serial_number: serial0, part_number: model0 +WARN cannot configure SP update for board (no matching artifact), serial_number: serial0, part_number: model0 +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure RoT bootloader update for board (no matching artifact), serial_number: serial2, part_number: model2 +WARN cannot configure RoT update for board (no matching artifact), serial_number: serial2, part_number: model2 +WARN cannot configure SP update for board (no matching artifact), serial_number: serial2, part_number: model2 +INFO configuring MGS-driven update, artifact_version: 2.0.0, artifact_hash: 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6, sled_agent_address: [fd00:1122:3344:103::1]:12345, expected_inactive_phase_2_hash: 0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 based on parent blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 +planning report for blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839: +chicken switches: + add zones with mupdate override: false +* skipping noop image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 6 zones are already from artifacts +* skipping noop image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 6 zones are already from artifacts +* 1 pending MGS update: + * model2:serial2: 
HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3"), sled_agent_address: [fd00:1122:3344:103::1]:12345 }) +* zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) - omicron zones: - ----------------------------------------------------------------------------------------------------------------------- - zone type zone id image source disposition underlay IP - ----------------------------------------------------------------------------------------------------------------------- - crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 - crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 artifact: version 1.0.0 in service fd00:1122:3344:101::24 - external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d artifact: version 1.0.0 in service fd00:1122:3344:101::23 - internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c artifact: version 1.0.0 in service fd00:1122:3344:2::1 - internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c artifact: version 1.0.0 in service fd00:1122:3344:101::21 - nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact: version 1.0.0 in service fd00:1122:3344:101::22 +> blueprint-diff latest +from: blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 +to: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 + MODIFIED SLEDS: - sled: d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 6) + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 6 -> 7): host phase 2 contents: - ------------------------ - slot boot image source - ------------------------ - A current contents - B current contents + -------------------------------- + slot boot image source + -------------------------------- + A current contents +* B - current contents + └─ + artifact: version 2.0.0 physical disks: @@ -2353,24 +2899,174 @@ parent: ce365dff-2cdb-4f35-a186-b15e20e1e700 COCKROACHDB SETTINGS: - state fingerprint::::::::::::::::: (none) - cluster.preserve_downgrade_option: (do not modify) + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 4 (unchanged) OXIMETER SETTINGS: - generation: 1 - read from:: SingleNode + generation: 1 (unchanged) + read from:: SingleNode (unchanged) - METADATA: - created by::::::::::::: reconfigurator-sim - created at::::::::::::: - comment:::::::::::::::: (none) - internal DNS version::: 1 - external DNS version::: 1 - target release min gen: 4 + PENDING MGS UPDATES: - PENDING MGS-MANAGED UPDATES: 0 + Pending MGS-managed updates (all baseboards): + 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +- sled 1 model1 serial1 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6 2.0.0 HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3"), sled_agent_address: [fd00:1122:3344:102::1]:12345 }) ++ sled 2 model2 serial2 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6 2.0.0 HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3"), sled_agent_address: [fd00:1122:3344:103::1]:12345 }) -planning report for blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1: + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) + AAAA fd00:1122:3344:101::22 + name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) + AAAA fd00:1122:3344:103::22 + name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) + AAAA 
fd00:1122:3344:2::1 + name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) + AAAA fd00:1122:3344:102::22 + name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) + AAAA fd00:1122:3344:101::25 + name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) + AAAA fd00:1122:3344:102::21 + name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) + AAAA fd00:1122:3344:103::24 + name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) + AAAA fd00:1122:3344:101::23 + name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) + AAAA fd00:1122:3344:101::1 + name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) + AAAA fd00:1122:3344:1::1 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + name: _clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse._tcp (records: 1) + SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal + name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) + SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal + name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) + SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal + name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) + SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + name: _internal-ntp._tcp (records: 3) + SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal + name: _nexus._tcp (records: 3) + SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 3) + SRV 
port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal + SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal + SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) + AAAA fd00:1122:3344:102::25 + name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) + AAAA fd00:1122:3344:101::24 + name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) + AAAA fd00:1122:3344:102::26 + name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) + AAAA fd00:1122:3344:103::1 + name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) + AAAA fd00:1122:3344:3::1 + name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) + AAAA fd00:1122:3344:103::21 + name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) + AAAA fd00:1122:3344:103::25 + name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) + AAAA fd00:1122:3344:103::23 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.2 + A 192.0.2.3 + A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + +> sled-update-host-phase2 serial2 --boot-disk B --slot-b 0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3 +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 host phase 2 details: boot_disk -> B, B -> 0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3 + +> sled-update-host-phase1 serial2 --active B --slot-b 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6 +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 host phase 1 details: active -> B, B -> 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6 + +> inventory-generate +generated inventory collection 04bc9001-0836-4fec-b9cb-9d4760caf8b4 from configured sleds + + +> # All host OS updates complete +> blueprint-plan latest latest +INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 7, num_already_artifact: 0, num_eligible: 0, num_ineligible: 7 +INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 6, num_already_artifact: 6, num_eligible: 0, num_ineligible: 0 +INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 6, num_already_artifact: 6, num_eligible: 0, num_ineligible: 0 +INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 2.0.0, artifact_hash: 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6, sled_agent_address: [fd00:1122:3344:103::1]:12345, expected_inactive_phase_2_hash: 0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 +WARN cannot 
configure RoT bootloader update for board (no matching artifact), serial_number: serial2, part_number: model2 +WARN cannot configure RoT update for board (no matching artifact), serial_number: serial2, part_number: model2 +WARN cannot configure SP update for board (no matching artifact), serial_number: serial2, part_number: model2 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +WARN cannot configure RoT bootloader update for board (no matching artifact), serial_number: serial0, part_number: model0 +WARN cannot configure RoT update for board (no matching artifact), serial_number: serial0, part_number: model0 +WARN cannot configure SP update for board (no matching artifact), serial_number: serial0, part_number: model0 +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure RoT bootloader update for board (no matching artifact), serial_number: serial1, part_number: model1 +WARN cannot configure RoT update for board (no matching artifact), serial_number: serial1, part_number: model1 +WARN cannot configure SP update for board (no matching artifact), serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO ran out of boards for MGS-driven update +INFO some zones not yet up-to-date, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zones_currently_updating: [ZoneCurrentlyUpdating { zone_id: 0c71b3b2-6ceb-4e8f-b020-b08675e83038 (service), zone_kind: Nexus, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("0e32b4a3e5d3668bb1d6a16fb06b74dc60b973fa479dcee0aae3adbb52bf1388") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: 427ec88f-f467-42fa-9bbb-66a91a36103c (service), zone_kind: InternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("ffbf1373f7ee08dddd74c53ed2a94e7c4c572a982d3a9bc94000c6956b700c6a") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: 5199c033-4cf9-4ab6-8ae7-566bd7606363 (service), zone_kind: Crucible, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("6f17cf65fb5a5bec5542dd07c03cd0acc01e59130f02c532c8d848ecae810047") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: 6444f8a5-6465-4f0b-a549-1993c113569c (service), zone_kind: InternalNtp, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: 803bfb63-c246-41db-b0da-d3b87ddfc63d (service), zone_kind: ExternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("ccca13ed19b8731f9adaf0d6203b02ea3b9ede4fa426b9fac0a07ce95440046d") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: ba4994a8-23f9-4b1a-a84f-a08d74591389 (service), zone_kind: CruciblePantry, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("21f0ada306859c23917361f2e0b9235806c32607ec689c7e8cf16bb898bc5a02") }, inv_image_source: InstallDataset } }] +generated blueprint 
27e755bc-dc10-4647-853c-f89bb3a15a2c based on parent blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 +planning report for blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c: chicken switches: add zones with mupdate override: false @@ -2378,10 +3074,9 @@ chicken switches: * skipping noop image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 6 zones are already from artifacts - > blueprint-diff latest -from: blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 -to: blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 +from: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 +to: blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) @@ -2396,6 +3091,14 @@ to: blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 generation: 1 (unchanged) read from:: SingleNode (unchanged) + PENDING MGS UPDATES: + + Pending MGS-managed updates (all baseboards): + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +- sled 2 model2 serial2 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6 2.0.0 HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3"), sled_agent_address: [fd00:1122:3344:103::1]:12345 }) + internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) @@ -2527,7 +3230,7 @@ set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 mupdate override: unset -> c8fba91 added sled c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b (serial: serial3) > inventory-generate -generated inventory collection 78f72e8d-46a9-40a9-8618-602f54454d80 from configured sleds +generated inventory 
collection 08abe624-4b5f-491c-90cb-d74a84e4ba3e from configured sleds > # This will *not* generate the datasets and internal NTP zone on the new @@ -2542,7 +3245,7 @@ INFO blueprint mupdate override updated to match inventory, phase: do_plan_mupda - zone ba4994a8-23f9-4b1a-a84f-a08d74591389 (CruciblePantry) updated from artifact: version 1.0.0 to install dataset , host_phase_2: - host phase 2 slot A: current contents (unchanged) - - host phase 2 slot B: current contents (unchanged) + - host phase 2 slot B: updated from artifact (version version 2.0.0, hash 0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3) to preserving current contents INFO no previous MGS update found as part of updating blueprint mupdate override to match inventory, phase: do_plan_mupdate_override, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 INFO updating target release minimum generation based on new set-override actions, phase: do_plan_mupdate_override, current_generation: 4, new_generation: 5 @@ -2550,8 +3253,8 @@ INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc5 INFO skipped noop image source check on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, reason: remove_mupdate_override is set in the blueprint (c8fba912-63ae-473a-9115-0495d10fb3bc) INFO performed noop image source checks on sled, sled_id: c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b, num_total: 0, num_already_artifact: 0, num_eligible: 0, num_ineligible: 0 INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 6, num_already_artifact: 6, num_eligible: 0, num_ineligible: 0 -generated blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 based on parent blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 -planning report for blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300: +generated blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f based on parent blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c +planning report for blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f: chicken switches: add zones with mupdate override: false @@ -2563,20 +3266,21 @@ chicken switches: > blueprint-diff latest -from: blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 -to: blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 +from: blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c +to: blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f MODIFIED SLEDS: - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 7 -> 8): + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 8 -> 9): + will remove mupdate override: (none) -> c8fba912-63ae-473a-9115-0495d10fb3bc host phase 2 contents: - ------------------------ - slot boot image source - ------------------------ - A current contents - B current contents + -------------------------------- + slot boot image source + -------------------------------- + A current contents +* B - artifact: version 2.0.0 + └─ + current contents physical disks: @@ -2777,8 +3481,8 @@ INFO skipped noop image source check on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0 INFO performed noop image source checks on sled, sled_id: c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b, num_total: 0, num_already_artifact: 0, num_eligible: 0, num_ineligible: 0 INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 6, num_already_artifact: 6, num_eligible: 0, num_ineligible: 0 INFO altered physical disks, sled_id: c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b, sled_edits: SledEditCounts { disks: EditCounts { added: 10, updated: 0, expunged: 0, removed: 0 }, 
datasets: EditCounts { added: 20, updated: 0, expunged: 0, removed: 0 }, zones: EditCounts { added: 0, updated: 0, expunged: 0, removed: 0 } } -generated blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 based on parent blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 -planning report for blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839: +generated blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 based on parent blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f +planning report for blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35: * skipping noop image source check on sled c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b: all 0 zones are already from artifacts * skipping noop image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 6 zones are already from artifacts * MUPdate overrides exist @@ -2789,8 +3493,8 @@ planning report for blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839: > blueprint-diff latest -from: blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 -to: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 +from: blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f +to: blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 MODIFIED SLEDS: @@ -2824,34 +3528,34 @@ to: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- dataset name dataset id disposition quota reservation compression ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -+ oxp_1a2d5932-ca5b-44b8-a0a4-31215d934293/crypt/zone dd47a076-ab84-4d80-83ea-470bafffe532 in service none none off -+ oxp_2454c824-3b2b-4350-a5c7-ce9b1ff5a903/crypt/zone 0ac30a0d-a5bc-49b3-b1b1-8995ce991e20 in service none none off -+ oxp_5369d002-dd19-48f2-8c08-ba063cb1e010/crypt/zone 14b1ed59-e9ff-414d-94c5-8646f7866f58 in service none none off -+ oxp_5e25e3a5-d115-4df0-a54b-81f29fbb9d61/crypt/zone afd3ca8e-fe27-45df-a7a6-71b98041961b in service none none off -+ oxp_6082ad32-e210-4786-9656-4b6bfcec5d05/crypt/zone 411c783e-8f44-4509-8e7b-e34a6378dbfb in service none none off -+ oxp_88659529-1c87-4107-8227-c9276a330bf5/crypt/zone fa0dfbaf-c4d2-4c73-8224-da1919b4dc2a in service none none off -+ oxp_9cde7847-ff58-41af-95bf-aecebc3f1344/crypt/zone 406fbb94-b87b-4b66-b320-5a8cc4985523 in service none none off -+ oxp_9e9ca75a-6a79-4706-bcce-35714d79913c/crypt/zone e34ab578-5b8e-4396-9a8b-3360e3cd3eae in service none none off -+ oxp_d8c90412-fbdb-480d-8a07-c3de73b441bc/crypt/zone 00eef236-afb5-4ec3-936e-24a6aaafbb3b in service none none off -+ oxp_f3052185-bebb-4e75-842b-d8125b70bb8a/crypt/zone 4372db80-e002-4826-b075-6621e7c6a90f in service none none off -+ oxp_1a2d5932-ca5b-44b8-a0a4-31215d934293/crypt/zone/oxz_ntp_256db9ea-f0d9-4b0f-bb54-99ad2e98ebee c1e2b33c-8c6b-43e5-929e-05298035d8ea in service none none off -+ oxp_1a2d5932-ca5b-44b8-a0a4-31215d934293/crypt/debug 295db56c-8cb9-4947-918b-cf83061bf7a0 in service 100 GiB none gzip-9 -+ oxp_2454c824-3b2b-4350-a5c7-ce9b1ff5a903/crypt/debug c104b108-c685-41a2-a7cc-400f741da68b in service 100 GiB none gzip-9 -+ oxp_5369d002-dd19-48f2-8c08-ba063cb1e010/crypt/debug 878ac289-9ec5-4b54-8ad8-256df68b1056 in service 100 GiB none gzip-9 -+ oxp_5e25e3a5-d115-4df0-a54b-81f29fbb9d61/crypt/debug d365860b-7d08-4a6d-a7bb-b23af4dccaa3 in service 100 GiB none gzip-9 -+ oxp_6082ad32-e210-4786-9656-4b6bfcec5d05/crypt/debug 
2ac6d1eb-969c-4ccc-b4c9-7cca6b59acb4 in service 100 GiB none gzip-9 -+ oxp_88659529-1c87-4107-8227-c9276a330bf5/crypt/debug 6721ff08-b286-4090-a486-5e3fef2d0659 in service 100 GiB none gzip-9 -+ oxp_9cde7847-ff58-41af-95bf-aecebc3f1344/crypt/debug b96e1d98-533a-4b44-8522-c507d8ddce8d in service 100 GiB none gzip-9 -+ oxp_9e9ca75a-6a79-4706-bcce-35714d79913c/crypt/debug 30ea33c2-d4ce-4f11-9554-e0061fa773bc in service 100 GiB none gzip-9 -+ oxp_d8c90412-fbdb-480d-8a07-c3de73b441bc/crypt/debug 2609168b-a573-4e37-a06e-a6b920d185fc in service 100 GiB none gzip-9 -+ oxp_f3052185-bebb-4e75-842b-d8125b70bb8a/crypt/debug d3d52ca9-e99d-492a-ada9-d628a2593f4c in service 100 GiB none gzip-9 ++ oxp_1a2d5932-ca5b-44b8-a0a4-31215d934293/crypt/zone 5b45f9cf-cfb5-4ab5-a1c5-c32409b9055c in service none none off ++ oxp_2454c824-3b2b-4350-a5c7-ce9b1ff5a903/crypt/zone 45b0ac9f-5e52-4da4-9521-4bab8c42c748 in service none none off ++ oxp_5369d002-dd19-48f2-8c08-ba063cb1e010/crypt/zone 5521d6e5-f3d6-4743-9b86-acd9febe0b47 in service none none off ++ oxp_5e25e3a5-d115-4df0-a54b-81f29fbb9d61/crypt/zone 98acd8fb-856c-4a22-b359-1acefb507d67 in service none none off ++ oxp_6082ad32-e210-4786-9656-4b6bfcec5d05/crypt/zone 41bae319-97f7-458e-8b7d-94bbeb457279 in service none none off ++ oxp_88659529-1c87-4107-8227-c9276a330bf5/crypt/zone dbe8d2c2-5618-4234-ad17-d749808e2881 in service none none off ++ oxp_9cde7847-ff58-41af-95bf-aecebc3f1344/crypt/zone a50abc4d-ae17-4076-a35a-e3481fff8319 in service none none off ++ oxp_9e9ca75a-6a79-4706-bcce-35714d79913c/crypt/zone b1fc1b85-a6b8-407c-afdd-7d2f16349241 in service none none off ++ oxp_d8c90412-fbdb-480d-8a07-c3de73b441bc/crypt/zone 4753a210-ce06-41ae-97a7-5690ed7e41b4 in service none none off ++ oxp_f3052185-bebb-4e75-842b-d8125b70bb8a/crypt/zone bc4a82c1-b64e-484d-be6a-e62a61f8c575 in service none none off ++ oxp_1a2d5932-ca5b-44b8-a0a4-31215d934293/crypt/zone/oxz_ntp_cee5d0c6-412c-49e9-a9a8-dab9addaa182 6e688437-1dd2-4fa6-8ae5-27d03b9facc7 in service none none off ++ oxp_1a2d5932-ca5b-44b8-a0a4-31215d934293/crypt/debug a775b06e-d41a-4999-921d-92382de5b104 in service 100 GiB none gzip-9 ++ oxp_2454c824-3b2b-4350-a5c7-ce9b1ff5a903/crypt/debug a5761160-87b2-4f6e-8f9b-534bbb6e5b90 in service 100 GiB none gzip-9 ++ oxp_5369d002-dd19-48f2-8c08-ba063cb1e010/crypt/debug 361810ea-a221-4889-afff-c3c39800148e in service 100 GiB none gzip-9 ++ oxp_5e25e3a5-d115-4df0-a54b-81f29fbb9d61/crypt/debug b95b9b99-4499-4bd8-9d63-689c4f3e5643 in service 100 GiB none gzip-9 ++ oxp_6082ad32-e210-4786-9656-4b6bfcec5d05/crypt/debug 1c2df152-b15a-4581-b577-59c1191bffcd in service 100 GiB none gzip-9 ++ oxp_88659529-1c87-4107-8227-c9276a330bf5/crypt/debug 23815e89-3785-4e91-8211-0047004d0d73 in service 100 GiB none gzip-9 ++ oxp_9cde7847-ff58-41af-95bf-aecebc3f1344/crypt/debug 127a5d0a-e1f2-45be-9e02-d237943bd572 in service 100 GiB none gzip-9 ++ oxp_9e9ca75a-6a79-4706-bcce-35714d79913c/crypt/debug 92a3c4c9-a8f1-46c2-97d0-3ee013ce8e63 in service 100 GiB none gzip-9 ++ oxp_d8c90412-fbdb-480d-8a07-c3de73b441bc/crypt/debug b0c66ed8-0814-4fdf-bf98-3134e6b4f835 in service 100 GiB none gzip-9 ++ oxp_f3052185-bebb-4e75-842b-d8125b70bb8a/crypt/debug ede50fed-90a2-40c3-a2b3-368c5a94a868 in service 100 GiB none gzip-9 omicron zones: -------------------------------------------------------------------------------------------------------------------- zone type zone id image source disposition underlay IP 
-------------------------------------------------------------------------------------------------------------------- -+ internal_ntp 256db9ea-f0d9-4b0f-bb54-99ad2e98ebee artifact: version 2.0.0 in service fd00:1122:3344:104::21 ++ internal_ntp cee5d0c6-412c-49e9-a9a8-dab9addaa182 artifact: version 2.0.0 in service fd00:1122:3344:104::21 COCKROACHDB SETTINGS: @@ -2872,8 +3576,6 @@ internal DNS: * DNS zone: "control-plane.oxide.internal": name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) AAAA fd00:1122:3344:101::22 -+ name: 256db9ea-f0d9-4b0f-bb54-99ad2e98ebee.host (records: 1) -+ AAAA fd00:1122:3344:104::21 name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) AAAA fd00:1122:3344:102::1 name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) @@ -2928,9 +3630,9 @@ internal DNS: - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal -+ SRV port 123 256db9ea-f0d9-4b0f-bb54-99ad2e98ebee.host.control-plane.oxide.internal + SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal ++ SRV port 123 cee5d0c6-412c-49e9-a9a8-dab9addaa182.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal name: _nameservice._tcp (records: 3) SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal @@ -2955,6 +3657,8 @@ internal DNS: AAAA fd00:1122:3344:102::26 name: c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b.sled (records: 1) AAAA fd00:1122:3344:104::1 ++ name: cee5d0c6-412c-49e9-a9a8-dab9addaa182.host (records: 1) ++ AAAA fd00:1122:3344:104::21 name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) AAAA fd00:1122:3344:103::1 name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) @@ -2996,13 +3700,13 @@ external DNS: > # test that the planner bails if it attempts a rollback of the target release > # minimum generation. 
> blueprint-edit latest set-target-release-min-gen 1000 -blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c created from latest blueprint (61a93ea3-c872-48e0-aace-e86b0c52b839): set target release minimum generation to 1000 +blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 created from latest blueprint (9a9e6c32-5a84-4020-a159-33dceff18d35): set target release minimum generation to 1000 > sled-set serial1 mupdate-override cc724abe-80c1-47e6-9771-19e6540531a9 set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c mupdate override: unset -> cc724abe-80c1-47e6-9771-19e6540531a9 > inventory-generate -generated inventory collection 39363465-89ae-4ac2-9be1-099068da9d45 from configured sleds +generated inventory collection 005f6a30-7f65-4593-9f78-ee68f766f42b from configured sleds > blueprint-plan latest latest INFO blueprint mupdate override updated to match inventory, phase: do_plan_mupdate_override, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, new_bp_override: cc724abe-80c1-47e6-9771-19e6540531a9, prev_bp_override: None, zones: @@ -3015,7 +3719,7 @@ INFO blueprint mupdate override updated to match inventory, phase: do_plan_mupda - zone bd354eef-d8a6-4165-9124-283fb5e46d77 (Crucible) left unchanged, image source: install dataset , host_phase_2: - host phase 2 slot A: current contents (unchanged) - - host phase 2 slot B: current contents (unchanged) + - host phase 2 slot B: updated from artifact (version version 2.0.0, hash 0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3) to preserving current contents INFO no previous MGS update found as part of updating blueprint mupdate override to match inventory, phase: do_plan_mupdate_override, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c error: generating blueprint: target release minimum generation was set to 1000, but we tried to set it to the older generation 5, indicating a possible table rollback which should not happen diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout index 07f7c10aaf..0c2730f498 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout @@ -560,7 +560,7 @@ external DNS: > # Now, update the simulated RoT bootloader to reflect that the update completed. > # Collect inventory from it and use that collection for another planning step. > # This should report that the update completed, remove that update, and add one -> # for an SP on the same sled. +> # for an RoT on the same sled. > sled-update-rot-bootloader 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 --stage0 1.0.0 set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 RoT bootloader versions: stage0 -> 1.0.0 @@ -751,11 +751,9 @@ external DNS: > # After the RoT bootloader update has completed, we update the simulated RoT to -> # reflect that update has completed as well. -> # Like before, collect inventory from it and use that collection for the next -> # step. -> # This should report that the update completed, remove that update, and add one -> # for another sled. +> # reflect that update has completed as well. Like before, collect inventory from +> # it and use that collection for the next step. This should report that the +> # update completed, remove that update, and add one for another sled. 
> sled-update-rot 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 --slot-a 1.0.0 set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 RoT settings: slot a -> 1.0.0 @@ -949,6 +947,10 @@ external DNS: > sled-update-sp 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 --active 1.0.0 set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 SP versions: active -> 1.0.0 + +> # Collect inventory from it and use that collection for another planning step. +> # This should report that the update completed, remove that update, and add a +> # host OS update for this same sled. > inventory-generate generated inventory collection b1bda47d-2c19-4fba-96e3-d9df28db7436 from configured sleds @@ -957,8 +959,7 @@ INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc5 INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670, expected_inactive_version: NoValidVersion, expected_active_version: 0.0.1, component: sp, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 -INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, expected_stage0_next_version: NoValidVersion, expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 +INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89, sled_agent_address: [fd00:1122:3344:101::1]:12345, expected_inactive_phase_2_hash: f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba based on parent blueprint df06bb57-ad42-4431-9206-abff322896c7 planning report for blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba: @@ -966,7 +967,7 @@ chicken switches: add zones with mupdate override: false * 1 pending MGS update: - * model1:serial1: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) + * model0:serial0: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), 
expected_inactive_phase_2_hash: ArtifactHash("f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008"), sled_agent_address: [fd00:1122:3344:101::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) @@ -974,6 +975,67 @@ chicken switches: from: blueprint df06bb57-ad42-4431-9206-abff322896c7 to: blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba + MODIFIED SLEDS: + + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 2 -> 3): + + host phase 2 contents: + -------------------------------- + slot boot image source + -------------------------------- + A current contents +* B - current contents + └─ + artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-073979dd-3248-44a5-9fa1-cc72a140d682 in service + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + fake-vendor fake-model serial-e4d937e1-6ddc-4eca-bb08-c1f73791e608 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_058fd5f9-60a8-4e11-9302-15172782e17d 02c56a30-7d97-406d-bd34-1eb437fd517d in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 832fd140-d467-4bad-b5e9-63171634087c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 4d7e3e8e-06bd-414c-a468-779e056a9b75 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 
1bca7f71-5e42-4749-91ec-fa40793a3a9a in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 + + + omicron zones: + --------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + --------------------------------------------------------------------------------------------------------------- + crucible 058fd5f9-60a8-4e11-9302-15172782e17d install dataset in service fd00:1122:3344:101::27 + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 install dataset in service fd00:1122:3344:101::25 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset in service fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset in service fd00:1122:3344:101::21 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 + + COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) cluster.preserve_downgrade_option: (do not modify) (unchanged) @@ -990,11 +1052,11 @@ to: blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba PENDING MGS UPDATES: Pending MGS-managed updates (all baseboards): - -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - sp_type slot part_number serial_number artifact_hash artifact_version details - -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -- sled 0 model0 serial0 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670 1.0.0 Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) -+ sled 1 model1 serial1 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236 1.0.0 RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) + 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + sp_type slot part_number serial_number artifact_hash artifact_version details + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +* sled 0 model0 serial0 - 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670 1.0.0 - Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) + └─ + 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89 + HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008"), sled_agent_address: [fd00:1122:3344:101::1]:12345 }) internal DNS: @@ -1136,25 +1198,12 @@ external DNS: -> # This time, make it more interesting. Change the inactive slot contents of -> # the simulated RoT bootloader. This should make the configured update -> # impossible and cause the planner to fix it. To test this, we also need to tell -> # the planner not to ignore this update even though it's quite new. -> set ignore-impossible-mgs-updates-since now -ignoring impossible MGS updates since - -> sled-update-rot-bootloader 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c --stage0-next 0.5.0 -set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c RoT bootloader versions: stage0_next -> 0.5.0 - -> inventory-generate -generated inventory collection a71f7a73-35a6-45e8-acbe-f1c5925eed69 from configured sleds - +> # If we generate another plan, there should be no change. 
> blueprint-plan latest latest INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 -INFO MGS-driven update impossible (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, expected_stage0_next_version: NoValidVersion, expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 -INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, expected_stage0_next_version: Version(ArtifactVersion("0.5.0")), expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 +INFO MGS-driven update not yet completed (will keep it), artifact_version: 1.0.0, artifact_hash: 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89, sled_agent_address: [fd00:1122:3344:101::1]:12345, expected_inactive_phase_2_hash: f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint 9034c710-3e57-45f3-99e5-4316145e87ac based on parent blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba planning report for blueprint 9034c710-3e57-45f3-99e5-4316145e87ac: @@ -1162,7 +1211,7 @@ chicken switches: add zones with mupdate override: false * 1 pending MGS update: - * model1:serial1: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: Version(ArtifactVersion("0.5.0")) }) + * model0:serial0: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008"), sled_agent_address: [fd00:1122:3344:101::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
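The log line at the top of this run, "MGS-driven update not yet completed (will keep it)", is one of four dispositions the planner reports for an existing pending MGS-driven update across this test: keep it, remove it because it completed, remove it because inventory now makes it impossible, or keep it even though it looks impossible because it was configured very recently. Below is a minimal Rust sketch of that decision, assuming the caller has already reduced inventory to a few booleans; the names (PendingUpdateDisposition, classify_pending_update) are invented for illustration and are not Omicron's real types.

// Illustrative sketch only; hypothetical names, not the planner's actual code.
enum PendingUpdateDisposition {
    // "MGS-driven update not yet completed (will keep it)"
    Keep,
    // "MGS-driven update completed (will remove it and re-evaluate board)"
    RemoveCompleted,
    // "MGS-driven update impossible (will remove it and re-evaluate board)"
    RemoveImpossible,
    // "keeping apparently-impossible MGS-driven update (waiting for recent
    // update to be applied)"; the set ignore-impossible-mgs-updates-since
    // steps used later in this test withdraw that grace.
    KeepDespiteRecentImpossible,
}

fn classify_pending_update(
    looks_completed: bool,
    preconditions_still_hold: bool,
    configured_recently: bool,
) -> PendingUpdateDisposition {
    if looks_completed {
        PendingUpdateDisposition::RemoveCompleted
    } else if preconditions_still_hold {
        PendingUpdateDisposition::Keep
    } else if configured_recently {
        PendingUpdateDisposition::KeepDespiteRecentImpossible
    } else {
        PendingUpdateDisposition::RemoveImpossible
    }
}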
@@ -1183,15 +1232,6 @@ to: blueprint 9034c710-3e57-45f3-99e5-4316145e87ac generation: 1 (unchanged) read from:: SingleNode (unchanged) - PENDING MGS UPDATES: - - Pending MGS-managed updates (all baseboards): - ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - sp_type slot part_number serial_number artifact_hash artifact_version details - ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -* sled 1 model1 serial1 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236 1.0.0 - RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) - └─ + RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: Version(ArtifactVersion("0.5.0")) }) - internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) @@ -1332,20 +1372,22 @@ external DNS: -> # Now simulate the update completing successfully. -> # Like before, we should see a pending RoT update for this sled. -> sled-update-rot-bootloader 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c --stage0 1.0.0 -set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c RoT bootloader versions: stage0 -> 1.0.0 +> # Update only the simulated host phase 2; this is a prerequisite for updating +> # the phase 1, and must be done first. +> sled-update-host-phase2 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 --slot-b f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008 +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 host phase 2 details: B -> f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008 > inventory-generate -generated inventory collection 0b5efbb3-0b1b-4bbf-b7d8-a2d6fca074c6 from configured sleds +generated inventory collection a71f7a73-35a6-45e8-acbe-f1c5925eed69 from configured sleds + +> # Planning after only phase 2 has changed should make no changes. We're still +> # waiting on phase 1 to change. 
> blueprint-plan latest latest INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 -INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, expected_stage0_next_version: Version(ArtifactVersion("0.5.0")), expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 -INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, component: rot, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 +INFO MGS-driven update not yet completed (will keep it), artifact_version: 1.0.0, artifact_hash: 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89, sled_agent_address: [fd00:1122:3344:101::1]:12345, expected_inactive_phase_2_hash: f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 based on parent blueprint 9034c710-3e57-45f3-99e5-4316145e87ac planning report for blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976: @@ -1353,7 +1395,7 @@ chicken switches: add zones with mupdate override: false * 1 pending MGS update: - * model1:serial1: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) + * model0:serial0: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008"), sled_agent_address: [fd00:1122:3344:101::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
@@ -1374,15 +1416,6 @@ to: blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 generation: 1 (unchanged) read from:: SingleNode (unchanged) - PENDING MGS UPDATES: - - Pending MGS-managed updates (all baseboards): - ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - sp_type slot part_number serial_number artifact_hash artifact_version details - ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -* sled 1 model1 serial1 - 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236 1.0.0 - RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: Version(ArtifactVersion("0.5.0")) }) - └─ + 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a + Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) - internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) @@ -1523,24 +1556,21 @@ external DNS: -> # Now we'll change the inactive slot contents of the simulated RoT. Like with -> # the RoT bootloader, this should make the update impossible and cause the -> # planner to fix it. -> set ignore-impossible-mgs-updates-since now -ignoring impossible MGS updates since - -> sled-update-rot 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c --slot-b 0.5.0 -set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c RoT settings: slot b -> 0.5.0 +> # Now update the simulated SP to reflect that the phase 1 update is done. +> sled-update-host-phase1 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 --active B --slot-b 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89 +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 host phase 1 details: active -> B, B -> 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89 > inventory-generate -generated inventory collection 78f72e8d-46a9-40a9-8618-602f54454d80 from configured sleds +generated inventory collection 0b5efbb3-0b1b-4bbf-b7d8-a2d6fca074c6 from configured sleds + +> # Planning _still_ shouldn't make any new changes; the OS update as a whole +> # isn't done until sled-agent reports it has booted from the new image. 
> blueprint-plan latest latest INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 -INFO MGS-driven update impossible (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, component: rot, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 -INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("0.5.0")), component: rot, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 +INFO keeping apparently-impossible MGS-driven update (waiting for recent update to be applied), artifact_version: 1.0.0, artifact_hash: 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89, sled_agent_address: [fd00:1122:3344:101::1]:12345, expected_inactive_phase_2_hash: f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 based on parent blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 planning report for blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736: @@ -1548,7 +1578,7 @@ chicken switches: add zones with mupdate override: false * 1 pending MGS update: - * model1:serial1: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("0.5.0")), expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) + * model0:serial0: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), 
expected_inactive_phase_2_hash: ArtifactHash("f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008"), sled_agent_address: [fd00:1122:3344:101::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) @@ -1569,15 +1599,6 @@ to: blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 generation: 1 (unchanged) read from:: SingleNode (unchanged) - PENDING MGS UPDATES: - - Pending MGS-managed updates (all baseboards): - ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ - sp_type slot part_number serial_number artifact_hash artifact_version details - ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ -* sled 1 model1 serial1 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a 1.0.0 - Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) - └─ + Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("0.5.0")), expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) - internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) @@ -1718,20 +1739,23 @@ external DNS: -> # Now simulate the update completing successfully. -> # Like before, we should see a pending SP update for this sled. -> sled-update-rot 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c --slot-a 1.0.0 -set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c RoT settings: slot a -> 1.0.0 +> # Update the sled's boot disk; this finishes the host OS update. +> sled-update-host-phase2 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 --boot-disk B +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 host phase 2 details: boot_disk -> B > inventory-generate -generated inventory collection 39363465-89ae-4ac2-9be1-099068da9d45 from configured sleds +generated inventory collection 78f72e8d-46a9-40a9-8618-602f54454d80 from configured sleds + +> # Planning should now remove the host OS update and plan the next RoT bootloader +> # update. 
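Taken together, the three steps simulated for this sled (stage the new phase 2 artifact on the inactive M.2 slot, report the new phase 1 as active, then switch the boot disk) are what it takes for the next planning run to report "completed (will remove it and re-evaluate board)" instead of "not yet completed (will keep it)". A rough Rust sketch of the shape of that completion check follows; the types (M2Slot, ObservedHostOs, HostOsTarget) and the function name are hypothetical stand-ins for the real inventory and target-release structures, not the planner's actual code.

// Hypothetical types; a sketch of the behavior observable in this test.
#[derive(Clone, Copy, PartialEq, Eq)]
enum M2Slot {
    A,
    B,
}

struct ObservedHostOs {
    active_phase_1_slot: M2Slot,
    boot_disk: M2Slot,
    active_phase_1_hash: [u8; 32],
    boot_disk_phase_2_hash: [u8; 32],
}

struct HostOsTarget {
    slot: M2Slot, // the slot the update was written to (B in this walkthrough)
    phase_1_hash: [u8; 32],
    phase_2_hash: [u8; 32],
}

// Complete only when the sled is actually running the new image: the target
// slot is both the active phase 1 slot and the boot disk, and the hashes the
// sled reports for that slot match the target artifacts.
fn host_os_update_complete(seen: &ObservedHostOs, want: &HostOsTarget) -> bool {
    seen.active_phase_1_slot == want.slot
        && seen.boot_disk == want.slot
        && seen.active_phase_1_hash == want.phase_1_hash
        && seen.boot_disk_phase_2_hash == want.phase_2_hash
}

Under that reading, after the first step only the phase 2 hash matches, after the second step the phase 1 side matches but the boot disk is still A, and only the boot-disk change makes all four conditions true, which is why the two earlier planning runs kept the pending update.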
> blueprint-plan latest latest INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 -INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("0.5.0")), component: rot, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 -INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670, expected_inactive_version: NoValidVersion, expected_active_version: 0.0.1, component: sp, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 +INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89, sled_agent_address: [fd00:1122:3344:101::1]:12345, expected_inactive_phase_2_hash: f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, expected_stage0_next_version: NoValidVersion, expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint 626487fa-7139-45ec-8416-902271fc730b based on parent blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 planning report for blueprint 626487fa-7139-45ec-8416-902271fc730b: @@ -1739,7 +1763,7 @@ chicken switches: add zones with mupdate override: false * 1 pending MGS update: - * model1:serial1: Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) + * model1:serial1: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
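This run also shows the pacing rule used throughout the test: boards with nothing left to do are skipped, one new update is configured, and planning stops at the configured cap ("reached maximum number of pending MGS-driven updates, max: 1"); in later hunks, once every board has been visited, the log instead reads "ran out of boards for MGS-driven update". A small sketch of that loop, with plan_mgs_updates, the B and U type parameters, and the next_update_for callback all invented for illustration:

// Illustrative sketch only; not the Omicron planner's real code.
fn plan_mgs_updates<B, U>(
    boards: impl IntoIterator<Item = B>,
    max_pending: usize,
    mut next_update_for: impl FnMut(&B) -> Option<U>,
) -> Vec<U> {
    let mut pending = Vec::new();
    for board in boards {
        match next_update_for(&board) {
            // "skipping board for MGS-driven update": board already up to date.
            None => continue,
            // "configuring MGS-driven update": record one more pending update.
            Some(update) => pending.push(update),
        }
        if pending.len() >= max_pending {
            // "reached maximum number of pending MGS-driven updates"
            return pending;
        }
    }
    // Visited every board without hitting the cap:
    // "ran out of boards for MGS-driven update".
    pending
}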
@@ -1763,11 +1787,11 @@ to: blueprint 626487fa-7139-45ec-8416-902271fc730b PENDING MGS UPDATES: Pending MGS-managed updates (all baseboards): - -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - sp_type slot part_number serial_number artifact_hash artifact_version details - -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -* sled 1 model1 serial1 - 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a 1.0.0 - Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("0.5.0")), expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) - └─ + 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670 + Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +- sled 0 model0 serial0 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89 1.0.0 HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, 
expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008"), sled_agent_address: [fd00:1122:3344:101::1]:12345 }) ++ sled 1 model1 serial1 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236 1.0.0 RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) internal DNS: @@ -1909,23 +1933,25 @@ external DNS: -> # We repeat the same procedure with the SP. Like with the RoT, this should make -> # the update impossible and cause the planner to fix it. +> # This time, make it more interesting. Change the inactive slot contents of +> # the simulated RoT bootloader. This should make the configured update +> # impossible and cause the planner to fix it. To test this, we also need to tell +> # the planner not to ignore this update even though it's quite new. > set ignore-impossible-mgs-updates-since now ignoring impossible MGS updates since -> sled-update-sp 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c --inactive 0.5.0 -set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c SP versions: inactive -> 0.5.0 +> sled-update-rot-bootloader 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c --stage0-next 0.5.0 +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c RoT bootloader versions: stage0_next -> 0.5.0 > inventory-generate -generated inventory collection 04bc9001-0836-4fec-b9cb-9d4760caf8b4 from configured sleds +generated inventory collection 39363465-89ae-4ac2-9be1-099068da9d45 from configured sleds > blueprint-plan latest latest INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 -INFO MGS-driven update impossible (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670, expected_inactive_version: NoValidVersion, expected_active_version: 0.0.1, component: sp, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 -INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670, expected_inactive_version: Version(ArtifactVersion("0.5.0")), expected_active_version: 0.0.1, component: sp, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 +INFO MGS-driven update impossible (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, expected_stage0_next_version: NoValidVersion, expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 +INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 
005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, expected_stage0_next_version: Version(ArtifactVersion("0.5.0")), expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b based on parent blueprint 626487fa-7139-45ec-8416-902271fc730b planning report for blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b: @@ -1933,7 +1959,7 @@ chicken switches: add zones with mupdate override: false * 1 pending MGS update: - * model1:serial1: Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: Version(ArtifactVersion("0.5.0")) }) + * model1:serial1: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: Version(ArtifactVersion("0.5.0")) }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) @@ -1957,11 +1983,11 @@ to: blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b PENDING MGS UPDATES: Pending MGS-managed updates (all baseboards): - ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - sp_type slot part_number serial_number artifact_hash artifact_version details - ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -* sled 1 model1 serial1 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670 1.0.0 - Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) - └─ + Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: Version(ArtifactVersion("0.5.0")) }) + ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +* sled 1 model1 serial1 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236 1.0.0 - RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) + └─ + RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: Version(ArtifactVersion("0.5.0")) }) internal DNS: @@ -2103,30 +2129,28 @@ external DNS: -> # Let's simulate the successful SP update as well. -> # A few more planning steps should try to update the last sled. 
-> sled-update-sp 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c --active 1.0.0 -set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c SP versions: active -> 1.0.0 +> # Now simulate the update completing successfully. +> # Like before, we should see a pending RoT update for this sled. +> sled-update-rot-bootloader 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c --stage0 1.0.0 +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c RoT bootloader versions: stage0 -> 1.0.0 > inventory-generate -generated inventory collection 08abe624-4b5f-491c-90cb-d74a84e4ba3e from configured sleds +generated inventory collection 04bc9001-0836-4fec-b9cb-9d4760caf8b4 from configured sleds > blueprint-plan latest latest INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 -INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670, expected_inactive_version: Version(ArtifactVersion("0.5.0")), expected_active_version: 0.0.1, component: sp, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 -INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 -INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, expected_stage0_next_version: NoValidVersion, expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 -INFO ran out of boards for MGS-driven update +INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, expected_stage0_next_version: Version(ArtifactVersion("0.5.0")), expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 +INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, component: rot, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 +INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 based on parent blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b planning report for blueprint afb09faf-a586-4483-9289-04d4f1d8ba23: chicken switches: add zones with mupdate override: false * 1 pending MGS update: - * model2:serial2: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) + * model1:serial1: Rot(PendingMgsUpdateRotDetails { expected_active_slot: 
ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) @@ -2150,11 +2174,11 @@ to: blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 PENDING MGS UPDATES: Pending MGS-managed updates (all baseboards): - -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - sp_type slot part_number serial_number artifact_hash artifact_version details - -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -- sled 1 model1 serial1 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670 1.0.0 Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: Version(ArtifactVersion("0.5.0")) }) -+ sled 2 model2 serial2 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236 1.0.0 RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +* sled 1 model1 serial1 - 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236 1.0.0 - RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: Version(ArtifactVersion("0.5.0")) }) + └─ + 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a + Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) internal DNS: @@ -2296,20 +2320,24 @@ external DNS: -> # Update the RoT bootloader on the last sled. -> # There should be a pending RoT update. 
-> sled-update-rot-bootloader d81c6a84-79b8-4958-ae41-ea46c9b19763 --stage0 1.0.0 -set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 RoT bootloader versions: stage0 -> 1.0.0 +> # Now we'll change the inactive slot contents of the simulated RoT. Like with +> # the RoT bootloader, this should make the update impossible and cause the +> # planner to fix it. +> set ignore-impossible-mgs-updates-since now +ignoring impossible MGS updates since + +> sled-update-rot 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c --slot-b 0.5.0 +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c RoT settings: slot b -> 0.5.0 > inventory-generate -generated inventory collection 005f6a30-7f65-4593-9f78-ee68f766f42b from configured sleds +generated inventory collection 08abe624-4b5f-491c-90cb-d74a84e4ba3e from configured sleds > blueprint-plan latest latest INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 -INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, expected_stage0_next_version: NoValidVersion, expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 -INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, component: rot, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 +INFO MGS-driven update impossible (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, component: rot, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 +INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("0.5.0")), component: rot, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 based on parent blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 planning report for blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700: @@ -2317,7 +2345,7 @@ chicken switches: add zones with 
mupdate override: false * 1 pending MGS update: - * model2:serial2: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) + * model1:serial1: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("0.5.0")), expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) @@ -2341,11 +2369,11 @@ to: blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 PENDING MGS UPDATES: Pending MGS-managed updates (all baseboards): - ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - sp_type slot part_number serial_number artifact_hash artifact_version details - ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -* sled 2 model2 serial2 - 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236 1.0.0 - RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) - └─ + 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a + Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + sp_type slot part_number serial_number artifact_hash artifact_version details + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +* sled 1 model1 serial1 
04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a 1.0.0 - Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) + └─ + Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("0.5.0")), expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) internal DNS: @@ -2487,20 +2515,20 @@ external DNS: -> # Update the RoT on the last sled. -> # There should be one last pending SP update. -> sled-update-rot d81c6a84-79b8-4958-ae41-ea46c9b19763 --slot-a 1.0.0 -set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 RoT settings: slot a -> 1.0.0 +> # Now simulate the update completing successfully. +> # Like before, we should see a pending SP update for this sled. +> sled-update-rot 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c --slot-a 1.0.0 +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c RoT settings: slot a -> 1.0.0 > inventory-generate -generated inventory collection b5263998-e486-4cea-8842-b32bd326fa3a from configured sleds +generated inventory collection 005f6a30-7f65-4593-9f78-ee68f766f42b from configured sleds > blueprint-plan latest latest INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 -INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, component: rot, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 -INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670, expected_inactive_version: NoValidVersion, expected_active_version: 0.0.1, component: sp, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 +INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("0.5.0")), component: rot, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 +INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670, 
expected_inactive_version: NoValidVersion, expected_active_version: 0.0.1, component: sp, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 based on parent blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 planning report for blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1: @@ -2508,7 +2536,7 @@ chicken switches: add zones with mupdate override: false * 1 pending MGS update: - * model2:serial2: Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) + * model1:serial1: Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) @@ -2532,11 +2560,11 @@ to: blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 PENDING MGS UPDATES: Pending MGS-managed updates (all baseboards): - ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - sp_type slot part_number serial_number artifact_hash artifact_version details - ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -* sled 2 model2 serial2 - 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a 1.0.0 - Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) - └─ + 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670 + Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +* sled 1 model1 serial1 - 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a 1.0.0 - Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("0.5.0")), expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) + └─ + 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670 + Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) internal DNS: @@ -2678,47 +2706,1529 @@ external DNS: -> # Finish updating the last sled and do one more planning run. -> # This should update one control plane zone. -> sled-update-sp d81c6a84-79b8-4958-ae41-ea46c9b19763 --active 1.0.0 -set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 SP versions: active -> 1.0.0 +> # We repeat the same procedure with the SP. Like with the RoT, this should make +> # the update impossible and cause the planner to fix it. +> set ignore-impossible-mgs-updates-since now +ignoring impossible MGS updates since + +> sled-update-sp 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c --inactive 0.5.0 +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c SP versions: inactive -> 0.5.0 > inventory-generate -generated inventory collection 68767302-7fed-4eb1-9611-3dfd807ff0cd from configured sleds +generated inventory collection b5263998-e486-4cea-8842-b32bd326fa3a from configured sleds > blueprint-plan latest latest INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 -INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670, expected_inactive_version: NoValidVersion, expected_active_version: 0.0.1, component: sp, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 -INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 -INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 -INFO ran out of boards for MGS-driven update +INFO MGS-driven update impossible (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670, expected_inactive_version: NoValidVersion, expected_active_version: 0.0.1, component: sp, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 +INFO configuring MGS-driven update, 
artifact_version: 1.0.0, artifact_hash: 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670, expected_inactive_version: Version(ArtifactVersion("0.5.0")), expected_active_version: 0.0.1, component: sp, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 +INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 based on parent blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 planning report for blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300: chicken switches: add zones with mupdate override: false -* 1 out-of-date zone updated in-place: - * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 353b3b65-20f7-48c3-88f7-495bd5d31545 (clickhouse) -* 25 remaining out-of-date zones +* 1 pending MGS update: + * model1:serial1: Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: Version(ArtifactVersion("0.5.0")) }) +* zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) > blueprint-diff latest from: blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 to: blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 - MODIFIED SLEDS: + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + PENDING MGS UPDATES: + + Pending MGS-managed updates (all baseboards): + ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +* sled 1 model1 serial1 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670 1.0.0 - Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) + └─ + Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: Version(ArtifactVersion("0.5.0")) }) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) + AAAA fd00:1122:3344:101::27 + name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) + AAAA fd00:1122:3344:101::22 + name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) + AAAA fd00:1122:3344:103::22 + name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) + AAAA fd00:1122:3344:2::1 + name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) + AAAA fd00:1122:3344:102::22 + name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) + AAAA fd00:1122:3344:101::25 + name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) + AAAA 
fd00:1122:3344:102::21 + name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) + AAAA fd00:1122:3344:103::26 + name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) + AAAA fd00:1122:3344:103::24 + name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) + AAAA fd00:1122:3344:103::27 + name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) + AAAA fd00:1122:3344:101::23 + name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) + AAAA fd00:1122:3344:102::28 + name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) + AAAA fd00:1122:3344:101::1 + name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) + AAAA fd00:1122:3344:1::1 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + name: _clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse._tcp (records: 1) + SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal + name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) + SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal + name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) + SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal + name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) + SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal + name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) + SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal + name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) + SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal + name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) + SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal + name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) + SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal + name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) + SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal + name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) + SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + name: _internal-ntp._tcp (records: 3) + SRV port 123 
62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal + name: _nexus._tcp (records: 3) + SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 3) + SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal + SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal + SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) + AAAA fd00:1122:3344:102::25 + name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) + AAAA fd00:1122:3344:101::24 + name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) + AAAA fd00:1122:3344:102::26 + name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) + AAAA fd00:1122:3344:103::1 + name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) + AAAA fd00:1122:3344:101::26 + name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) + AAAA fd00:1122:3344:102::27 + name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) + AAAA fd00:1122:3344:3::1 + name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) + AAAA fd00:1122:3344:103::21 + name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) + AAAA fd00:1122:3344:103::25 + name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) + AAAA fd00:1122:3344:103::23 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.2 + A 192.0.2.3 + A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + + +> # Let's simulate the successful SP update as well. +> sled-update-sp 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c --active 1.0.0 +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c SP versions: active -> 1.0.0 + +> inventory-generate +generated inventory collection 68767302-7fed-4eb1-9611-3dfd807ff0cd from configured sleds + + +> # Planning should remove this update and add an OS update for this sled. 
+> blueprint-plan latest latest +INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670, expected_inactive_version: Version(ArtifactVersion("0.5.0")), expected_active_version: 0.0.1, component: sp, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 +INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89, sled_agent_address: [fd00:1122:3344:102::1]:12345, expected_inactive_phase_2_hash: f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 +INFO reached maximum number of pending MGS-driven updates, max: 1 +generated blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 based on parent blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 +planning report for blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839: +chicken switches: + add zones with mupdate override: false + +* 1 pending MGS update: + * model1:serial1: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008"), sled_agent_address: [fd00:1122:3344:102::1]:12345 }) +* zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
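The planning report above packs every host phase 1 precondition into a single PendingMgsUpdateHostPhase1Details line. As a reading aid, the sketch below restates those same fields and the check they imply against the next inventory collection; the type names and helper are illustrative only, not the actual omicron planner types, and once any of these expectations stops holding the planner either treats the update as completed (the new artifact is now active) or as impossible, as the later steps in this transcript show.

// Illustrative only: a simplified mirror of the precondition fields shown in
// PendingMgsUpdateHostPhase1Details above; these are not the real omicron types.

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum M2Slot { A, B }

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct ArtifactHash([u8; 32]);

struct HostPhase1Preconditions {
    expected_active_phase_1_slot: M2Slot,  // slot whose phase 1 image is active
    expected_boot_disk: M2Slot,            // M.2 the sled reports having booted from
    expected_active_phase_1_hash: ArtifactHash,
    expected_active_phase_2_hash: ArtifactHash,
    expected_inactive_phase_1_hash: ArtifactHash,
    expected_inactive_phase_2_hash: ArtifactHash,
}

/// Observed values from the latest inventory collection for the same sled.
struct HostPhase1Observed {
    active_phase_1_slot: M2Slot,
    boot_disk: M2Slot,
    active_phase_1_hash: ArtifactHash,
    active_phase_2_hash: ArtifactHash,
    inactive_phase_1_hash: ArtifactHash,
    inactive_phase_2_hash: ArtifactHash,
}

/// True when every recorded precondition still matches what inventory reports.
fn preconditions_hold(p: &HostPhase1Preconditions, o: &HostPhase1Observed) -> bool {
    p.expected_active_phase_1_slot == o.active_phase_1_slot
        && p.expected_boot_disk == o.boot_disk
        && p.expected_active_phase_1_hash == o.active_phase_1_hash
        && p.expected_active_phase_2_hash == o.active_phase_2_hash
        && p.expected_inactive_phase_1_hash == o.inactive_phase_1_hash
        && p.expected_inactive_phase_2_hash == o.inactive_phase_2_hash
}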
+ + +> blueprint-diff latest +from: blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 +to: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 + + MODIFIED SLEDS: sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 2 -> 3): host phase 2 contents: - ------------------------ - slot boot image source - ------------------------ - A current contents - B current contents + -------------------------------- + slot boot image source + -------------------------------- + A current contents +* B - current contents + └─ + artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-727522a7-934f-494d-b5b3-160968e74463 in service + fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service + fake-vendor fake-model serial-b5fd5bc1-099e-4e77-8028-a9793c11f43b in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_727522a7-934f-494d-b5b3-160968e74463/crucible 2f204c50-a327-479c-8852-f53ec7a19c1f in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 3e0d6188-c503-49cf-a441-fa7df40ceb43 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 in 
service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 + + + omicron zones: + --------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + --------------------------------------------------------------------------------------------------------------- + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 install dataset in service fd00:1122:3344:102::23 + crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 install dataset in service fd00:1122:3344:102::28 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset in service fd00:1122:3344:102::24 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset in service fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset in service fd00:1122:3344:102::21 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + PENDING MGS UPDATES: + + Pending MGS-managed updates (all baseboards): + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + sp_type slot part_number serial_number artifact_hash artifact_version details + 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +* sled 1 model1 serial1 - 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670 1.0.0 - Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: Version(ArtifactVersion("0.5.0")) }) + └─ + 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89 + HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008"), sled_agent_address: [fd00:1122:3344:102::1]:12345 }) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) + AAAA fd00:1122:3344:101::27 + name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) + AAAA fd00:1122:3344:101::22 + name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) + AAAA fd00:1122:3344:103::22 + name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) + AAAA fd00:1122:3344:2::1 + name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) + AAAA fd00:1122:3344:102::22 + name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) + AAAA fd00:1122:3344:101::25 + name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) + AAAA fd00:1122:3344:102::21 + name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) + AAAA fd00:1122:3344:103::26 + name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) + AAAA fd00:1122:3344:103::24 + name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) + AAAA fd00:1122:3344:103::27 + name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) + AAAA fd00:1122:3344:101::23 + name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) + AAAA fd00:1122:3344:102::28 + name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) + AAAA fd00:1122:3344:101::1 + name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) + AAAA fd00:1122:3344:1::1 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + 
name: _clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse._tcp (records: 1) + SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal + name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) + SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal + name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) + SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal + name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) + SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal + name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) + SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal + name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) + SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal + name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) + SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal + name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) + SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal + name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) + SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal + name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) + SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + name: _internal-ntp._tcp (records: 3) + SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal + name: _nexus._tcp (records: 3) + SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 3) + SRV port 12348 
2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal + SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal + SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) + AAAA fd00:1122:3344:102::25 + name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) + AAAA fd00:1122:3344:101::24 + name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) + AAAA fd00:1122:3344:102::26 + name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) + AAAA fd00:1122:3344:103::1 + name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) + AAAA fd00:1122:3344:101::26 + name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) + AAAA fd00:1122:3344:102::27 + name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) + AAAA fd00:1122:3344:3::1 + name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) + AAAA fd00:1122:3344:103::21 + name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) + AAAA fd00:1122:3344:103::25 + name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) + AAAA fd00:1122:3344:103::23 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.2 + A 192.0.2.3 + A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + + +> # Try a host OS impossible update replacement: write an unknown artifact to the +> # sled's phase 1. The planner should realize the update is impossible and +> # replace it. As with the impossible SP update test above, we have to bump the +> # "ignore impossible MGS updates" timestamp.) 
+> set ignore-impossible-mgs-updates-since now +ignoring impossible MGS updates since + +> sled-update-host-phase1 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c --slot-b ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c host phase 1 details: B -> ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff + +> inventory-generate +generated inventory collection 62898097-2ff1-48d0-8bc1-91b475daa33d from configured sleds + +> blueprint-plan latest latest +INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO MGS-driven update impossible (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89, sled_agent_address: [fd00:1122:3344:102::1]:12345, expected_inactive_phase_2_hash: f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 +INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89, sled_agent_address: [fd00:1122:3344:102::1]:12345, expected_inactive_phase_2_hash: f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008, expected_inactive_phase_1_hash: ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 +INFO reached maximum number of pending MGS-driven updates, max: 1 +generated blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c based on parent blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 +planning report for blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c: +chicken switches: + add zones with mupdate override: false + +* 1 pending MGS update: + * model1:serial1: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), expected_inactive_phase_2_hash: ArtifactHash("f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008"), sled_agent_address: [fd00:1122:3344:102::1]:12345 }) +* zone updates waiting on pending MGS updates 
(RoT / SP / Host OS / etc.) + + +> blueprint-diff latest +from: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 +to: blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + PENDING MGS UPDATES: + + Pending MGS-managed updates (all baseboards): + ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +* sled 1 model1 serial1 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89 1.0.0 - HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008"), sled_agent_address: [fd00:1122:3344:102::1]:12345 }) + └─ + HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), expected_inactive_phase_2_hash: ArtifactHash("f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008"), sled_agent_address: [fd00:1122:3344:102::1]:12345 }) + + +internal DNS: + DNS zone: 
"control-plane.oxide.internal" (unchanged) + name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) + AAAA fd00:1122:3344:101::27 + name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) + AAAA fd00:1122:3344:101::22 + name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) + AAAA fd00:1122:3344:103::22 + name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) + AAAA fd00:1122:3344:2::1 + name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) + AAAA fd00:1122:3344:102::22 + name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) + AAAA fd00:1122:3344:101::25 + name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) + AAAA fd00:1122:3344:102::21 + name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) + AAAA fd00:1122:3344:103::26 + name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) + AAAA fd00:1122:3344:103::24 + name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) + AAAA fd00:1122:3344:103::27 + name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) + AAAA fd00:1122:3344:101::23 + name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) + AAAA fd00:1122:3344:102::28 + name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) + AAAA fd00:1122:3344:101::1 + name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) + AAAA fd00:1122:3344:1::1 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + name: _clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse._tcp (records: 1) + SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal + name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) + SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal + name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) + SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal + name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) + SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal + name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) + SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal + name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) + SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal + name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) + SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal + name: 
_crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) + SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal + name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) + SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal + name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) + SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + name: _internal-ntp._tcp (records: 3) + SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal + name: _nexus._tcp (records: 3) + SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 3) + SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal + SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal + SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) + AAAA fd00:1122:3344:102::25 + name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) + AAAA fd00:1122:3344:101::24 + name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) + AAAA fd00:1122:3344:102::26 + name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) + AAAA fd00:1122:3344:103::1 + name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) + AAAA fd00:1122:3344:101::26 + name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) + AAAA fd00:1122:3344:102::27 + name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) + AAAA fd00:1122:3344:3::1 + name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) + AAAA fd00:1122:3344:103::21 + name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) + AAAA fd00:1122:3344:103::25 + name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) + AAAA fd00:1122:3344:103::23 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.2 + A 192.0.2.3 + A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + + +> # 
Now simulate the update completing successfully. +> sled-update-host-phase2 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c --boot-disk B --slot-b f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008 +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c host phase 2 details: boot_disk -> B, B -> f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008 + +> sled-update-host-phase1 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c --active B --slot-b 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89 +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c host phase 1 details: active -> B, B -> 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89 + +> inventory-generate +generated inventory collection 3086f142-62d3-4f77-bda3-674afbb42d0d from configured sleds + + +> # Another planning step should try to update the last sled, starting with the +> # RoT bootloader. +> blueprint-plan latest latest +INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89, sled_agent_address: [fd00:1122:3344:102::1]:12345, expected_inactive_phase_2_hash: f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008, expected_inactive_phase_1_hash: ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, expected_stage0_next_version: NoValidVersion, expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f based on parent blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c +planning report for blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f: +chicken switches: + add zones with mupdate override: false + +* 1 pending MGS update: + * model2:serial2: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) +* zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
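With serial1's host OS update complete, the planner moves on to the last sled and starts over at the RoT bootloader. Taken together with the earlier steps in this transcript, that reflects a fixed per-board ordering (RoT bootloader, then RoT, then SP, then host OS phase 1), with at most one pending MGS-driven update configured at a time ("max: 1" in the log lines). A minimal sketch of that ordering; the enum and helper are purely illustrative, not the planner's real code:

// Reading aid only: the per-board component order this transcript walks through.

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum MgsManagedComponent {
    RotBootloader,
    Rot,
    Sp,
    HostPhase1,
}

/// Components in the order they are brought up to date on a single board.
const UPDATE_ORDER: [MgsManagedComponent; 4] = [
    MgsManagedComponent::RotBootloader,
    MgsManagedComponent::Rot,
    MgsManagedComponent::Sp,
    MgsManagedComponent::HostPhase1,
];

/// First component on the board that is still out of date, if any; only one
/// such update is configured per planning pass in this simulation.
fn next_out_of_date(
    is_up_to_date: impl Fn(MgsManagedComponent) -> bool,
) -> Option<MgsManagedComponent> {
    UPDATE_ORDER.into_iter().find(|&c| !is_up_to_date(c))
}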
+ + +> blueprint-diff latest +from: blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c +to: blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + PENDING MGS UPDATES: + + Pending MGS-managed updates (all baseboards): + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +- sled 1 model1 serial1 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89 1.0.0 HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), expected_inactive_phase_2_hash: ArtifactHash("f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008"), sled_agent_address: [fd00:1122:3344:102::1]:12345 }) ++ sled 2 model2 serial2 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236 1.0.0 RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) + AAAA fd00:1122:3344:101::27 + name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) + AAAA fd00:1122:3344:101::22 + name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: 
3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) + AAAA fd00:1122:3344:103::22 + name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) + AAAA fd00:1122:3344:2::1 + name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) + AAAA fd00:1122:3344:102::22 + name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) + AAAA fd00:1122:3344:101::25 + name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) + AAAA fd00:1122:3344:102::21 + name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) + AAAA fd00:1122:3344:103::26 + name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) + AAAA fd00:1122:3344:103::24 + name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) + AAAA fd00:1122:3344:103::27 + name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) + AAAA fd00:1122:3344:101::23 + name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) + AAAA fd00:1122:3344:102::28 + name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) + AAAA fd00:1122:3344:101::1 + name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) + AAAA fd00:1122:3344:1::1 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + name: _clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse._tcp (records: 1) + SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal + name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) + SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal + name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) + SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal + name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) + SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal + name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) + SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal + name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) + SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal + name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) + SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal + name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) + SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal + name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) + SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal + name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) + SRV port 32345 
f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + name: _internal-ntp._tcp (records: 3) + SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal + name: _nexus._tcp (records: 3) + SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 3) + SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal + SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal + SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) + AAAA fd00:1122:3344:102::25 + name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) + AAAA fd00:1122:3344:101::24 + name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) + AAAA fd00:1122:3344:102::26 + name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) + AAAA fd00:1122:3344:103::1 + name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) + AAAA fd00:1122:3344:101::26 + name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) + AAAA fd00:1122:3344:102::27 + name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) + AAAA fd00:1122:3344:3::1 + name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) + AAAA fd00:1122:3344:103::21 + name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) + AAAA fd00:1122:3344:103::25 + name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) + AAAA fd00:1122:3344:103::23 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.2 + A 192.0.2.3 + A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + + +> # Update the RoT bootloader on the last sled. +> # There should be a pending RoT update. 
+> sled-update-rot-bootloader d81c6a84-79b8-4958-ae41-ea46c9b19763 --stage0 1.0.0 +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 RoT bootloader versions: stage0 -> 1.0.0 + +> inventory-generate +generated inventory collection ae5b3bb4-ce21-465f-b18e-857614732d66 from configured sleds + +> blueprint-plan latest latest +INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, expected_stage0_next_version: NoValidVersion, expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 +INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, component: rot, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 +INFO reached maximum number of pending MGS-driven updates, max: 1 +generated blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 based on parent blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f +planning report for blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35: +chicken switches: + add zones with mupdate override: false + +* 1 pending MGS update: + * model2:serial2: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) +* zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
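The "completed" and "impossible" log lines both end with "will remove it and re-evaluate board", but they are different outcomes of the same check: compare the expectations recorded in the pending update against fresh inventory. A simplified, hypothetical sketch of that three-way decision follows; the real planner operates on the per-component detail types shown in these reports, so this is only a model of the behavior visible in the transcript.

// Illustrative three-way decision behind the "completed" / "impossible"
// planner log lines in this transcript; types and logic are simplified.

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Version(u32);

#[derive(Debug, PartialEq, Eq)]
enum PendingUpdateStatus {
    /// The active slot already reports the new artifact: drop the pending
    /// update and re-evaluate the board for its next out-of-date component.
    Completed,
    /// Observed state no longer matches the recorded expectations (for
    /// example, the inactive slot changed underneath us): drop the update and
    /// configure a fresh one against current inventory.
    Impossible,
    /// Expectations still hold and the new artifact is not active yet.
    InProgress,
}

fn evaluate(
    new_artifact: Version,
    expected_active: Version,
    expected_inactive: Option<Version>,
    observed_active: Version,
    observed_inactive: Option<Version>,
) -> PendingUpdateStatus {
    if observed_active == new_artifact {
        PendingUpdateStatus::Completed
    } else if observed_active != expected_active || observed_inactive != expected_inactive {
        PendingUpdateStatus::Impossible
    } else {
        PendingUpdateStatus::InProgress
    }
}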
+ + +> blueprint-diff latest +from: blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f +to: blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + PENDING MGS UPDATES: + + Pending MGS-managed updates (all baseboards): + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +* sled 2 model2 serial2 - 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236 1.0.0 - RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) + └─ + 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a + Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) + AAAA fd00:1122:3344:101::27 + name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) + AAAA fd00:1122:3344:101::22 + name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) + AAAA fd00:1122:3344:103::22 + name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) + AAAA fd00:1122:3344:2::1 + name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) + AAAA fd00:1122:3344:102::22 + name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) + AAAA fd00:1122:3344:101::25 + name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) + AAAA fd00:1122:3344:102::21 + name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) + AAAA fd00:1122:3344:103::26 + name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) + AAAA fd00:1122:3344:103::24 + name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) + AAAA fd00:1122:3344:103::27 + name: 
803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) + AAAA fd00:1122:3344:101::23 + name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) + AAAA fd00:1122:3344:102::28 + name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) + AAAA fd00:1122:3344:101::1 + name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) + AAAA fd00:1122:3344:1::1 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + name: _clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse._tcp (records: 1) + SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal + name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) + SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal + name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) + SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal + name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) + SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal + name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) + SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal + name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) + SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal + name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) + SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal + name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) + SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal + name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) + SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal + name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) + SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + name: _internal-ntp._tcp (records: 3) + SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal 
+ name: _nexus._tcp (records: 3) + SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 3) + SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal + SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal + SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) + AAAA fd00:1122:3344:102::25 + name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) + AAAA fd00:1122:3344:101::24 + name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) + AAAA fd00:1122:3344:102::26 + name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) + AAAA fd00:1122:3344:103::1 + name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) + AAAA fd00:1122:3344:101::26 + name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) + AAAA fd00:1122:3344:102::27 + name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) + AAAA fd00:1122:3344:3::1 + name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) + AAAA fd00:1122:3344:103::21 + name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) + AAAA fd00:1122:3344:103::25 + name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) + AAAA fd00:1122:3344:103::23 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.2 + A 192.0.2.3 + A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + + +> # Update the RoT on the last sled. +> # There should be one last pending SP update. 
+> sled-update-rot d81c6a84-79b8-4958-ae41-ea46c9b19763 --slot-a 1.0.0 +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 RoT settings: slot a -> 1.0.0 + +> inventory-generate +generated inventory collection 34c3258c-b2ab-4da9-9720-41a3a703c3d7 from configured sleds + +> blueprint-plan latest latest +INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, component: rot, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 +INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670, expected_inactive_version: NoValidVersion, expected_active_version: 0.0.1, component: sp, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 +INFO reached maximum number of pending MGS-driven updates, max: 1 +generated blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 based on parent blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 +planning report for blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746: +chicken switches: + add zones with mupdate override: false + +* 1 pending MGS update: + * model2:serial2: Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) +* zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
+ + +> blueprint-diff latest +from: blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 +to: blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + PENDING MGS UPDATES: + + Pending MGS-managed updates (all baseboards): + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +* sled 2 model2 serial2 - 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a 1.0.0 - Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) + └─ + 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670 + Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) + AAAA fd00:1122:3344:101::27 + name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) + AAAA fd00:1122:3344:101::22 + name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) + AAAA fd00:1122:3344:103::22 + name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) + AAAA fd00:1122:3344:2::1 + name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) + AAAA fd00:1122:3344:102::22 + name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) + AAAA fd00:1122:3344:101::25 + name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) + AAAA fd00:1122:3344:102::21 + name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) + AAAA fd00:1122:3344:103::26 + name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) + AAAA fd00:1122:3344:103::24 + name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) + AAAA fd00:1122:3344:103::27 + name: 
803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) + AAAA fd00:1122:3344:101::23 + name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) + AAAA fd00:1122:3344:102::28 + name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) + AAAA fd00:1122:3344:101::1 + name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) + AAAA fd00:1122:3344:1::1 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + name: _clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse._tcp (records: 1) + SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal + name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) + SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal + name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) + SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal + name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) + SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal + name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) + SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal + name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) + SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal + name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) + SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal + name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) + SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal + name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) + SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal + name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) + SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + name: _internal-ntp._tcp (records: 3) + SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal 
+ name: _nexus._tcp (records: 3) + SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 3) + SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal + SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal + SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) + AAAA fd00:1122:3344:102::25 + name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) + AAAA fd00:1122:3344:101::24 + name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) + AAAA fd00:1122:3344:102::26 + name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) + AAAA fd00:1122:3344:103::1 + name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) + AAAA fd00:1122:3344:101::26 + name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) + AAAA fd00:1122:3344:102::27 + name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) + AAAA fd00:1122:3344:3::1 + name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) + AAAA fd00:1122:3344:103::21 + name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) + AAAA fd00:1122:3344:103::25 + name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) + AAAA fd00:1122:3344:103::23 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.2 + A 192.0.2.3 + A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + + +> # Finish updating the last sled's SP. +> # There should be a pending host phase 1 update. 
+> sled-update-sp d81c6a84-79b8-4958-ae41-ea46c9b19763 --active 1.0.0 +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 SP versions: active -> 1.0.0 + +> inventory-generate +generated inventory collection 5e106b73-6a14-4955-b8a8-a4f8afed6405 from configured sleds + +> blueprint-plan latest latest +INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670, expected_inactive_version: NoValidVersion, expected_active_version: 0.0.1, component: sp, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 +INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89, sled_agent_address: [fd00:1122:3344:103::1]:12345, expected_inactive_phase_2_hash: f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 +INFO reached maximum number of pending MGS-driven updates, max: 1 +generated blueprint b82656b0-a9be-433d-83d0-e2bdf371777a based on parent blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 +planning report for blueprint b82656b0-a9be-433d-83d0-e2bdf371777a: +chicken switches: + add zones with mupdate override: false + +* 1 pending MGS update: + * model2:serial2: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008"), sled_agent_address: [fd00:1122:3344:103::1]:12345 }) +* zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
+ + +> blueprint-diff latest +from: blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 +to: blueprint b82656b0-a9be-433d-83d0-e2bdf371777a + + MODIFIED SLEDS: + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 2 -> 3): + + host phase 2 contents: + -------------------------------- + slot boot image source + -------------------------------- + A current contents +* B - current contents + └─ + artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-18b20749-0748-4105-bb10-7b13cfc776e2 in service + fake-vendor fake-model serial-30c16fe4-4229-49d0-ab01-3138f2c7dff2 in service + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crucible 7ea73f80-c4e0-450a-92dc-8397ce2af14f in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crucible 6f04dd20-5e2c-4fa8-8430-a886470ed140 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_crucible_694bd14f-cb24-4be4-bb19-876e79cda2c8 3443a368-199e-4d26-b59f-3f2bbd507761 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + 
oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 + + + omicron zones: + --------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + --------------------------------------------------------------------------------------------------------------- + crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 install dataset in service fd00:1122:3344:103::26 + crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 install dataset in service fd00:1122:3344:103::27 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset in service fd00:1122:3344:103::24 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset in service fd00:1122:3344:3::1 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + PENDING MGS UPDATES: + + Pending MGS-managed updates (all baseboards): + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + sp_type slot part_number serial_number artifact_hash artifact_version details + 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +* sled 2 model2 serial2 - 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670 1.0.0 - Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) + └─ + 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89 + HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008"), sled_agent_address: [fd00:1122:3344:103::1]:12345 }) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) + AAAA fd00:1122:3344:101::27 + name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) + AAAA fd00:1122:3344:101::22 + name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) + AAAA fd00:1122:3344:103::22 + name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) + AAAA fd00:1122:3344:2::1 + name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) + AAAA fd00:1122:3344:102::22 + name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) + AAAA fd00:1122:3344:101::25 + name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) + AAAA fd00:1122:3344:102::21 + name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) + AAAA fd00:1122:3344:103::26 + name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) + AAAA fd00:1122:3344:103::24 + name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) + AAAA fd00:1122:3344:103::27 + name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) + AAAA fd00:1122:3344:101::23 + name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) + AAAA fd00:1122:3344:102::28 + name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) + AAAA fd00:1122:3344:101::1 + name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) + AAAA fd00:1122:3344:1::1 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + name: 
_clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse._tcp (records: 1) + SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal + name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) + SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal + name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) + SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal + name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) + SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal + name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) + SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal + name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) + SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal + name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) + SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal + name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) + SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal + name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) + SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal + name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) + SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + name: _internal-ntp._tcp (records: 3) + SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal + name: _nexus._tcp (records: 3) + SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 3) + SRV port 12348 
2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal + SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal + SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) + AAAA fd00:1122:3344:102::25 + name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) + AAAA fd00:1122:3344:101::24 + name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) + AAAA fd00:1122:3344:102::26 + name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) + AAAA fd00:1122:3344:103::1 + name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) + AAAA fd00:1122:3344:101::26 + name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) + AAAA fd00:1122:3344:102::27 + name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) + AAAA fd00:1122:3344:3::1 + name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) + AAAA fd00:1122:3344:103::21 + name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) + AAAA fd00:1122:3344:103::25 + name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) + AAAA fd00:1122:3344:103::23 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.2 + A 192.0.2.3 + A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + + +> # Finish updating the last sled's host OS. +> sled-update-host-phase2 d81c6a84-79b8-4958-ae41-ea46c9b19763 --boot-disk B --slot-b f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008 +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 host phase 2 details: boot_disk -> B, B -> f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008 + +> sled-update-host-phase1 d81c6a84-79b8-4958-ae41-ea46c9b19763 --active B --slot-b 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89 +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 host phase 1 details: active -> B, B -> 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89 + +> inventory-generate +generated inventory collection 36ef425f-a672-4bf4-8d29-14815a84ccad from configured sleds + + +> # Do one more planning run. This should update one control plane zone. 
+> blueprint-plan latest latest +INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89, sled_agent_address: [fd00:1122:3344:103::1]:12345, expected_inactive_phase_2_hash: f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO ran out of boards for MGS-driven update +generated blueprint 31c84831-be52-4630-bc3f-128d72cd8f22 based on parent blueprint b82656b0-a9be-433d-83d0-e2bdf371777a +planning report for blueprint 31c84831-be52-4630-bc3f-128d72cd8f22: +chicken switches: + add zones with mupdate override: false + +* 1 out-of-date zone updated in-place: + * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 353b3b65-20f7-48c3-88f7-495bd5d31545 (clickhouse) +* 25 remaining out-of-date zones + + +> blueprint-diff latest +from: blueprint b82656b0-a9be-433d-83d0-e2bdf371777a +to: blueprint 31c84831-be52-4630-bc3f-128d72cd8f22 + + MODIFIED SLEDS: + + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 3 -> 4): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 physical disks: @@ -2789,10 +4299,10 @@ to: blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 PENDING MGS UPDATES: Pending MGS-managed updates (all baseboards): - ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - sp_type slot part_number serial_number artifact_hash artifact_version details - ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -- sled 2 model2 serial2 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670 1.0.0 Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) + 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +- sled 2 model2 serial2 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89 1.0.0 HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008"), sled_agent_address: [fd00:1122:3344:103::1]:12345 }) internal DNS: diff --git a/nexus-sled-agent-shared/src/inventory.rs b/nexus-sled-agent-shared/src/inventory.rs index 32599095f3..176106c12a 100644 --- a/nexus-sled-agent-shared/src/inventory.rs +++ b/nexus-sled-agent-shared/src/inventory.rs @@ -276,6 +276,18 @@ pub struct BootPartitionContents { pub slot_b: Result, } +impl BootPartitionContents { + pub fn slot_details( + &self, + slot: M2Slot, + ) -> &Result { + match slot { + M2Slot::A => &self.slot_a, + M2Slot::B => &self.slot_b, + } + } +} + #[derive(Clone, Debug, PartialEq, Eq, Deserialize, JsonSchema, Serialize)] pub struct BootPartitionDetails { pub header: BootImageHeader, diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index ce8b02f314..29c9c67bad 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -14,6 +14,7 @@ use crate::blueprint_editor::ExternalSnatNetworkingChoice; use crate::blueprint_editor::NoAvailableDnsSubnets; use crate::blueprint_editor::SledEditError; use crate::blueprint_editor::SledEditor; +use crate::mgs_updates::PendingHostPhase2Changes; use crate::planner::NoopConvertGlobalIneligibleReason; use 
crate::planner::NoopConvertInfo; use crate::planner::NoopConvertSledIneligibleReason; @@ -1945,6 +1946,16 @@ impl<'a> BlueprintBuilder<'a> { Ok(final_counts.difference_since(initial_counts)) } + pub(crate) fn apply_pending_host_phase_2_changes( + &mut self, + changes: PendingHostPhase2Changes, + ) -> Result<(), Error> { + for (sled_id, slot, contents) in changes.into_iter() { + self.sled_set_host_phase_2_slot(sled_id, slot, contents)?; + } + Ok(()) + } + pub fn sled_set_host_phase_2( &mut self, sled_id: SledUuid, diff --git a/nexus/reconfigurator/planning/src/mgs_updates/host_phase_1.rs b/nexus/reconfigurator/planning/src/mgs_updates/host_phase_1.rs new file mode 100644 index 0000000000..ba84e858ea --- /dev/null +++ b/nexus/reconfigurator/planning/src/mgs_updates/host_phase_1.rs @@ -0,0 +1,998 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Facilities for making choices about MGS-managed host phase 1 updates + +use super::MgsUpdateStatus; +use super::MgsUpdateStatusError; +use gateway_client::types::SpType; +use nexus_types::deployment::BlueprintArtifactVersion; +use nexus_types::deployment::BlueprintHostPhase2DesiredContents; +use nexus_types::deployment::PendingMgsUpdate; +use nexus_types::deployment::PendingMgsUpdateDetails; +use nexus_types::deployment::PendingMgsUpdateHostPhase1Details; +use nexus_types::inventory::BaseboardId; +use nexus_types::inventory::Collection; +use omicron_common::api::external::TufArtifactMeta; +use omicron_common::api::external::TufRepoDescription; +use omicron_common::disk::M2Slot; +use omicron_uuid_kinds::SledUuid; +use slog::Logger; +use slog::debug; +use slog::error; +use slog::warn; +use std::collections::BTreeMap; +use std::sync::Arc; +use tufaceous_artifact::ArtifactHash; +use tufaceous_artifact::ArtifactKind; + +/// Describes a set of blueprint changes to the desired host phase 2 contents +/// for a number of sleds +/// +/// This is generated by the planning process whenever it also generates host +/// phase 1 updates. 
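For orientation, here is a rough sketch of how this pairing is meant to be consumed, pieced together from the code in this change (`try_make_update` below returns these phase 2 changes alongside the phase 1 update, and `BlueprintBuilder::apply_pending_host_phase_2_changes` above applies them). The surrounding names (`pending_mgs_updates`, `all_phase_2_changes`, `tuf_repo`, `builder`) are illustrative placeholders, not the planner's actual identifiers:

    // Hypothetical planner-side glue (it would live in the parent
    // `mgs_updates` module); only the two functions from this change are
    // real, everything else here is illustrative.
    let mut all_phase_2_changes = PendingHostPhase2Changes::empty();
    if let Some((pending_update, mut phase_2_changes)) =
        host_phase_1::try_make_update(log, &baseboard_id, inventory, tuf_repo)
    {
        // Queue the MGS-driven phase 1 update itself...
        pending_mgs_updates.push(pending_update);
        // ...and remember the matching phase 2 change, so the blueprint will
        // tell sled-agent to write the new phase 2 image to its inactive slot.
        all_phase_2_changes.append(&mut phase_2_changes);
    }
    // Later, when the planner is editing the blueprint:
    builder.apply_pending_host_phase_2_changes(all_phase_2_changes)?;
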
+#[derive(Debug, PartialEq, Eq)] +pub(crate) struct PendingHostPhase2Changes { + by_sled: BTreeMap<SledUuid, (M2Slot, BlueprintHostPhase2DesiredContents)>, +} + +impl PendingHostPhase2Changes { + pub(super) fn empty() -> Self { + Self { by_sled: BTreeMap::new() } + } + + fn insert( + &mut self, + sled_id: SledUuid, + slot: M2Slot, + artifact: &TufArtifactMeta, + ) { + let contents = BlueprintHostPhase2DesiredContents::Artifact { + version: BlueprintArtifactVersion::Available { + version: artifact.id.version.clone(), + }, + hash: artifact.hash, + }; + let previous = self.by_sled.insert(sled_id, (slot, contents)); + assert!( + previous.is_none(), + "recorded multiple changes for sled {sled_id}" + ); + } + + pub(super) fn append(&mut self, other: &mut Self) { + let expected_count = self.by_sled.len() + other.by_sled.len(); + self.by_sled.append(&mut other.by_sled); + assert_eq!( + self.by_sled.len(), + expected_count, + "appended PendingHostPhase2Changes with duplicate sled IDs" + ); + } + + pub(crate) fn into_iter( + self, + ) -> impl Iterator<Item = (SledUuid, M2Slot, BlueprintHostPhase2DesiredContents)> + { + self.by_sled + .into_iter() + .map(|(sled_id, (slot, contents))| (sled_id, slot, contents)) + } + + #[cfg(test)] + pub(crate) fn iter( + &self, + ) -> impl Iterator< + Item = (SledUuid, M2Slot, &BlueprintHostPhase2DesiredContents), + > + '_ { + self.by_sled + .iter() + .map(|(sled_id, (slot, contents))| (*sled_id, *slot, contents)) + } + + #[cfg(test)] + pub(super) fn remove( + &mut self, + sled_id: &SledUuid, + ) -> Option<(M2Slot, BlueprintHostPhase2DesiredContents)> { + self.by_sled.remove(sled_id) + } + + #[cfg(test)] + pub(super) fn is_empty(&self) -> bool { + self.by_sled.is_empty() + } + + #[cfg(test)] + fn len(&self) -> usize { + self.by_sled.len() + } +} + +/// Determines the status of a pending host phase 1 update by comparing the +/// expected preconditions and postconditions to what's in `inventory` +pub(super) fn update_status( + baseboard_id: &Arc<BaseboardId>, + desired_artifact: ArtifactHash, + inventory: &Collection, + details: &PendingMgsUpdateHostPhase1Details, + log: &Logger, +) -> Result<MgsUpdateStatus, MgsUpdateStatusError> { + let active_phase_1_slot = inventory + .host_phase_1_active_slot_for(baseboard_id) + .ok_or_else(|| MgsUpdateStatusError::MissingHostPhase1ActiveSlot)? + .slot; + + let active_phase_1_hash = inventory + .host_phase_1_flash_hash_for(active_phase_1_slot, baseboard_id) + .ok_or_else(|| { + MgsUpdateStatusError::MissingHostPhase1FlashHash( + active_phase_1_slot, + ) + })? + .hash; + + // Get the latest inventory report from sled-agent; we need this to confirm + // that it's actually booted the OS image we're trying to update to. If it's + // not present in inventory at all, we'll assume it's in the process of + // rebooting and report this as "not done". + let Some(sled_agent) = inventory.sled_agents.iter().find(|sled_agent| { + sled_agent.baseboard_id.as_ref() == Some(baseboard_id) + }) else { + return Ok(MgsUpdateStatus::NotDone); + }; + + let last_reconciliation = + sled_agent.last_reconciliation.as_ref().ok_or_else(|| { + MgsUpdateStatusError::MissingSledAgentLastReconciliation + })?; + let boot_disk = *last_reconciliation + .boot_partitions + .boot_disk + .as_ref() + .map_err(|err| { + MgsUpdateStatusError::SledAgentErrorDeterminingBootDisk(err.clone()) + })?; + + // If we find the desired artifact in the active slot _and_ we see that + // sled-agent has successfully booted from that same slot, we're done. + if active_phase_1_hash == desired_artifact + && boot_disk == active_phase_1_slot + { + return Ok(MgsUpdateStatus::Done); + } + + // The update hasn't completed. We need to compare the inventory contents
+ // (from both the SP and sled-agent) against the expectations we recorded in + // `details` to check whether the update is still in progress or has become + // impossible. + let PendingMgsUpdateHostPhase1Details { + expected_active_phase_1_slot, + expected_boot_disk, + expected_active_phase_1_hash, + expected_active_phase_2_hash, + expected_inactive_phase_1_hash, + // We don't need to check the inactive phase 2 hash at all because + // there's no way its current value would make this update impossible. + // This differs from the other "expected" fields. Those are set at + // planning time to the value found in inventory. They function as a + // guard that we don't execute an update if anything has changed since + // we planned the update. As a result, they need to be updated at + // planning-time if reality has changed. This value is used differently. + // There's one specific value we expect here for this phase 1 update + // (namely, the hash of the phase 2 image that goes with it) and a + // separate process is responsible for seeing this value here and + // triggering the corresponding phase 2 update. If we checked and found + // a mismatch here, that would just mean this phase 2 update hasn't + // happened. If we found a match, that just means the phase 1 update + // hasn't happened. + expected_inactive_phase_2_hash: _, + sled_agent_address, + } = details; + + // It should be impossible for the sled-agent address to change, unless this + // sled isn't the one we think it is. + if sled_agent.sled_agent_address != *sled_agent_address { + error!( + log, + "sled-agent with in-progress MGS-driven update has moved"; + "inventory_sled_agent_address" => %sled_agent.sled_agent_address, + ); + return Ok(MgsUpdateStatus::Impossible); + } + + // If the active slot or its contents do not match what we expect, we've + // changed the active slot _without_ completing the update. It's impossible + // to proceed. + if active_phase_1_slot != *expected_active_phase_1_slot + || active_phase_1_hash != *expected_active_phase_1_hash + { + return Ok(MgsUpdateStatus::Impossible); + } + + // Similarly, if the boot disk or active phase 2 hash does not match what we + // expect, we've changed boot disks _without_ completing the update. + if boot_disk != *expected_boot_disk { + return Ok(MgsUpdateStatus::Impossible); + } + let active_phase_2_hash = last_reconciliation + .boot_partitions + .slot_details(boot_disk) + .as_ref() + .map_err(|err| { + MgsUpdateStatusError::SledAgentErrorDeterminingBootPartitionDetails { + slot: boot_disk, + err: err.clone(), + } + })? + .artifact_hash; + if active_phase_2_hash != *expected_active_phase_2_hash { + return Ok(MgsUpdateStatus::Impossible); + } + + // If the inactive phase 1 hash doesn't match what we expect, we won't be + // able to pass our preconditions. This one is tricky because we could be in + // the process of writing to the inactive phase 1 slot, which could cause + // inventory to fail to collect the hash entirely (fine - we'll return an + // error and wait to make a decision until inventory didn't fail) or give us + // a hash that matches a partially-written artifact (which could mean the + // update is `NotDone` because we're actively writing it). However, we have + // to treat the latter case as `Impossible`: + // + // 1. It's possible we partially wrote the inactive slot and then the MGS + // instance sending that update died + // 2.
We can't tell whether we have a hash of a partially-written + // `desired_artifact` or a hash of some completely unrelated thing. + // + // Returning `Impossible` could cause some unnecessary churn in planning + // steps, but it should eventually converge. + let inactive_phase_1_hash = inventory + .host_phase_1_flash_hash_for( + active_phase_1_slot.toggled(), + baseboard_id, + ) + .ok_or_else(|| { + MgsUpdateStatusError::MissingHostPhase1FlashHash( + active_phase_1_slot.toggled(), + ) + })? + .hash; + if inactive_phase_1_hash == *expected_inactive_phase_1_hash { + Ok(MgsUpdateStatus::NotDone) + } else { + Ok(MgsUpdateStatus::Impossible) + } +} + +pub(super) fn try_make_update( + log: &slog::Logger, + baseboard_id: &Arc<BaseboardId>, + inventory: &Collection, + current_artifacts: &TufRepoDescription, +) -> Option<(PendingMgsUpdate, PendingHostPhase2Changes)> { + let Some(sp_info) = inventory.sps.get(baseboard_id) else { + warn!( + log, + "cannot configure host OS update for board \ + (missing SP info from inventory)"; + baseboard_id, + ); + return None; + }; + + // Only configure host OS updates for sleds. + // + // We don't bother logging a return value of `None` for non-sleds, because + // we will never attempt to configure an update for them (nor should we). + match sp_info.sp_type { + SpType::Sled => (), + SpType::Power | SpType::Switch => return None, + } + + let Some(sled_agent) = inventory.sled_agents.iter().find(|sled_agent| { + sled_agent.baseboard_id.as_ref() == Some(baseboard_id) + }) else { + warn!( + log, + "cannot configure host OS update for board \ + (missing sled-agent info from inventory)"; + baseboard_id, + ); + return None; + }; + let Some(last_reconciliation) = sled_agent.last_reconciliation.as_ref() + else { + warn!( + log, + "cannot configure host OS update for board \ + (missing last reconciliation details from inventory)"; + baseboard_id, + ); + return None; + }; + let boot_disk = match &last_reconciliation.boot_partitions.boot_disk { + Ok(boot_disk) => *boot_disk, + Err(err) => { + // This error is a `String`; we can't use `InlineErrorChain`. + let err: &str = &err; + warn!( + log, + "cannot configure host OS update for board \ + (sled-agent reported an error determining boot disk)"; + baseboard_id, + "err" => err, + ); + return None; + } + }; + let active_phase_2_hash = + match &last_reconciliation.boot_partitions.slot_details(boot_disk) { + Ok(details) => details.artifact_hash, + Err(err) => { + // This error is a `String`; we can't use `InlineErrorChain`. + let err: &str = &err; + warn!( + log, + "cannot configure host OS update for board \ + (sled-agent reported an error determining boot disk phase 2 image)"; + baseboard_id, + "boot_disk" => ?boot_disk, + "err" => err, + ); + return None; + } + }; + + let Some(active_phase_1_slot) = + inventory.host_phase_1_active_slot_for(baseboard_id).map(|s| s.slot) + else { + warn!( + log, + "cannot configure host OS update for board \ + (inventory missing current active host phase 1 slot)"; + baseboard_id, + ); + return None; + }; + + // TODO-correctness What should we do if the active phase 1 slot doesn't + // match the boot disk? That means the active phase 1 slot has been changed + // since the last time the sled booted, which should only happen at the very + // end of a host OS update just before the sled is rebooted. It's possible + // (albeit unlikely) we collected inventory in that window; we don't want to + // plan a new update for this sled if it's about to reboot into some other + // update.
+ // + // If there are other ways we could get a mismatch between the active phase + // 1 slot and the boot disk, they'll induce a support case to recover, given + // this current implementation. As far as we know they shouldn't happen. + if active_phase_1_slot != boot_disk { + warn!( + log, + "cannot configure host OS update for board (active phase 1 slot \ + doesn't match boot disk; is the sled already being updated?)"; + baseboard_id, + "active_phase_1_slot" => ?active_phase_1_slot, + "boot_disk" => ?boot_disk, + ); + return None; + } + + let Some(active_phase_1_hash) = inventory + .host_phase_1_flash_hash_for(active_phase_1_slot, baseboard_id) + .map(|h| h.hash) + else { + warn!( + log, + "cannot configure host OS update for board \ + (missing active phase 1 hash from inventory)"; + baseboard_id, + "slot" => ?active_phase_1_slot, + ); + return None; + }; + + let Some(inactive_phase_1_hash) = inventory + .host_phase_1_flash_hash_for( + active_phase_1_slot.toggled(), + baseboard_id, + ) + .map(|h| h.hash) + else { + warn!( + log, + "cannot configure host OS update for board \ + (missing inactive phase 1 hash from inventory)"; + baseboard_id, + "slot" => ?active_phase_1_slot.toggled(), + ); + return None; + }; + + let mut phase_1_artifacts = Vec::with_capacity(1); + let mut phase_2_artifacts = Vec::with_capacity(1); + for artifact in ¤t_artifacts.artifacts { + // TODO-correctness Need to choose gimlet vs cosmo here! Need help from + // tufaceous to tell us which is which. + // https://github.com/oxidecomputer/omicron/issues/8777 + if artifact.id.kind == ArtifactKind::HOST_PHASE_1 { + phase_1_artifacts.push(artifact); + } else if artifact.id.kind == ArtifactKind::HOST_PHASE_2 { + phase_2_artifacts.push(artifact); + } + } + let (phase_1_artifact, phase_2_artifact) = + match (phase_1_artifacts.as_slice(), phase_2_artifacts.as_slice()) { + // Common case: Exactly 1 of each artifact. + ([p1], [p2]) => (p1, p2), + // "TUF is broken" cases: missing one or the other. + ([], _) => { + warn!( + log, + "cannot configure host OS update for board \ + (no phase 1 artifact)"; + baseboard_id, + ); + return None; + } + (_, []) => { + warn!( + log, + "cannot configure host OS update for board \ + (no phase 2 artifact)"; + baseboard_id, + ); + return None; + } + // "TUF is broken" cases: have multiple of one or the other. This + // should be impossible unless we shipped a TUF repo with multiple + // host OS images. We can't proceed, because we don't know how to + // pair up which phase 1 matches which phase 2. + (_, _) => { + warn!( + log, + "cannot configure host OS update for board \ + (multiple OS images in TUF repo)"; + baseboard_id, + "num-phase-1-images" => phase_1_artifacts.len(), + "num-phase-2-images" => phase_2_artifacts.len(), + ); + return None; + } + }; + + // If the artifact matches what's deployed, then no update is needed. We + // only need to look at the running phase 2; that tells us what we're + // _actually_ running. The currently-active phase 1 should certainly match + // it; what should we do if it's different? (That should be impossible! It + // would mean the active phase 1 contents have changed in such a way that + // this sled will fail to boot if it were rebooted now.) + if active_phase_2_hash == phase_2_artifact.hash { + debug!(log, "no host OS update needed for board"; baseboard_id); + return None; + } + + // Before we can proceed with the phase 1 update, we need sled-agent to + // write the corresponding phase 2 artifact to its inactive disk. 
This + // requires us updating its `OmicronSledConfig`. We don't thread the + // blueprint editor all the way down to this point, so instead we'll return + // the set of host phase 2 changes we want the planner to make on our + // behalf. + let mut pending_host_phase_2_changes = PendingHostPhase2Changes::empty(); + pending_host_phase_2_changes.insert( + sled_agent.sled_id, + boot_disk.toggled(), + phase_2_artifact, + ); + + Some(( + PendingMgsUpdate { + baseboard_id: baseboard_id.clone(), + sp_type: sp_info.sp_type, + slot_id: sp_info.sp_slot, + details: PendingMgsUpdateDetails::HostPhase1( + PendingMgsUpdateHostPhase1Details { + expected_active_phase_1_slot: active_phase_1_slot, + expected_boot_disk: boot_disk, + expected_active_phase_1_hash: active_phase_1_hash, + expected_active_phase_2_hash: active_phase_2_hash, + expected_inactive_phase_1_hash: inactive_phase_1_hash, + expected_inactive_phase_2_hash: phase_2_artifact.hash, + sled_agent_address: sled_agent.sled_agent_address, + }, + ), + artifact_hash: phase_1_artifact.hash, + artifact_version: phase_1_artifact.id.version.clone(), + }, + pending_host_phase_2_changes, + )) +} + +#[cfg(test)] +mod tests { + use crate::mgs_updates::ImpossibleUpdatePolicy; + use crate::mgs_updates::plan_mgs_updates; + use crate::mgs_updates::test_helpers::ARTIFACT_HASH_HOST_PHASE_1; + use crate::mgs_updates::test_helpers::ARTIFACT_HASH_HOST_PHASE_1_V1; + use crate::mgs_updates::test_helpers::ARTIFACT_HASH_HOST_PHASE_1_V1_5; + use crate::mgs_updates::test_helpers::ARTIFACT_HASH_HOST_PHASE_2; + use crate::mgs_updates::test_helpers::ARTIFACT_HASH_HOST_PHASE_2_V1; + use crate::mgs_updates::test_helpers::ARTIFACT_VERSION_2; + use crate::mgs_updates::test_helpers::TestBoards; + use dropshot::ConfigLogging; + use dropshot::ConfigLoggingLevel; + use dropshot::test_util::LogContext; + use gateway_client::types::SpType; + use nexus_types::deployment::BlueprintArtifactVersion; + use nexus_types::deployment::BlueprintHostPhase2DesiredContents; + use nexus_types::deployment::PendingMgsUpdateDetails; + use nexus_types::deployment::PendingMgsUpdateHostPhase1Details; + use nexus_types::deployment::PendingMgsUpdates; + use nexus_types::deployment::TargetReleaseDescription; + use omicron_common::disk::M2Slot; + use std::collections::BTreeSet; + + // Short hand-rolled update sequence that exercises some basic behavior for + // host OS updates. + #[test] + fn test_basic_host_os() { + let test_name = "planning_mgs_updates_basic_host_os"; + let logctx = LogContext::new( + test_name, + &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Debug }, + ); + let log = &logctx.log; + let test_boards = TestBoards::new(test_name); + + // Test that with no updates pending and no TUF repo specified, there + // will remain no updates pending. 
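Aside: conceptually, the value handed back alongside the `PendingMgsUpdate` above is a small per-sled map: for each sled whose host OS is being updated, which M.2 slot sled-agent should rewrite and with what phase 2 contents. The sketch below is a rough, hypothetical stand-in for that shape (the names `HostPhase2ChangesSketch`, `DesiredPhase2Sketch`, and `SledIdSketch` are invented for illustration); the real `PendingHostPhase2Changes` keys by sled id, stores `BlueprintHostPhase2DesiredContents`, and is what provides the `empty`/`insert`/`append`/`is_empty` calls used throughout this diff.

use std::collections::BTreeMap;

// Invented placeholder types; the real code uses sled UUIDs, `M2Slot`, and
// `BlueprintHostPhase2DesiredContents`.
type SledIdSketch = u64;

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum SlotSketch {
    A,
    B,
}

#[derive(Clone, Debug, PartialEq, Eq)]
struct DesiredPhase2Sketch {
    version: String,
    hash: String,
}

#[derive(Debug, Default)]
struct HostPhase2ChangesSketch {
    by_sled: BTreeMap<SledIdSketch, (SlotSketch, DesiredPhase2Sketch)>,
}

impl HostPhase2ChangesSketch {
    fn empty() -> Self {
        Self::default()
    }

    /// Ask the planner to rewrite `slot` on `sled` with `contents`.
    fn insert(
        &mut self,
        sled: SledIdSketch,
        slot: SlotSketch,
        contents: DesiredPhase2Sketch,
    ) {
        self.by_sled.insert(sled, (slot, contents));
    }

    /// Merge another batch of changes (the planning loop accumulates one of
    /// these per board it configures an update for).
    fn append(&mut self, other: &mut Self) {
        self.by_sled.append(&mut other.by_sled);
    }

    fn is_empty(&self) -> bool {
        self.by_sled.is_empty()
    }
}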
+ let collection = test_boards + .collection_builder() + .host_active_exception( + 0, + ARTIFACT_HASH_HOST_PHASE_1_V1, + ARTIFACT_HASH_HOST_PHASE_2_V1, + ) + .build(); + let current_boards = &collection.baseboards; + let sled_0_id = test_boards.sled_id(0).expect("have sled 0"); + let sled_1_id = test_boards.sled_id(1).expect("have sled 1"); + let initial_updates = PendingMgsUpdates::new(); + let nmax_updates = 1; + let impossible_update_policy = ImpossibleUpdatePolicy::Reevaluate; + let planned = plan_mgs_updates( + log, + &collection, + current_boards, + &initial_updates, + &TargetReleaseDescription::Initial, + nmax_updates, + impossible_update_policy, + ); + assert!(planned.pending_updates.is_empty()); + assert!(planned.pending_host_phase_2_changes.is_empty()); + + // Test that when a TUF repo is specified and one host OS is outdated, + // then it's configured with an update (and the update looks correct). + let repo = test_boards.tuf_repo(); + let planned = plan_mgs_updates( + log, + &collection, + current_boards, + &initial_updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); + assert_eq!(planned.pending_updates.len(), 1); + let first_update = + planned.pending_updates.iter().next().expect("at least one update"); + assert_eq!(first_update.baseboard_id.serial_number, "sled_0"); + assert_eq!(first_update.sp_type, SpType::Sled); + assert_eq!(first_update.slot_id, 0); + assert_eq!(first_update.artifact_hash, ARTIFACT_HASH_HOST_PHASE_1); + assert_eq!(first_update.artifact_version, ARTIFACT_VERSION_2); + assert_eq!(planned.pending_host_phase_2_changes.len(), 1); + let (phase2_id, phase2_slot, phase2_contents) = + planned.pending_host_phase_2_changes.iter().next().unwrap(); + assert_eq!(phase2_id, sled_0_id); + assert_eq!(phase2_slot, M2Slot::B); + assert_eq!( + *phase2_contents, + BlueprintHostPhase2DesiredContents::Artifact { + version: BlueprintArtifactVersion::Available { + version: ARTIFACT_VERSION_2 + }, + hash: ARTIFACT_HASH_HOST_PHASE_2 + } + ); + + // Test that when an update is already pending, and nothing changes + // about the state of the world (i.e., the inventory), then the planner + // makes no changes. + let later_planned = plan_mgs_updates( + log, + &collection, + current_boards, + &planned.pending_updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); + // We should keep the pending MGS update, but not return any new phase 2 + // changes. (Those had already been applied to the sled configs.) + assert_eq!(planned.pending_updates, later_planned.pending_updates); + assert!(later_planned.pending_host_phase_2_changes.is_empty()); + + // Test that when two updates are needed, but one is already pending, + // then the other one is *not* started (because it exceeds + // nmax_updates). 
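Aside: for the update the test above just planned, later planning passes will run it back through `update_status`; the host phase 1 branch of that check (near the top of this hunk) reduces to the rule sketched below. The types here are simplified stand-ins, not the real `PendingMgsUpdateHostPhase1Details` or inventory types, and the "done", sled-agent-address, and inventory-error paths are omitted since they are handled separately.

// Sketch only: simplified stand-ins for the planner's types.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum M2SlotSketch {
    A,
    B,
}

#[derive(Debug, PartialEq, Eq)]
enum StatusSketch {
    NotDone,
    Impossible,
}

/// One side (expected-at-planning-time or observed-in-inventory) of the
/// host phase 1 state. Hashes are plain byte arrays here; the real code
/// uses artifact hash types.
#[derive(Clone, Copy, PartialEq, Eq)]
struct Phase1StateSketch {
    active_slot: M2SlotSketch,
    boot_disk: M2SlotSketch,
    active_phase_1_hash: [u8; 32],
    active_phase_2_hash: [u8; 32],
    inactive_phase_1_hash: [u8; 32],
}

/// Mirrors the checks above: any drift on the active side (active phase 1
/// slot or hash, boot disk, or booted phase 2 hash) means the recorded
/// preconditions can no longer hold, so the pending update is impossible.
/// Otherwise the update is merely "not done" for as long as the inactive
/// phase 1 slot still holds what we saw when the update was planned.
fn classify(
    expected: Phase1StateSketch,
    observed: Phase1StateSketch,
) -> StatusSketch {
    if observed.active_slot != expected.active_slot
        || observed.active_phase_1_hash != expected.active_phase_1_hash
        || observed.boot_disk != expected.boot_disk
        || observed.active_phase_2_hash != expected.active_phase_2_hash
    {
        return StatusSketch::Impossible;
    }
    if observed.inactive_phase_1_hash == expected.inactive_phase_1_hash {
        StatusSketch::NotDone
    } else {
        StatusSketch::Impossible
    }
}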
+ let later_collection = test_boards + .collection_builder() + .host_active_exception( + 0, + ARTIFACT_HASH_HOST_PHASE_1_V1, + ARTIFACT_HASH_HOST_PHASE_2_V1, + ) + .host_active_exception( + 1, + ARTIFACT_HASH_HOST_PHASE_1_V1, + ARTIFACT_HASH_HOST_PHASE_2_V1, + ) + .build(); + let later_planned = plan_mgs_updates( + log, + &later_collection, + current_boards, + &planned.pending_updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); + assert_eq!(planned.pending_updates, later_planned.pending_updates); + assert!(later_planned.pending_host_phase_2_changes.is_empty()); + + // At this point, we're ready to test that when the first update + // completes, then the second one *is* started. This tests two + // different things: first that we noticed the first one completed, and + // second that we noticed another thing needed an update + let later_collection = test_boards + .collection_builder() + .host_active_exception( + 1, + ARTIFACT_HASH_HOST_PHASE_1_V1, + ARTIFACT_HASH_HOST_PHASE_2_V1, + ) + .build(); + let later_planned = plan_mgs_updates( + log, + &later_collection, + current_boards, + &planned.pending_updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); + assert_eq!(later_planned.pending_updates.len(), 1); + let first_update = later_planned + .pending_updates + .iter() + .next() + .expect("at least one update"); + assert_eq!(first_update.baseboard_id.serial_number, "sled_1"); + assert_eq!(first_update.sp_type, SpType::Sled); + assert_eq!(first_update.slot_id, 1); + assert_eq!(first_update.artifact_hash, ARTIFACT_HASH_HOST_PHASE_1); + assert_eq!(first_update.artifact_version, ARTIFACT_VERSION_2); + assert_eq!(later_planned.pending_host_phase_2_changes.len(), 1); + let (phase2_id, phase2_slot, phase2_contents) = + later_planned.pending_host_phase_2_changes.iter().next().unwrap(); + assert_eq!(phase2_id, sled_1_id); + assert_eq!(phase2_slot, M2Slot::B); + assert_eq!( + *phase2_contents, + BlueprintHostPhase2DesiredContents::Artifact { + version: BlueprintArtifactVersion::Available { + version: ARTIFACT_VERSION_2 + }, + hash: ARTIFACT_HASH_HOST_PHASE_2 + } + ); + + // Finally, test that when all OSs are in spec, then no updates are + // configured. + let updated_collection = test_boards.collection_builder().build(); + let later_planned = plan_mgs_updates( + log, + &updated_collection, + current_boards, + &later_planned.pending_updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); + assert!(later_planned.pending_updates.is_empty()); + assert!(later_planned.pending_host_phase_2_changes.is_empty()); + + // Test that we don't try to update boards that aren't in + // `current_boards`, even if they're in inventory and outdated. 
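Aside: the sequence of assertions above is essentially a convergence argument: with a budget of one pending update, each planning pass either keeps the in-flight update or, once inventory shows it finished, picks the next out-of-date board, until nothing is out of date. The toy loop below, using made-up types (`BoardSketch`, `plan_one`), exists only to illustrate why that process terminates; it is not the real planner.

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct BoardSketch {
    id: u32,
    version: u32,
}

/// Toy planner: allow at most one "pending update" at a time, choosing the
/// first board that is not yet at `target`.
fn plan_one(boards: &[BoardSketch], target: u32) -> Option<u32> {
    boards.iter().find(|b| b.version != target).map(|b| b.id)
}

fn main() {
    let target = 2;
    let mut boards = vec![
        BoardSketch { id: 0, version: 1 },
        BoardSketch { id: 1, version: 1 },
    ];
    let mut passes = 0;
    // Each iteration plans one update and then "applies" it, standing in for
    // the plan / execute / re-collect-inventory cycle the tests walk through.
    while let Some(id) = plan_one(&boards, target) {
        boards.iter_mut().find(|b| b.id == id).unwrap().version = target;
        passes += 1;
    }
    assert_eq!(passes, 2);
    assert!(plan_one(&boards, target).is_none());
}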
+ let collection = test_boards + .collection_builder() + .host_active_exception( + 0, + ARTIFACT_HASH_HOST_PHASE_1_V1, + ARTIFACT_HASH_HOST_PHASE_2_V1, + ) + .build(); + let planned = plan_mgs_updates( + log, + &collection, + &BTreeSet::new(), + &PendingMgsUpdates::new(), + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); + assert!(planned.pending_updates.is_empty()); + assert!(planned.pending_host_phase_2_changes.is_empty()); + let planned = plan_mgs_updates( + log, + &collection, + &collection.baseboards, + &PendingMgsUpdates::new(), + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); + // We verified most of the details above. Here we're just double + // checking that the baseboard being missing is the only reason that no + // update was generated. + assert_eq!(planned.pending_updates.len(), 1); + assert_eq!(planned.pending_host_phase_2_changes.len(), 1); + + // Verify the precondition details of an ordinary update. + let old_update = planned + .pending_updates + .into_iter() + .next() + .expect("at least one update"); + let PendingMgsUpdateDetails::HostPhase1( + PendingMgsUpdateHostPhase1Details { + expected_active_phase_1_slot, + expected_boot_disk, + expected_active_phase_1_hash, + expected_active_phase_2_hash, + expected_inactive_phase_1_hash, + expected_inactive_phase_2_hash, + sled_agent_address: _, + }, + ) = &old_update.details + else { + panic!("expected host phase 1 update"); + }; + assert_eq!(M2Slot::A, *expected_active_phase_1_slot); + assert_eq!(M2Slot::A, *expected_boot_disk); + assert_eq!( + ARTIFACT_HASH_HOST_PHASE_1_V1, + *expected_active_phase_1_hash + ); + assert_eq!( + ARTIFACT_HASH_HOST_PHASE_2_V1, + *expected_active_phase_2_hash + ); + assert_eq!( + ARTIFACT_HASH_HOST_PHASE_1_V1, + *expected_inactive_phase_1_hash + ); + // Note: Not V1! This should be the _new_ artifact hash. + assert_eq!(ARTIFACT_HASH_HOST_PHASE_2, *expected_inactive_phase_2_hash); + + // Test that if the inactive slot contents have changed, then we'll get + // a new update reflecting that. 
+ let collection = test_boards + .collection_builder() + .host_phase_1_artifacts( + ARTIFACT_HASH_HOST_PHASE_1, + ARTIFACT_HASH_HOST_PHASE_1_V1_5, + ) + .host_active_exception( + 0, + ARTIFACT_HASH_HOST_PHASE_1_V1, + ARTIFACT_HASH_HOST_PHASE_2_V1, + ) + .build(); + let new_planned = plan_mgs_updates( + log, + &collection, + &collection.baseboards, + &planned.pending_updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); + assert_ne!(planned.pending_updates, new_planned.pending_updates); + assert_eq!(new_planned.pending_updates.len(), 1); + let new_update = new_planned + .pending_updates + .into_iter() + .next() + .expect("at least one update"); + assert_eq!(old_update.baseboard_id, new_update.baseboard_id); + assert_eq!(old_update.sp_type, new_update.sp_type); + assert_eq!(old_update.slot_id, new_update.slot_id); + assert_eq!(old_update.artifact_hash, new_update.artifact_hash); + assert_eq!(old_update.artifact_version, new_update.artifact_version); + let PendingMgsUpdateDetails::HostPhase1( + PendingMgsUpdateHostPhase1Details { + expected_active_phase_1_slot, + expected_boot_disk, + expected_active_phase_1_hash, + expected_active_phase_2_hash, + expected_inactive_phase_1_hash, + expected_inactive_phase_2_hash, + sled_agent_address: _, + }, + ) = &new_update.details + else { + panic!("expected host phase 1 update"); + }; + assert_eq!(M2Slot::A, *expected_active_phase_1_slot); + assert_eq!(M2Slot::A, *expected_boot_disk); + assert_eq!( + ARTIFACT_HASH_HOST_PHASE_1_V1, + *expected_active_phase_1_hash + ); + assert_eq!( + ARTIFACT_HASH_HOST_PHASE_2_V1, + *expected_active_phase_2_hash + ); + assert_eq!( + ARTIFACT_HASH_HOST_PHASE_1_V1_5, + *expected_inactive_phase_1_hash + ); + assert_eq!(ARTIFACT_HASH_HOST_PHASE_2, *expected_inactive_phase_2_hash); + + // Test that if instead it's the active slot whose contents have changed + // to something other than the new expected version, then we'll also get + // a new update reflecting that. 
+ let collection = test_boards + .collection_builder() + .host_active_exception( + 0, + ARTIFACT_HASH_HOST_PHASE_1_V1_5, + ARTIFACT_HASH_HOST_PHASE_2_V1, + ) + .build(); + let new_planned = plan_mgs_updates( + log, + &collection, + &collection.baseboards, + &planned.pending_updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); + assert_ne!(planned.pending_updates, new_planned.pending_updates); + assert_eq!(new_planned.pending_updates.len(), 1); + let new_update = new_planned + .pending_updates + .into_iter() + .next() + .expect("at least one update"); + assert_eq!(old_update.baseboard_id, new_update.baseboard_id); + assert_eq!(old_update.sp_type, new_update.sp_type); + assert_eq!(old_update.slot_id, new_update.slot_id); + assert_eq!(old_update.artifact_hash, new_update.artifact_hash); + assert_eq!(old_update.artifact_version, new_update.artifact_version); + let PendingMgsUpdateDetails::HostPhase1( + PendingMgsUpdateHostPhase1Details { + expected_active_phase_1_slot, + expected_boot_disk, + expected_active_phase_1_hash, + expected_active_phase_2_hash, + expected_inactive_phase_1_hash, + expected_inactive_phase_2_hash, + sled_agent_address: _, + }, + ) = &new_update.details + else { + panic!("expected host phase 1 update"); + }; + assert_eq!(M2Slot::A, *expected_active_phase_1_slot); + assert_eq!(M2Slot::A, *expected_boot_disk); + assert_eq!( + ARTIFACT_HASH_HOST_PHASE_1_V1_5, + *expected_active_phase_1_hash + ); + assert_eq!( + ARTIFACT_HASH_HOST_PHASE_2_V1, + *expected_active_phase_2_hash + ); + assert_eq!( + ARTIFACT_HASH_HOST_PHASE_1_V1, + *expected_inactive_phase_1_hash + ); + assert_eq!(ARTIFACT_HASH_HOST_PHASE_2, *expected_inactive_phase_2_hash); + + logctx.cleanup_successful(); + } + + // Tests the case where a sled appears to move while a host OS update is + // pending + #[test] + fn test_sled_move() { + let test_name = "planning_mgs_updates_sled_move"; + let logctx = LogContext::new( + test_name, + &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Debug }, + ); + let test_boards = TestBoards::new(test_name); + + // Configure an update for one SP. + let log = &logctx.log; + let repo = test_boards.tuf_repo(); + let mut collection = test_boards + .collection_builder() + .host_active_exception( + 0, + ARTIFACT_HASH_HOST_PHASE_1_V1, + ARTIFACT_HASH_HOST_PHASE_2_V1, + ) + .build(); + let nmax_updates = 1; + let impossible_update_policy = ImpossibleUpdatePolicy::Reevaluate; + let planned = plan_mgs_updates( + log, + &collection, + &collection.baseboards, + &PendingMgsUpdates::new(), + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); + assert!(!planned.pending_updates.is_empty()); + assert!(!planned.pending_host_phase_2_changes.is_empty()); + let update = planned + .pending_updates + .into_iter() + .next() + .expect("at least one update"); + + // Move an SP (as if someone had moved the sled to a different cubby). + // This is awful, but at least it's easy. + let sp_info = collection + .sps + .values_mut() + .find(|sp| sp.sp_type == SpType::Sled && sp.sp_slot == 0) + .expect("missing sled 0 SP"); + sp_info.sp_slot = 9; + + // Plan again. The configured update should be updated to reflect the + // new location. 
+ let new_planned = plan_mgs_updates( + log, + &collection, + &collection.baseboards, + &planned.pending_updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); + assert!(!new_planned.pending_updates.is_empty()); + assert!(!new_planned.pending_host_phase_2_changes.is_empty()); + let new_update = new_planned + .pending_updates + .into_iter() + .next() + .expect("at least one update"); + assert_eq!(new_update.slot_id, 9); + assert_eq!(new_update.baseboard_id, update.baseboard_id); + assert_eq!(new_update.sp_type, update.sp_type); + assert_eq!(new_update.artifact_hash, update.artifact_hash); + assert_eq!(new_update.artifact_version, update.artifact_version); + assert_eq!(new_update.details, update.details); + + logctx.cleanup_successful(); + } +} diff --git a/nexus/reconfigurator/planning/src/mgs_updates/mod.rs b/nexus/reconfigurator/planning/src/mgs_updates/mod.rs index 6c7a07b118..6851b07a50 100644 --- a/nexus/reconfigurator/planning/src/mgs_updates/mod.rs +++ b/nexus/reconfigurator/planning/src/mgs_updates/mod.rs @@ -4,6 +4,7 @@ //! Facilities for making choices about MGS-managed updates +mod host_phase_1; mod rot; mod rot_bootloader; mod sp; @@ -30,6 +31,7 @@ use nexus_types::inventory::BaseboardId; use nexus_types::inventory::CabooseWhich; use nexus_types::inventory::Collection; use omicron_common::api::external::TufRepoDescription; +use omicron_common::disk::M2Slot; use slog::{error, info, warn}; use slog_error_chain::InlineErrorChain; use std::collections::BTreeSet; @@ -38,6 +40,8 @@ use thiserror::Error; use tufaceous_artifact::ArtifactVersion; use tufaceous_artifact::ArtifactVersionError; +pub(crate) use host_phase_1::PendingHostPhase2Changes; + /// How to handle an MGS-driven update that has become impossible due to /// unsatisfied preconditions. #[derive(Debug, Clone, Copy, strum::EnumIter)] @@ -51,6 +55,17 @@ pub enum ImpossibleUpdatePolicy { Reevaluate, } +/// Output of planning MGS updates. +#[derive(Debug, PartialEq, Eq)] +pub(crate) struct PlannedMgsUpdates { + /// The actual pending updates; these should be added to the blueprint. + pub(crate) pending_updates: PendingMgsUpdates, + + /// Pending changes to sleds' host phase 2 contents; each of these should + /// result in a change to the respective sled's `BlueprintSledConfig`. + pub(crate) pending_host_phase_2_changes: PendingHostPhase2Changes, +} + /// Generates a new set of `PendingMgsUpdates` based on: /// /// * `inventory`: the latest inventory @@ -66,7 +81,7 @@ pub enum ImpossibleUpdatePolicy { /// /// By current policy, `nmax_updates` is always 1, but the implementation here /// supports more than one update per invocation. 
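Aside: the `nmax_updates` budget mentioned in the doc comment above is enforced by bailing out of the candidate loop as soon as the accumulated pending set reaches the cap. A condensed sketch of that shape follows; the types and the closure parameter are placeholders, not the real `plan_mgs_updates` signature, and the candidate ordering (boards with existing pending updates first) is assumed to have been done by the caller.

// Placeholder type standing in for a pending MGS-driven update.
#[derive(Clone, Debug, PartialEq, Eq)]
struct UpdateSketch(String);

/// For each candidate board, try to plan an update, stopping as soon as the
/// total number of pending updates reaches the budget. Boards left over are
/// simply picked up by a later planning pass.
fn plan_with_budget(
    kept: Vec<UpdateSketch>,
    candidates: &[&str],
    nmax_updates: usize,
    try_make_update: impl Fn(&str) -> Option<UpdateSketch>,
) -> Vec<UpdateSketch> {
    let mut pending = kept;
    for board in candidates.iter().copied() {
        if pending.len() >= nmax_updates {
            // Reached the per-pass cap; leave the rest for a later pass.
            return pending;
        }
        if let Some(update) = try_make_update(board) {
            pending.push(update);
        }
    }
    pending
}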
-pub fn plan_mgs_updates( +pub(crate) fn plan_mgs_updates( log: &slog::Logger, inventory: &Collection, current_boards: &BTreeSet>, @@ -74,8 +89,9 @@ pub fn plan_mgs_updates( current_artifacts: &TargetReleaseDescription, nmax_updates: usize, impossible_update_policy: ImpossibleUpdatePolicy, -) -> PendingMgsUpdates { - let mut rv = PendingMgsUpdates::new(); +) -> PlannedMgsUpdates { + let mut pending_updates = PendingMgsUpdates::new(); + let mut pending_host_phase_2_changes = PendingHostPhase2Changes::empty(); let mut boards_preferred = BTreeSet::new(); // Determine the status of all currently pending updates by comparing what @@ -110,7 +126,7 @@ pub fn plan_mgs_updates( (waiting for recent update to be applied)"; update ); - rv.insert(update.clone()); + pending_updates.insert(update.clone()); } ImpossibleUpdatePolicy::Reevaluate => { info!( @@ -128,7 +144,7 @@ pub fn plan_mgs_updates( "MGS-driven update not yet completed (will keep it)"; update ); - rv.insert(update.clone()); + pending_updates.insert(update.clone()); } Err(error) => { info!( @@ -137,7 +153,7 @@ pub fn plan_mgs_updates( update, InlineErrorChain::new(&error) ); - rv.insert(update.clone()); + pending_updates.insert(update.clone()); } } } @@ -150,7 +166,10 @@ pub fn plan_mgs_updates( log, "cannot issue more MGS-driven updates (no current artifacts)", ); - return rv; + return PlannedMgsUpdates { + pending_updates, + pending_host_phase_2_changes, + }; } TargetReleaseDescription::TufRepo(description) => description, }; @@ -166,19 +185,23 @@ pub fn plan_mgs_updates( current_boards.iter().filter(|b| !boards_preferred.contains(*b)); let candidates = boards_preferred.iter().chain(non_preferred); for board in candidates { - if rv.len() >= nmax_updates { + if pending_updates.len() >= nmax_updates { info!( log, "reached maximum number of pending MGS-driven updates"; "max" => nmax_updates ); - return rv; + return PlannedMgsUpdates { + pending_updates, + pending_host_phase_2_changes, + }; } match try_make_update(log, board, inventory, current_artifacts) { - Some(update) => { + Some((update, mut host_phase_2)) => { info!(log, "configuring MGS-driven update"; &update); - rv.insert(update); + pending_updates.insert(update); + pending_host_phase_2_changes.append(&mut host_phase_2); } None => { info!(log, "skipping board for MGS-driven update"; board); @@ -187,7 +210,7 @@ pub fn plan_mgs_updates( } info!(log, "ran out of boards for MGS-driven update"); - rv + PlannedMgsUpdates { pending_updates, pending_host_phase_2_changes } } #[derive(Debug)] @@ -219,8 +242,19 @@ enum MgsUpdateStatusError { MissingActiveCaboose, #[error("no RoT state found in inventory")] MissingRotState, - #[error("not yet implemented")] - NotYetImplemented, + #[error("no active host phase 1 slot found in inventory")] + MissingHostPhase1ActiveSlot, + #[error("no host phase 1 hash found in inventory for slot {0:?}")] + MissingHostPhase1FlashHash(M2Slot), + #[error("no sled-agent config reconciler result found in inventory")] + MissingSledAgentLastReconciliation, + #[error("sled-agent reported an error determining boot disk: {0}")] + SledAgentErrorDeterminingBootDisk(String), + #[error( + "sled-agent reported an error determining boot partition contents \ + for slot {slot}: {err}" + )] + SledAgentErrorDeterminingBootPartitionDetails { slot: M2Slot, err: String }, #[error("unable to parse input into ArtifactVersion: {0:?}")] FailedArtifactVersionParse(ArtifactVersionError), } @@ -233,6 +267,7 @@ fn mgs_update_status( update: &PendingMgsUpdate, ) -> Result { let baseboard_id = 
&update.baseboard_id; + let desired_artifact_hash = update.artifact_hash; let desired_version = &update.artifact_version; // Check the contents of the target of `update` against what we expect @@ -257,13 +292,13 @@ fn mgs_update_status( .caboose_for(CabooseWhich::Stage0Next, baseboard_id) .map(|c| c.caboose.version.as_ref()); - Ok(mgs_update_status_rot_bootloader( + mgs_update_status_rot_bootloader( desired_version, expected_stage0_version, expected_stage0_next_version, &stage0_caboose.caboose.version, found_stage0_next_version, - )) + ) } PendingMgsUpdateDetails::Sp(PendingMgsUpdateSpDetails { expected_active_version, @@ -279,13 +314,22 @@ fn mgs_update_status( .caboose_for(CabooseWhich::SpSlot1, baseboard_id) .map(|c| c.caboose.version.as_ref()); - Ok(mgs_update_status_sp( + mgs_update_status_sp( desired_version, expected_active_version, expected_inactive_version, &active_caboose.caboose.version, found_inactive_version, - )) + ) + } + PendingMgsUpdateDetails::HostPhase1(details) => { + host_phase_1::update_status( + baseboard_id, + desired_artifact_hash, + inventory, + details, + log, + )? } PendingMgsUpdateDetails::Rot(PendingMgsUpdateRotDetails { expected_active_slot, @@ -343,16 +387,13 @@ fn mgs_update_status( transient_boot_preference: rot_state.transient_boot_preference, }; - Ok(mgs_update_status_rot( + mgs_update_status_rot( desired_version, expected, found, expected_inactive_version, found_inactive_version, - )) - } - PendingMgsUpdateDetails::HostPhase1(_) => { - return Err(MgsUpdateStatusError::NotYetImplemented); + ) } }; @@ -360,9 +401,9 @@ fn mgs_update_status( // great. Return that. if matches!( update_status, - Err(_) | Ok(MgsUpdateStatus::Done) | Ok(MgsUpdateStatus::Impossible) + MgsUpdateStatus::Done | MgsUpdateStatus::Impossible ) { - return update_status; + return Ok(update_status); } // If based on the status we're only able to determine that the update is @@ -392,7 +433,7 @@ fn mgs_update_status( ); Ok(MgsUpdateStatus::Impossible) } else { - update_status + Ok(update_status) } } @@ -446,18 +487,19 @@ fn mgs_update_status_inactive_versions( } /// Determine if the given baseboard needs any MGS-driven update (e.g., update -/// to its SP, RoT, etc.). If so, returns the update. If not, returns `None`. +/// to its SP, RoT, etc.). If so, returns the update and a set of changes that +/// need to be made to sled configs related to host phase 2 images (this set +/// will be empty if we made a non-host update). If not, returns `None`. fn try_make_update( log: &slog::Logger, baseboard_id: &Arc, inventory: &Collection, current_artifacts: &TufRepoDescription, -) -> Option { +) -> Option<(PendingMgsUpdate, PendingHostPhase2Changes)> { // We try MGS-driven update components in a hardcoded priority order until - // any of them returns `Some`. The order is described in RFD 565 section - // "Update Sequence". For now, we only plan SP, RoT and RoT bootloader - // updates. When implemented, host OS updates will be the last to try. - try_make_update_rot_bootloader( + // any of them returns `Some`. The order is described in RFD 565 section + // "Update Sequence". + if let Some(update) = try_make_update_rot_bootloader( log, baseboard_id, inventory, @@ -468,7 +510,18 @@ fn try_make_update( }) .or_else(|| { try_make_update_sp(log, baseboard_id, inventory, current_artifacts) - }) + }) { + // We have a non-host update; there are no pending host phase 2 changes + // necessary. 
+ return Some((update, PendingHostPhase2Changes::empty())); + } + + host_phase_1::try_make_update( + log, + baseboard_id, + inventory, + current_artifacts, + ) } #[cfg(test)] @@ -477,7 +530,12 @@ mod test_helpers; #[cfg(test)] mod test { use super::ImpossibleUpdatePolicy; + use super::PlannedMgsUpdates; use super::plan_mgs_updates; + use super::test_helpers::ARTIFACT_HASH_HOST_PHASE_1; + use super::test_helpers::ARTIFACT_HASH_HOST_PHASE_1_V1; + use super::test_helpers::ARTIFACT_HASH_HOST_PHASE_2; + use super::test_helpers::ARTIFACT_HASH_HOST_PHASE_2_V1; use super::test_helpers::ARTIFACT_HASH_ROT_BOOTLOADER_GIMLET; use super::test_helpers::ARTIFACT_HASH_ROT_BOOTLOADER_SWITCH; use super::test_helpers::ARTIFACT_HASH_ROT_GIMLET_B; @@ -517,39 +575,38 @@ mod test { // Test that with no updates pending and no TUF repo specified, there // will remain no updates pending. let collection = test_boards - .collection_builder( - ARTIFACT_VERSION_2, - ExpectedVersion::NoValidVersion, - ) + .collection_builder() .sp_active_version_exception(SpType::Sled, 0, ARTIFACT_VERSION_1) .build(); let current_boards = &collection.baseboards; let initial_updates = PendingMgsUpdates::new(); let nmax_updates = 1; let impossible_update_policy = ImpossibleUpdatePolicy::Reevaluate; - let updates = plan_mgs_updates( - log, - &collection, - current_boards, - &initial_updates, - &TargetReleaseDescription::Initial, - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: updates, .. } = + plan_mgs_updates( + log, + &collection, + current_boards, + &initial_updates, + &TargetReleaseDescription::Initial, + nmax_updates, + impossible_update_policy, + ); assert!(updates.is_empty()); // Test that when a TUF repo is specified and one SP is outdated, then // it's configured with an update (and the update looks correct). let repo = test_boards.tuf_repo(); - let updates = plan_mgs_updates( - log, - &collection, - current_boards, - &initial_updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: updates, .. } = + plan_mgs_updates( + log, + &collection, + current_boards, + &initial_updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert_eq!(updates.len(), 1); let first_update = updates.iter().next().expect("at least one update"); assert_eq!(first_update.baseboard_id.serial_number, "sled_0"); @@ -561,37 +618,36 @@ mod test { // Test that when an update is already pending, and nothing changes // about the state of the world (i.e., the inventory), then the planner // makes no changes. - let later_updates = plan_mgs_updates( - log, - &collection, - current_boards, - &updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: later_updates, .. } = + plan_mgs_updates( + log, + &collection, + current_boards, + &updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert_eq!(updates, later_updates); // Test that when two updates are needed, but one is already pending, // then the other one is *not* started (because it exceeds // nmax_updates). 
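Aside: the control flow above is a fixed priority chain following the RFD 565 "Update Sequence": the first component kind that reports it needs an update wins, and only the host OS branch can produce host phase 2 changes. The sketch below compresses that shape using invented helpers (`bootloader`, `rot`, `sp`, `host` as precomputed options); the real code evaluates the corresponding `try_make_update_*` helpers lazily via `or_else`.

#[derive(Debug)]
struct UpdateSketch(&'static str);

#[derive(Debug, Default)]
struct Phase2ChangesSketch(Vec<&'static str>);

/// Mirrors the ordering above: RoT bootloader, then RoT, then SP, and only
/// then the host OS. Every non-host branch carries an empty set of phase 2
/// changes; only the host OS branch can ask the planner to rewrite a sled's
/// inactive phase 2 slot.
fn choose_update(
    bootloader: Option<UpdateSketch>,
    rot: Option<UpdateSketch>,
    sp: Option<UpdateSketch>,
    host: Option<(UpdateSketch, Phase2ChangesSketch)>,
) -> Option<(UpdateSketch, Phase2ChangesSketch)> {
    if let Some(update) = bootloader.or(rot).or(sp) {
        return Some((update, Phase2ChangesSketch::default()));
    }
    host
}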
let later_collection = test_boards - .collection_builder( - ARTIFACT_VERSION_2, - ExpectedVersion::NoValidVersion, - ) + .collection_builder() .sp_active_version_exception(SpType::Sled, 0, ARTIFACT_VERSION_1) .sp_active_version_exception(SpType::Switch, 1, ARTIFACT_VERSION_1) .build(); - let later_updates = plan_mgs_updates( - log, - &later_collection, - current_boards, - &updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: later_updates, .. } = + plan_mgs_updates( + log, + &later_collection, + current_boards, + &updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert_eq!(updates, later_updates); // At this point, we're ready to test that when the first update @@ -599,21 +655,19 @@ mod test { // different things: first that we noticed the first one completed, and // second that we noticed another thing needed an update let later_collection = test_boards - .collection_builder( - ARTIFACT_VERSION_2, - ExpectedVersion::NoValidVersion, - ) + .collection_builder() .sp_active_version_exception(SpType::Switch, 1, ARTIFACT_VERSION_1) .build(); - let later_updates = plan_mgs_updates( - log, - &later_collection, - current_boards, - &updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: later_updates, .. } = + plan_mgs_updates( + log, + &later_collection, + current_boards, + &updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert_eq!(later_updates.len(), 1); let next_update = later_updates.iter().next().expect("at least one update"); @@ -626,51 +680,46 @@ mod test { // Finally, test that when all SPs are in spec, then no updates are // configured. - let updated_collection = test_boards - .collection_builder( - ARTIFACT_VERSION_2, - ExpectedVersion::NoValidVersion, - ) - .build(); - let later_updates = plan_mgs_updates( - log, - &updated_collection, - current_boards, - &later_updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let updated_collection = test_boards.collection_builder().build(); + let PlannedMgsUpdates { pending_updates: later_updates, .. } = + plan_mgs_updates( + log, + &updated_collection, + current_boards, + &later_updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert!(later_updates.is_empty()); // Test that we don't try to update boards that aren't in // `current_boards`, even if they're in inventory and outdated. let collection = test_boards - .collection_builder( - ARTIFACT_VERSION_2, - ExpectedVersion::NoValidVersion, - ) + .collection_builder() .sp_active_version_exception(SpType::Sled, 0, ARTIFACT_VERSION_1) .build(); - let updates = plan_mgs_updates( - log, - &collection, - &BTreeSet::new(), - &PendingMgsUpdates::new(), - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: updates, .. 
} = + plan_mgs_updates( + log, + &collection, + &BTreeSet::new(), + &PendingMgsUpdates::new(), + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert!(updates.is_empty()); - let updates = plan_mgs_updates( - log, - &collection, - &collection.baseboards, - &PendingMgsUpdates::new(), - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: updates, .. } = + plan_mgs_updates( + log, + &collection, + &collection.baseboards, + &PendingMgsUpdates::new(), + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); // We verified most of the details above. Here we're just double // checking that the baseboard being missing is the only reason that no // update was generated. @@ -695,25 +744,23 @@ mod test { // Test that if the inactive slot contents have changed, then we'll get // a new update reflecting that. let collection = test_boards - .collection_builder( - ARTIFACT_VERSION_2, - ExpectedVersion::NoValidVersion, - ) + .collection_builder() .sp_versions( ARTIFACT_VERSION_2, ExpectedVersion::Version(ARTIFACT_VERSION_1), ) .sp_active_version_exception(SpType::Sled, 0, ARTIFACT_VERSION_1) .build(); - let new_updates = plan_mgs_updates( - log, - &collection, - &collection.baseboards, - &updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: new_updates, .. } = + plan_mgs_updates( + log, + &collection, + &collection.baseboards, + &updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert_ne!(updates, new_updates); assert_eq!(new_updates.len(), 1); let new_update = @@ -740,21 +787,19 @@ mod test { // to something other than the new expected version, then we'll also get // a new update reflecting that. let collection = test_boards - .collection_builder( - ARTIFACT_VERSION_2, - ExpectedVersion::NoValidVersion, - ) + .collection_builder() .sp_active_version_exception(SpType::Sled, 0, ARTIFACT_VERSION_1_5) .build(); - let new_updates = plan_mgs_updates( - log, - &collection, - &collection.baseboards, - &updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: new_updates, .. } = + plan_mgs_updates( + log, + &collection, + &collection.baseboards, + &updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert_ne!(updates, new_updates); assert_eq!(new_updates.len(), 1); let new_update = @@ -795,39 +840,38 @@ mod test { // Test that with no updates pending and no TUF repo specified, there // will remain no updates pending. let collection = test_boards - .collection_builder( - ARTIFACT_VERSION_2, - ExpectedVersion::NoValidVersion, - ) + .collection_builder() .rot_active_version_exception(SpType::Sled, 0, ARTIFACT_VERSION_1) .build(); let current_boards = &collection.baseboards; let initial_updates = PendingMgsUpdates::new(); let nmax_updates = 1; let impossible_update_policy = ImpossibleUpdatePolicy::Reevaluate; - let updates = plan_mgs_updates( - log, - &collection, - current_boards, - &initial_updates, - &TargetReleaseDescription::Initial, - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: updates, .. 
} = + plan_mgs_updates( + log, + &collection, + current_boards, + &initial_updates, + &TargetReleaseDescription::Initial, + nmax_updates, + impossible_update_policy, + ); assert!(updates.is_empty()); // Test that when a TUF repo is specified and one RoT is outdated, then // it's configured with an update (and the update looks correct). let repo = test_boards.tuf_repo(); - let updates = plan_mgs_updates( - log, - &collection, - current_boards, - &initial_updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: updates, .. } = + plan_mgs_updates( + log, + &collection, + current_boards, + &initial_updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert_eq!(updates.len(), 1); let first_update = updates.iter().next().expect("at least one update"); assert_eq!(first_update.baseboard_id.serial_number, "sled_0"); @@ -839,37 +883,36 @@ mod test { // Test that when an update is already pending, and nothing changes // about the state of the world (i.e., the inventory), then the planner // makes no changes. - let later_updates = plan_mgs_updates( - log, - &collection, - current_boards, - &updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: later_updates, .. } = + plan_mgs_updates( + log, + &collection, + current_boards, + &updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert_eq!(updates, later_updates); // Test that when two updates are needed, but one is already pending, // then the other one is *not* started (because it exceeds // nmax_updates). let later_collection = test_boards - .collection_builder( - ARTIFACT_VERSION_2, - ExpectedVersion::NoValidVersion, - ) + .collection_builder() .rot_active_version_exception(SpType::Sled, 0, ARTIFACT_VERSION_1) .rot_active_version_exception(SpType::Switch, 1, ARTIFACT_VERSION_1) .build(); - let later_updates = plan_mgs_updates( - log, - &later_collection, - current_boards, - &updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: later_updates, .. } = + plan_mgs_updates( + log, + &later_collection, + current_boards, + &updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert_eq!(updates, later_updates); // At this point, we're ready to test that when the first SpType update @@ -879,22 +922,20 @@ mod test { // the planner schedules the updates in the correct order: first RoT, // and second SP. let later_collection = test_boards - .collection_builder( - ARTIFACT_VERSION_2, - ExpectedVersion::NoValidVersion, - ) + .collection_builder() .sp_active_version_exception(SpType::Switch, 1, ARTIFACT_VERSION_1) .rot_active_version_exception(SpType::Switch, 1, ARTIFACT_VERSION_1) .build(); - let later_updates = plan_mgs_updates( - log, - &later_collection, - current_boards, - &updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: later_updates, .. 
} = + plan_mgs_updates( + log, + &later_collection, + current_boards, + &updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert_eq!(later_updates.len(), 1); let next_update = later_updates.iter().next().expect("at least one update"); @@ -907,51 +948,46 @@ mod test { // Finally, test that when all components are in spec, then no updates // are configured. - let updated_collection = test_boards - .collection_builder( - ARTIFACT_VERSION_2, - ExpectedVersion::NoValidVersion, - ) - .build(); - let later_updates = plan_mgs_updates( - log, - &updated_collection, - current_boards, - &later_updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let updated_collection = test_boards.collection_builder().build(); + let PlannedMgsUpdates { pending_updates: later_updates, .. } = + plan_mgs_updates( + log, + &updated_collection, + current_boards, + &later_updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert!(later_updates.is_empty()); // Test that we don't try to update boards that aren't in // `current_boards`, even if they're in inventory and outdated. let collection = test_boards - .collection_builder( - ARTIFACT_VERSION_2, - ExpectedVersion::NoValidVersion, - ) + .collection_builder() .rot_active_version_exception(SpType::Sled, 0, ARTIFACT_VERSION_1) .build(); - let updates = plan_mgs_updates( - log, - &collection, - &BTreeSet::new(), - &PendingMgsUpdates::new(), - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: updates, .. } = + plan_mgs_updates( + log, + &collection, + &BTreeSet::new(), + &PendingMgsUpdates::new(), + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert!(updates.is_empty()); - let updates = plan_mgs_updates( - log, - &collection, - &collection.baseboards, - &PendingMgsUpdates::new(), - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: updates, .. } = + plan_mgs_updates( + log, + &collection, + &collection.baseboards, + &PendingMgsUpdates::new(), + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); // We verified most of the details above. Here we're just double // checking that the baseboard being missing is the only reason that no // update was generated. @@ -977,21 +1013,31 @@ mod test { // Test that if the inactive slot contents have changed, then we'll get // a new update reflecting that. let collection = test_boards - .collection_builder( + .collection_builder() + .sp_versions( + ARTIFACT_VERSION_2, + ExpectedVersion::Version(ARTIFACT_VERSION_1), + ) + .rot_versions( + ARTIFACT_VERSION_2, + ExpectedVersion::Version(ARTIFACT_VERSION_1), + ) + .stage0_versions( ARTIFACT_VERSION_2, ExpectedVersion::Version(ARTIFACT_VERSION_1), ) .rot_active_version_exception(SpType::Sled, 0, ARTIFACT_VERSION_1) .build(); - let new_updates = plan_mgs_updates( - log, - &collection, - &collection.baseboards, - &updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: new_updates, .. 
} = + plan_mgs_updates( + log, + &collection, + &collection.baseboards, + &updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert_ne!(updates, new_updates); assert_eq!(new_updates.len(), 1); let new_update = @@ -1019,21 +1065,19 @@ mod test { // to something other than the new expected version, then we'll also get // a new update reflecting that. let collection = test_boards - .collection_builder( - ARTIFACT_VERSION_2, - ExpectedVersion::NoValidVersion, - ) + .collection_builder() .rot_active_version_exception(SpType::Sled, 0, ARTIFACT_VERSION_1_5) .build(); - let new_updates = plan_mgs_updates( - log, - &collection, - &collection.baseboards, - &updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: new_updates, .. } = + plan_mgs_updates( + log, + &collection, + &collection.baseboards, + &updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert_ne!(updates, new_updates); assert_eq!(new_updates.len(), 1); let new_update = @@ -1075,39 +1119,38 @@ mod test { // Test that with no updates pending and no TUF repo specified, there // will remain no updates pending. let collection = test_boards - .collection_builder( - ARTIFACT_VERSION_2, - ExpectedVersion::NoValidVersion, - ) + .collection_builder() .stage0_version_exception(SpType::Sled, 0, ARTIFACT_VERSION_1) .build(); let current_boards = &collection.baseboards; let initial_updates = PendingMgsUpdates::new(); let nmax_updates = 1; let impossible_update_policy = ImpossibleUpdatePolicy::Reevaluate; - let updates = plan_mgs_updates( - log, - &collection, - current_boards, - &initial_updates, - &TargetReleaseDescription::Initial, - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: updates, .. } = + plan_mgs_updates( + log, + &collection, + current_boards, + &initial_updates, + &TargetReleaseDescription::Initial, + nmax_updates, + impossible_update_policy, + ); assert!(updates.is_empty()); // Test that when a TUF repo is specified and one RoT is outdated, then // it's configured with an update (and the update looks correct). let repo = test_boards.tuf_repo(); - let updates = plan_mgs_updates( - log, - &collection, - current_boards, - &initial_updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: updates, .. } = + plan_mgs_updates( + log, + &collection, + current_boards, + &initial_updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert_eq!(updates.len(), 1); let first_update = updates.iter().next().expect("at least one update"); assert_eq!(first_update.baseboard_id.serial_number, "sled_0"); @@ -1122,37 +1165,36 @@ mod test { // Test that when an update is already pending, and nothing changes // about the state of the world (i.e., the inventory), then the planner // makes no changes. - let later_updates = plan_mgs_updates( - log, - &collection, - current_boards, - &updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: later_updates, .. 
} = + plan_mgs_updates( + log, + &collection, + current_boards, + &updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert_eq!(updates, later_updates); // Test that when two updates are needed, but one is already pending, // then the other one is *not* started (because it exceeds // nmax_updates). let later_collection = test_boards - .collection_builder( - ARTIFACT_VERSION_2, - ExpectedVersion::NoValidVersion, - ) + .collection_builder() .stage0_version_exception(SpType::Sled, 0, ARTIFACT_VERSION_1) .stage0_version_exception(SpType::Switch, 1, ARTIFACT_VERSION_1) .build(); - let later_updates = plan_mgs_updates( - log, - &later_collection, - current_boards, - &updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: later_updates, .. } = + plan_mgs_updates( + log, + &later_collection, + current_boards, + &updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert_eq!(updates, later_updates); // At this point, we're ready to test that when the first SpType update @@ -1162,23 +1204,21 @@ mod test { // the planner schedules the updates in the correct order: first RoT // bootloader, second RoT and third SP. let later_collection = test_boards - .collection_builder( - ARTIFACT_VERSION_2, - ExpectedVersion::NoValidVersion, - ) + .collection_builder() .sp_active_version_exception(SpType::Switch, 1, ARTIFACT_VERSION_1) .rot_active_version_exception(SpType::Switch, 1, ARTIFACT_VERSION_1) .stage0_version_exception(SpType::Switch, 1, ARTIFACT_VERSION_1) .build(); - let later_updates = plan_mgs_updates( - log, - &later_collection, - current_boards, - &updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: later_updates, .. } = + plan_mgs_updates( + log, + &later_collection, + current_boards, + &updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert_eq!(later_updates.len(), 1); let next_update = later_updates.iter().next().expect("at least one update"); @@ -1194,51 +1234,46 @@ mod test { // Finally, test that when all components are in spec, then no updates // are configured. - let updated_collection = test_boards - .collection_builder( - ARTIFACT_VERSION_2, - ExpectedVersion::NoValidVersion, - ) - .build(); - let later_updates = plan_mgs_updates( - log, - &updated_collection, - current_boards, - &later_updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let updated_collection = test_boards.collection_builder().build(); + let PlannedMgsUpdates { pending_updates: later_updates, .. } = + plan_mgs_updates( + log, + &updated_collection, + current_boards, + &later_updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert!(later_updates.is_empty()); // Test that we don't try to update boards that aren't in // `current_boards`, even if they're in inventory and outdated. 
let collection = test_boards - .collection_builder( - ARTIFACT_VERSION_2, - ExpectedVersion::NoValidVersion, - ) + .collection_builder() .stage0_version_exception(SpType::Sled, 0, ARTIFACT_VERSION_1) .build(); - let updates = plan_mgs_updates( - log, - &collection, - &BTreeSet::new(), - &PendingMgsUpdates::new(), - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: updates, .. } = + plan_mgs_updates( + log, + &collection, + &BTreeSet::new(), + &PendingMgsUpdates::new(), + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert!(updates.is_empty()); - let updates = plan_mgs_updates( - log, - &collection, - &collection.baseboards, - &PendingMgsUpdates::new(), - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: updates, .. } = + plan_mgs_updates( + log, + &collection, + &collection.baseboards, + &PendingMgsUpdates::new(), + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); // We verified most of the details above. Here we're just double // checking that the baseboard being missing is the only reason that no // update was generated. @@ -1265,25 +1300,23 @@ mod test { // Test that if the inactive slot contents have changed, then we'll get // a new update reflecting that. let collection = test_boards - .collection_builder( - ARTIFACT_VERSION_2, - ExpectedVersion::NoValidVersion, - ) + .collection_builder() .stage0_versions( ARTIFACT_VERSION_2, ExpectedVersion::Version(ARTIFACT_VERSION_1), ) .stage0_version_exception(SpType::Sled, 0, ARTIFACT_VERSION_1) .build(); - let new_updates = plan_mgs_updates( - log, - &collection, - &collection.baseboards, - &updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: new_updates, .. } = + plan_mgs_updates( + log, + &collection, + &collection.baseboards, + &updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert_ne!(updates, new_updates); assert_eq!(new_updates.len(), 1); let new_update = @@ -1312,21 +1345,19 @@ mod test { // to something other than the new expected version, then we'll also get // a new update reflecting that. let collection = test_boards - .collection_builder( - ARTIFACT_VERSION_2, - ExpectedVersion::NoValidVersion, - ) + .collection_builder() .stage0_version_exception(SpType::Sled, 0, ARTIFACT_VERSION_1_5) .build(); - let new_updates = plan_mgs_updates( - log, - &collection, - &collection.baseboards, - &updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: new_updates, .. } = + plan_mgs_updates( + log, + &collection, + &collection.baseboards, + &updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert_ne!(updates, new_updates); assert_eq!(new_updates.len(), 1); let new_update = @@ -1367,7 +1398,16 @@ mod test { // Initial setup: sled 0 has active version 1 and inactive version 1.5. 
let collection = test_boards - .collection_builder( + .collection_builder() + .sp_versions( + ARTIFACT_VERSION_2, + ExpectedVersion::Version(ARTIFACT_VERSION_1_5), + ) + .rot_versions( + ARTIFACT_VERSION_2, + ExpectedVersion::Version(ARTIFACT_VERSION_1_5), + ) + .stage0_versions( ARTIFACT_VERSION_2, ExpectedVersion::Version(ARTIFACT_VERSION_1_5), ) @@ -1385,15 +1425,16 @@ mod test { // they're both the same. let mut updates = None; for impossible_update_policy in ImpossibleUpdatePolicy::iter() { - let planned_updates = plan_mgs_updates( - log, - &collection, - current_boards, - &initial_updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: planned_updates, .. } = + plan_mgs_updates( + log, + &collection, + current_boards, + &initial_updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert_eq!(planned_updates.len(), 1); let first_update = planned_updates.iter().next().expect("at least one update"); @@ -1423,10 +1464,7 @@ mod test { // update in progress; we've partially written the contents, so there is // no caboose to read. let collection = test_boards - .collection_builder( - ARTIFACT_VERSION_2, - ExpectedVersion::NoValidVersion, - ) + .collection_builder() .rot_versions( ARTIFACT_VERSION_2, ExpectedVersion::Version(ARTIFACT_VERSION_1_5), @@ -1437,28 +1475,30 @@ mod test { // If we plan with `ImpossibleUpdatePolicy::Keep`, we should _not_ // replace the update, even though its preconditions are no longer // valid. - let keep_updates = plan_mgs_updates( - log, - &collection, - current_boards, - &updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - ImpossibleUpdatePolicy::Keep, - ); + let PlannedMgsUpdates { pending_updates: keep_updates, .. } = + plan_mgs_updates( + log, + &collection, + current_boards, + &updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + ImpossibleUpdatePolicy::Keep, + ); assert_eq!(updates, keep_updates); // On the other hand, if we plan with // `ImpossibleUpdatePolicy::Reevaluate`, we should replace the update. - let reeval_updates = plan_mgs_updates( - log, - &collection, - current_boards, - &initial_updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - ImpossibleUpdatePolicy::Keep, - ); + let PlannedMgsUpdates { pending_updates: reeval_updates, .. } = + plan_mgs_updates( + log, + &collection, + current_boards, + &initial_updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + ImpossibleUpdatePolicy::Keep, + ); assert_eq!(reeval_updates.len(), 1); let first_update = reeval_updates.iter().next().expect("at least one update"); @@ -1503,10 +1543,22 @@ mod test { // Start with collections that record everything at version 1. We'll add // exceptions as we step through updates below. 
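Aside: the Keep/Reevaluate behavior exercised just above (in the impossible-update test) comes down to a small policy switch applied when a pending update's recorded preconditions no longer match inventory. A stand-alone sketch with placeholder types is below; `PolicySketch` and `PendingSketch` are invented names, but the two arms correspond to the `ImpossibleUpdatePolicy::Keep` and `ImpossibleUpdatePolicy::Reevaluate` handling shown earlier in this diff.

#[derive(Clone, Copy, Debug)]
enum PolicySketch {
    Keep,
    Reevaluate,
}

#[derive(Clone, Debug, PartialEq, Eq)]
struct PendingSketch(&'static str);

/// Decide what to do with a pending update whose preconditions no longer
/// hold. `Keep` carries it forward unchanged (waiting for the in-flight
/// update to land); `Reevaluate` drops it, so the board becomes an ordinary
/// candidate and a fresh update is planned against current state.
fn handle_impossible(
    policy: PolicySketch,
    pending: PendingSketch,
    keep_list: &mut Vec<PendingSketch>,
) {
    match policy {
        PolicySketch::Keep => keep_list.push(pending),
        PolicySketch::Reevaluate => {
            // Intentionally not kept; the normal planning pass will decide
            // whether a replacement update is needed.
        }
    }
}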
- let mut builder = test_boards.collection_builder( - ARTIFACT_VERSION_1, - ExpectedVersion::NoValidVersion, - ); + let mut builder = test_boards + .collection_builder() + .sp_versions(ARTIFACT_VERSION_1, ExpectedVersion::NoValidVersion) + .rot_versions(ARTIFACT_VERSION_1, ExpectedVersion::NoValidVersion) + .host_phase_1_artifacts( + ARTIFACT_HASH_HOST_PHASE_1_V1, + ARTIFACT_HASH_HOST_PHASE_1_V1, + ) + .host_phase_2_artifacts( + ARTIFACT_HASH_HOST_PHASE_2_V1, + ARTIFACT_HASH_HOST_PHASE_2_V1, + ) + .stage0_versions( + ARTIFACT_VERSION_1, + ExpectedVersion::NoValidVersion, + ); for _ in 0..expected_updates.len() { let collection = builder.clone().build(); @@ -1516,7 +1568,10 @@ mod test { // Run the planner and verify that we got one of our expected // updates. - let new_updates = plan_mgs_updates( + let PlannedMgsUpdates { + pending_updates: new_updates, + mut pending_host_phase_2_changes, + } = plan_mgs_updates( log, &collection, current_boards, @@ -1528,7 +1583,9 @@ mod test { assert_eq!(new_updates.len(), 1); let update = new_updates.iter().next().expect("at least one update"); - expected_updates.verify_one(update); + expected_updates + .verify_one(update, &mut pending_host_phase_2_changes); + assert!(pending_host_phase_2_changes.is_empty()); // Update our builder with an addition exception for the update we // just planned for the next iteration. @@ -1568,7 +1625,13 @@ mod test { ); } PendingMgsUpdateDetails::HostPhase1(_) => { - unimplemented!() + assert_eq!(sp_type, SpType::Sled); + assert!(!builder.has_host_active_exception(sp_slot)); + builder = builder.host_active_exception( + sp_slot, + ARTIFACT_HASH_HOST_PHASE_1, + ARTIFACT_HASH_HOST_PHASE_2, + ); } } @@ -1578,15 +1641,16 @@ mod test { // Take one more lap. It should reflect zero updates. let collection = builder.build(); - let last_updates = plan_mgs_updates( - log, - &collection, - &collection.baseboards, - &latest_updates, - &TargetReleaseDescription::TufRepo(repo.clone()), - nmax_updates, - impossible_update_policy, - ); + let PlannedMgsUpdates { pending_updates: last_updates, .. } = + plan_mgs_updates( + log, + &collection, + &collection.baseboards, + &latest_updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert!(last_updates.is_empty()); logctx.cleanup_successful(); @@ -1616,12 +1680,26 @@ mod test { // currently enforced by the fact that the real planner passes 1 instead // of usize::MAX, but we should probably fix this. 
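+    // The collection below reports every component at its "version 1"
+    // contents (including the host OS phase 1/2 artifact hashes), so every
+    // board needs every kind of update.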
let collection = test_boards - .collection_builder( + .collection_builder() + .sp_versions(ARTIFACT_VERSION_1, ExpectedVersion::NoValidVersion) + .rot_versions(ARTIFACT_VERSION_1, ExpectedVersion::NoValidVersion) + .stage0_versions( ARTIFACT_VERSION_1, ExpectedVersion::NoValidVersion, ) + .host_phase_1_artifacts( + ARTIFACT_HASH_HOST_PHASE_1_V1, + ARTIFACT_HASH_HOST_PHASE_1_V1, + ) + .host_phase_2_artifacts( + ARTIFACT_HASH_HOST_PHASE_2_V1, + ARTIFACT_HASH_HOST_PHASE_2_V1, + ) .build(); - let all_updates = plan_mgs_updates( + let PlannedMgsUpdates { + pending_updates: all_updates, + mut pending_host_phase_2_changes, + } = plan_mgs_updates( log, &collection, &collection.baseboards, @@ -1641,23 +1719,31 @@ mod test { panic!("unexpected update type: {update:?}") } } - expected_updates.verify_one(update); + expected_updates + .verify_one(update, &mut pending_host_phase_2_changes); } + assert!(pending_host_phase_2_changes.is_empty()); // Update the whole system at once again, but note the RoT bootloaders // have all been updated already; this should attempt to update all of // the RoTs. let collection = test_boards - .collection_builder( - ARTIFACT_VERSION_1, - ExpectedVersion::NoValidVersion, + .collection_builder() + .sp_versions(ARTIFACT_VERSION_1, ExpectedVersion::NoValidVersion) + .rot_versions(ARTIFACT_VERSION_1, ExpectedVersion::NoValidVersion) + .host_phase_1_artifacts( + ARTIFACT_HASH_HOST_PHASE_1_V1, + ARTIFACT_HASH_HOST_PHASE_1_V1, ) - .stage0_versions( - ARTIFACT_VERSION_2, - ExpectedVersion::NoValidVersion, + .host_phase_2_artifacts( + ARTIFACT_HASH_HOST_PHASE_2_V1, + ARTIFACT_HASH_HOST_PHASE_2_V1, ) .build(); - let all_updates = plan_mgs_updates( + let PlannedMgsUpdates { + pending_updates: all_updates, + mut pending_host_phase_2_changes, + } = plan_mgs_updates( log, &collection, &collection.baseboards, @@ -1676,24 +1762,30 @@ mod test { panic!("unexpected update type: {update:?}") } } - expected_updates.verify_one(update); + expected_updates + .verify_one(update, &mut pending_host_phase_2_changes); } + assert!(pending_host_phase_2_changes.is_empty()); // Update the whole system at once again, but note the RoT bootloaders // and RoTs have all been updated already; this should attempt to update // all of the SPs. let collection = test_boards - .collection_builder( - ARTIFACT_VERSION_1, - ExpectedVersion::NoValidVersion, + .collection_builder() + .sp_versions(ARTIFACT_VERSION_1, ExpectedVersion::NoValidVersion) + .host_phase_1_artifacts( + ARTIFACT_HASH_HOST_PHASE_1_V1, + ARTIFACT_HASH_HOST_PHASE_1_V1, ) - .rot_versions(ARTIFACT_VERSION_2, ExpectedVersion::NoValidVersion) - .stage0_versions( - ARTIFACT_VERSION_2, - ExpectedVersion::NoValidVersion, + .host_phase_2_artifacts( + ARTIFACT_HASH_HOST_PHASE_2_V1, + ARTIFACT_HASH_HOST_PHASE_2_V1, ) .build(); - let all_updates = plan_mgs_updates( + let PlannedMgsUpdates { + pending_updates: all_updates, + mut pending_host_phase_2_changes, + } = plan_mgs_updates( log, &collection, &collection.baseboards, @@ -1712,21 +1804,62 @@ mod test { panic!("unexpected update type: {update:?}") } } - expected_updates.verify_one(update); + expected_updates + .verify_one(update, &mut pending_host_phase_2_changes); + } + assert!(pending_host_phase_2_changes.is_empty()); + + // Update the whole system at once again, but note the RoT bootloaders, + // RoTs, and SPs have all been updated already; this should attempt to + // update all the host OSs. 
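+    // Unlike the other components, each planned host phase 1 update comes
+    // with a corresponding pending host phase 2 change; `verify_one` checks
+    // and drains those, so the map should be empty once the loop finishes.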
+    let collection = test_boards
+        .collection_builder()
+        .host_phase_1_artifacts(
+            ARTIFACT_HASH_HOST_PHASE_1_V1,
+            ARTIFACT_HASH_HOST_PHASE_1_V1,
+        )
+        .host_phase_2_artifacts(
+            ARTIFACT_HASH_HOST_PHASE_2_V1,
+            ARTIFACT_HASH_HOST_PHASE_2_V1,
+        )
+        .build();
+    let PlannedMgsUpdates {
+        pending_updates: all_updates,
+        mut pending_host_phase_2_changes,
+    } = plan_mgs_updates(
+        log,
+        &collection,
+        &collection.baseboards,
+        &PendingMgsUpdates::new(),
+        &TargetReleaseDescription::TufRepo(repo.clone()),
+        usize::MAX,
+        impossible_update_policy,
+    );
+    for update in &all_updates {
+        // Confirm all our updates are to host OS phase 1.
+        match &update.details {
+            PendingMgsUpdateDetails::HostPhase1(..) => (),
+            PendingMgsUpdateDetails::Sp { .. }
+            | PendingMgsUpdateDetails::Rot { .. }
+            | PendingMgsUpdateDetails::RotBootloader { .. } => {
+                panic!("unexpected update type: {update:?}")
+            }
+        }
+        expected_updates
+            .verify_one(update, &mut pending_host_phase_2_changes);
     }
+    assert!(pending_host_phase_2_changes.is_empty());
 
     // We should have performed all expected updates.
     assert!(expected_updates.is_empty());
 
     // Now, notice when they've all been updated, even if the limit is only
     // one.
-    let collection = test_boards
-        .collection_builder(
-            ARTIFACT_VERSION_2,
-            ExpectedVersion::NoValidVersion,
-        )
-        .build();
-    let all_updates_done = plan_mgs_updates(
+    let collection = test_boards.collection_builder().build();
+    let PlannedMgsUpdates {
+        pending_updates: all_updates_done,
+        pending_host_phase_2_changes,
+    } = plan_mgs_updates(
         log,
         &collection,
         &collection.baseboards,
@@ -1736,6 +1869,7 @@ mod test {
         impossible_update_policy,
     );
     assert!(all_updates_done.is_empty());
+    assert!(pending_host_phase_2_changes.is_empty());
 
     logctx.cleanup_successful();
 }
@@ -1754,25 +1888,23 @@ mod test {
     let log = &logctx.log;
     let repo = test_boards.tuf_repo();
     let mut collection = test_boards
-        .collection_builder(
-            ARTIFACT_VERSION_2,
-            ExpectedVersion::NoValidVersion,
-        )
+        .collection_builder()
         .sp_active_version_exception(SpType::Sled, 0, ARTIFACT_VERSION_1)
         .rot_active_version_exception(SpType::Sled, 0, ARTIFACT_VERSION_1)
         .stage0_version_exception(SpType::Sled, 0, ARTIFACT_VERSION_1)
         .build();
     let nmax_updates = 1;
     let impossible_update_policy = ImpossibleUpdatePolicy::Reevaluate;
-    let updates = plan_mgs_updates(
-        log,
-        &collection,
-        &collection.baseboards,
-        &PendingMgsUpdates::new(),
-        &TargetReleaseDescription::TufRepo(repo.clone()),
-        nmax_updates,
-        impossible_update_policy,
-    );
+    let PlannedMgsUpdates { pending_updates: updates, .. } =
+        plan_mgs_updates(
+            log,
+            &collection,
+            &collection.baseboards,
+            &PendingMgsUpdates::new(),
+            &TargetReleaseDescription::TufRepo(repo.clone()),
+            nmax_updates,
+            impossible_update_policy,
+        );
     assert!(!updates.is_empty());
     let update =
         updates.into_iter().next().expect("at least one update");
@@ -1787,19 +1919,20 @@ mod test {
 
     // Plan again. The configured update should be updated to reflect the
     // new location.
-    let new_updates = plan_mgs_updates(
-        log,
-        &collection,
-        &collection.baseboards,
-        &updates,
-        &TargetReleaseDescription::TufRepo(repo.clone()),
-        nmax_updates,
-        impossible_update_policy,
-    );
+    let PlannedMgsUpdates { pending_updates: new_updates, ..
} = + plan_mgs_updates( + log, + &collection, + &collection.baseboards, + &updates, + &TargetReleaseDescription::TufRepo(repo.clone()), + nmax_updates, + impossible_update_policy, + ); assert!(!new_updates.is_empty()); let new_update = new_updates.into_iter().next().expect("at least one update"); - assert_ne!(new_update.slot_id, update.slot_id); + assert_eq!(new_update.slot_id, 9); assert_eq!(new_update.baseboard_id, update.baseboard_id); assert_eq!(new_update.sp_type, update.sp_type); assert_eq!(new_update.artifact_hash, update.artifact_hash); diff --git a/nexus/reconfigurator/planning/src/mgs_updates/test_helpers.rs b/nexus/reconfigurator/planning/src/mgs_updates/test_helpers.rs index 8b01bb40f1..b135ef7f6c 100644 --- a/nexus/reconfigurator/planning/src/mgs_updates/test_helpers.rs +++ b/nexus/reconfigurator/planning/src/mgs_updates/test_helpers.rs @@ -14,25 +14,46 @@ use gateway_client::types::SpIdentifier; use gateway_client::types::SpState; use gateway_client::types::SpType; use gateway_types::rot::RotSlot; +use id_map::IdMap; use iddqd::IdOrdItem; use iddqd::IdOrdMap; +use nexus_sled_agent_shared::inventory::Baseboard; +use nexus_sled_agent_shared::inventory::BootImageHeader; +use nexus_sled_agent_shared::inventory::BootPartitionContents; +use nexus_sled_agent_shared::inventory::BootPartitionDetails; +use nexus_sled_agent_shared::inventory::ConfigReconcilerInventory; +use nexus_sled_agent_shared::inventory::ConfigReconcilerInventoryStatus; +use nexus_sled_agent_shared::inventory::HostPhase2DesiredSlots; +use nexus_sled_agent_shared::inventory::Inventory; +use nexus_sled_agent_shared::inventory::OmicronSledConfig; +use nexus_sled_agent_shared::inventory::SledCpuFamily; +use nexus_sled_agent_shared::inventory::SledRole; +use nexus_sled_agent_shared::inventory::ZoneImageResolverInventory; +use nexus_types::deployment::BlueprintArtifactVersion; +use nexus_types::deployment::BlueprintHostPhase2DesiredContents; use nexus_types::deployment::ExpectedVersion; use nexus_types::deployment::PendingMgsUpdate; use nexus_types::deployment::PendingMgsUpdateDetails; +use nexus_types::deployment::PendingMgsUpdateHostPhase1Details; use nexus_types::deployment::PendingMgsUpdateRotBootloaderDetails; use nexus_types::deployment::PendingMgsUpdateRotDetails; use nexus_types::deployment::PendingMgsUpdateSpDetails; use nexus_types::inventory::CabooseWhich; use nexus_types::inventory::Collection; +use omicron_common::api::external::Generation; use omicron_common::api::external::TufArtifactMeta; use omicron_common::api::external::TufRepoDescription; use omicron_common::api::external::TufRepoMeta; +use omicron_common::disk::M2Slot; use omicron_common::update::ArtifactId; +use omicron_uuid_kinds::SledUuid; use tufaceous_artifact::ArtifactHash; use tufaceous_artifact::ArtifactKind; use tufaceous_artifact::ArtifactVersion; use tufaceous_artifact::KnownArtifactKind; +use crate::mgs_updates::PendingHostPhase2Changes; + /// Version that will be used for all artifacts in the TUF repo pub(super) const ARTIFACT_VERSION_2: ArtifactVersion = ArtifactVersion::new_const("2.0.0"); @@ -84,11 +105,32 @@ pub(super) const ARTIFACT_HASH_ROT_BOOTLOADER_PSC: ArtifactHash = /// Hash of fake artifact for fake switch RoT bootloader pub(super) const ARTIFACT_HASH_ROT_BOOTLOADER_SWITCH: ArtifactHash = ArtifactHash([28; 32]); +/// Hash of fake artifact for host OS phase 1 +pub(super) const ARTIFACT_HASH_HOST_PHASE_1: ArtifactHash = + ArtifactHash([29; 32]); +/// Hash of fake artifact for host OS phase 1 (for a fake version 1.5) +pub(super) 
const ARTIFACT_HASH_HOST_PHASE_1_V1_5: ArtifactHash =
+    ArtifactHash([30; 32]);
+/// Hash of fake artifact for host OS phase 2
+pub(super) const ARTIFACT_HASH_HOST_PHASE_2: ArtifactHash =
+    ArtifactHash([31; 32]);
+
+/// Hash of a fake "version 1" artifact for host OS phase 1
+///
+/// This can be used to produce an inventory collection for a host slot that
+/// needs an update.
+pub(super) const ARTIFACT_HASH_HOST_PHASE_1_V1: ArtifactHash =
+    ArtifactHash([32; 32]);
+/// Hash of a fake "version 1" artifact for host OS phase 2
+///
+/// This can be used to produce an inventory collection for a host slot that
+/// needs an update.
+pub(super) const ARTIFACT_HASH_HOST_PHASE_2_V1: ArtifactHash =
+    ArtifactHash([33; 32]);
 
 // unused artifact hashes contained in our fake TUF repo
 const ARTIFACT_HASH_CONTROL_PLANE: ArtifactHash = ArtifactHash([33; 32]);
 const ARTIFACT_HASH_NEXUS: ArtifactHash = ArtifactHash([34; 32]);
-const ARTIFACT_HASH_HOST_OS: ArtifactHash = ArtifactHash([35; 32]);
 
 /// Hash of fake RoT signing keys
 const ROT_SIGN_GIMLET: &str =
@@ -123,6 +165,10 @@ impl From<&'_ PendingMgsUpdateDetails> for MgsUpdateComponent {
 #[derive(Debug)]
 pub(super) struct TestBoard {
     pub(super) id: SpIdentifier,
+    /// `sled_id` is only meaningful for test boards of the `SpType::Sled`
+    /// variety, but it's simpler to just provide one for them all. If this
+    /// weren't test code, we'd want to model this more precisely.
+    pub(super) sled_id: SledUuid,
     pub(super) serial: &'static str,
     pub(super) sp_board: &'static str,
     pub(super) rot_board: &'static str,
@@ -198,6 +244,7 @@ impl TestBoards {
         boards
             .insert_unique(TestBoard {
                 id: SpIdentifier { type_, slot },
+                sled_id: SledUuid::new_v4(),
                 serial,
                 sp_board,
                 rot_board,
@@ -209,21 +256,38 @@ impl TestBoards {
         Self { boards, test_name }
     }
 
+    /// Get the sled ID of a particular sled by SP slot number.
+    pub fn sled_id(&self, sp_slot: u16) -> Option<SledUuid> {
+        self.boards.iter().find_map(|b| {
+            (b.id.type_ == SpType::Sled && b.id.slot == sp_slot)
+                .then_some(b.sled_id)
+        })
+    }
+
     /// Get a helper to build an inventory collection reflecting specific
     /// versions of our test boards.
     ///
-    /// The provided versions will be the default for the active and inactive
-    /// SP, RoT, and RoT bootloader slots. They can be overridden by methods on
-    /// the returned builder.
-    pub fn collection_builder<'a>(
-        &'a self,
-        default_active_version: ArtifactVersion,
-        default_inactive_version: ExpectedVersion,
-    ) -> TestBoardCollectionBuilder<'a> {
+    /// By default, the active version for all reported SPs, RoTs, and RoT
+    /// bootloaders will be `ARTIFACT_VERSION_2` and the inactive version will
+    /// be `ExpectedVersion::NoValidVersion`. All active host phase 1 slots
+    /// will be reported as containing `ARTIFACT_HASH_HOST_PHASE_1` and all
+    /// inactive host phase 1 slots as containing
+    /// `ARTIFACT_HASH_HOST_PHASE_1_V1`; likewise, host phase 2 slots default
+    /// to `ARTIFACT_HASH_HOST_PHASE_2` (active) and
+    /// `ARTIFACT_HASH_HOST_PHASE_2_V1` (inactive).
+    ///
+    /// These defaults produce a collection in which no updateable item needs
+    /// an update: the active versions and hashes match those in our
+    /// `tuf_repo()`. They can be overridden by methods on the returned
+    /// builder before the collection is built to induce particular kinds of
+    /// needed updates.
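+    ///
+    /// For example (sketch), a test that wants sled 0 to need an SP update
+    /// might write:
+    ///
+    /// ```text
+    /// let collection = test_boards
+    ///     .collection_builder()
+    ///     .sp_active_version_exception(SpType::Sled, 0, ARTIFACT_VERSION_1)
+    ///     .build();
+    /// ```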
+ pub fn collection_builder<'a>(&'a self) -> TestBoardCollectionBuilder<'a> { TestBoardCollectionBuilder::new( self, - default_active_version, - default_inactive_version, + ARTIFACT_VERSION_2, + ExpectedVersion::NoValidVersion, + ARTIFACT_HASH_HOST_PHASE_1, + ARTIFACT_HASH_HOST_PHASE_1_V1, + ARTIFACT_HASH_HOST_PHASE_2, + ARTIFACT_HASH_HOST_PHASE_2_V1, ) } @@ -267,9 +331,15 @@ impl TestBoards { None, ), make_artifact( - "host-os", - KnownArtifactKind::Host.into(), - ARTIFACT_HASH_HOST_OS, + "host-os-phase-1", + ArtifactKind::HOST_PHASE_1, + ARTIFACT_HASH_HOST_PHASE_1, + None, + ), + make_artifact( + "host-os-phase-2", + ArtifactKind::HOST_PHASE_2, + ARTIFACT_HASH_HOST_PHASE_2, None, ), make_artifact( @@ -392,6 +462,7 @@ impl TestBoards { /// these test boards. pub fn expected_updates(&self) -> ExpectedUpdates { let mut updates = IdOrdMap::new(); + let mut phase2 = IdOrdMap::new(); for board in &self.boards { updates @@ -433,9 +504,35 @@ impl TestBoards { ), }) .expect("boards are unique"); + + if board.id.type_ == SpType::Sled { + updates + .insert_unique(ExpectedUpdate { + sp_type: board.id.type_, + sp_slot: board.id.slot, + component: MgsUpdateComponent::HostOs, + expected_serial: board.serial, + expected_artifact: ARTIFACT_HASH_HOST_PHASE_1, + }) + .expect("boards are unique"); + phase2 + .insert_unique(ExpectedHostPhase2Change { + sp_slot: board.id.slot, + sled_id: board.sled_id, + slot: M2Slot::B, + contents: + BlueprintHostPhase2DesiredContents::Artifact { + version: BlueprintArtifactVersion::Available { + version: ARTIFACT_VERSION_2, + }, + hash: ARTIFACT_HASH_HOST_PHASE_2, + }, + }) + .expect("boards are unique"); + } } - ExpectedUpdates { updates } + ExpectedUpdates { updates, phase2 } } } @@ -458,9 +555,28 @@ impl IdOrdItem for ExpectedUpdate { iddqd::id_upcast!(); } +#[derive(Debug)] +struct ExpectedHostPhase2Change { + sp_slot: u16, + sled_id: SledUuid, + slot: M2Slot, + contents: BlueprintHostPhase2DesiredContents, +} + +impl IdOrdItem for ExpectedHostPhase2Change { + type Key<'a> = u16; + + fn key(&self) -> Self::Key<'_> { + self.sp_slot + } + + iddqd::id_upcast!(); +} + /// Test helper containing all the expected updates from a `TestBoards`. pub(super) struct ExpectedUpdates { updates: IdOrdMap, + phase2: IdOrdMap, } impl ExpectedUpdates { @@ -477,7 +593,18 @@ impl ExpectedUpdates { /// /// Callers can confirm that all updates have been verified by calling this /// method for each expected update and then checking `self.is_empty()`. - pub fn verify_one(&mut self, update: &PendingMgsUpdate) { + /// + /// If `update` describes a host phase 1 update, we'll also confirm that + /// `pending_host_phase_2_changes` contains the expected corresponding + /// change, and _remove_ that change from `pending_host_phase_2_changes`. + /// This allows the calling tests to assert that + /// `pending_host_phase_2_changes` is empty once all updates have been + /// verified. 
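+    ///
+    /// A sketch of the calling pattern used by the planner tests:
+    ///
+    /// ```text
+    /// for update in &all_updates {
+    ///     expected_updates
+    ///         .verify_one(update, &mut pending_host_phase_2_changes);
+    /// }
+    /// assert!(pending_host_phase_2_changes.is_empty());
+    /// assert!(expected_updates.is_empty());
+    /// ```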
+ pub fn verify_one( + &mut self, + update: &PendingMgsUpdate, + pending_host_phase_2_changes: &mut PendingHostPhase2Changes, + ) { let sp_type = update.sp_type; let sp_slot = update.slot_id; let component = MgsUpdateComponent::from(&update.details); @@ -507,7 +634,56 @@ impl ExpectedUpdates { expected_stage0_next_version, }, ) => (expected_stage0_version, expected_stage0_next_version), - PendingMgsUpdateDetails::HostPhase1(_) => unimplemented!(), + PendingMgsUpdateDetails::HostPhase1( + PendingMgsUpdateHostPhase1Details { + expected_active_phase_1_slot, + expected_boot_disk, + expected_active_phase_1_hash, + expected_active_phase_2_hash, + expected_inactive_phase_1_hash, + expected_inactive_phase_2_hash, + sled_agent_address: _, + }, + ) => { + // Host OS updates aren't in terms of versions, so we can't + // return the expected versions in this match arm. Just do + // our own checks then return directly. + assert_eq!(*expected_active_phase_1_slot, M2Slot::A); + assert_eq!(*expected_boot_disk, M2Slot::A); + assert_eq!( + *expected_active_phase_1_hash, + ARTIFACT_HASH_HOST_PHASE_1_V1 + ); + assert_eq!( + *expected_inactive_phase_1_hash, + ARTIFACT_HASH_HOST_PHASE_1_V1 + ); + assert_eq!( + *expected_active_phase_2_hash, + ARTIFACT_HASH_HOST_PHASE_2_V1 + ); + // The inactive phase 2 hash should match the _new_ artifact + // in the TUF repo; our planner sets this precondition and + // execution waits for sled-agent to fulfill it. + assert_eq!( + *expected_inactive_phase_2_hash, + ARTIFACT_HASH_HOST_PHASE_2 + ); + + // We should also have a corresponding phase 2 change for this + // phase 1 update. + let expected_phase2 = self + .phase2 + .remove(&sp_slot) + .expect("missing phase2 update"); + let (actual_phase2_slot, actual_phase2_contents) = + pending_host_phase_2_changes + .remove(&expected_phase2.sled_id) + .expect("missing expected pending phase 2 change"); + assert_eq!(expected_phase2.slot, actual_phase2_slot); + assert_eq!(expected_phase2.contents, actual_phase2_contents); + return; + } }; assert_eq!(*expected_active_version, ARTIFACT_VERSION_1); assert_eq!(*expected_inactive_version, ExpectedVersion::NoValidVersion); @@ -537,10 +713,19 @@ pub(super) struct TestBoardCollectionBuilder<'a> { stage0_version: ArtifactVersion, stage0_next_version: ExpectedVersion, + // default artifacts (host OS updates don't work in terms of versions) + host_phase_1_active_artifact: ArtifactHash, + host_phase_1_inactive_artifact: ArtifactHash, + host_phase_2_active_artifact: ArtifactHash, + host_phase_2_inactive_artifact: ArtifactHash, + // fields that callers _may_ influence before calling `build()` sp_active_version_exceptions: BTreeMap, rot_active_version_exceptions: BTreeMap, stage0_version_exceptions: BTreeMap, + + // host exceptions are keyed only by slot; they only apply to sleds. 
+ host_exceptions: BTreeMap, } impl<'a> TestBoardCollectionBuilder<'a> { @@ -548,6 +733,10 @@ impl<'a> TestBoardCollectionBuilder<'a> { boards: &'a TestBoards, default_active_version: ArtifactVersion, default_inactive_version: ExpectedVersion, + default_active_host_phase_1: ArtifactHash, + default_inactive_host_phase_1: ArtifactHash, + default_active_host_phase_2: ArtifactHash, + default_inactive_host_phase_2: ArtifactHash, ) -> Self { Self { boards, @@ -557,9 +746,14 @@ impl<'a> TestBoardCollectionBuilder<'a> { rot_inactive_version: default_inactive_version.clone(), stage0_version: default_active_version, stage0_next_version: default_inactive_version, + host_phase_1_active_artifact: default_active_host_phase_1, + host_phase_1_inactive_artifact: default_inactive_host_phase_1, + host_phase_2_active_artifact: default_active_host_phase_2, + host_phase_2_inactive_artifact: default_inactive_host_phase_2, sp_active_version_exceptions: BTreeMap::new(), rot_active_version_exceptions: BTreeMap::new(), stage0_version_exceptions: BTreeMap::new(), + host_exceptions: BTreeMap::new(), } } @@ -652,6 +846,41 @@ impl<'a> TestBoardCollectionBuilder<'a> { .contains_key(&SpIdentifier { type_, slot }) } + pub fn host_phase_1_artifacts( + mut self, + active: ArtifactHash, + inactive: ArtifactHash, + ) -> Self { + self.host_phase_1_active_artifact = active; + self.host_phase_1_inactive_artifact = inactive; + self + } + + pub fn host_phase_2_artifacts( + mut self, + active: ArtifactHash, + inactive: ArtifactHash, + ) -> Self { + self.host_phase_2_active_artifact = active; + self.host_phase_2_inactive_artifact = inactive; + self + } + + pub fn host_active_exception( + mut self, + sp_slot: u16, + phase_1: ArtifactHash, + phase_2: ArtifactHash, + ) -> Self { + self.host_exceptions + .insert(sp_slot, HostOsException { phase_1, phase_2 }); + self + } + + pub fn has_host_active_exception(&self, slot: u16) -> bool { + self.host_exceptions.contains_key(&slot) + } + pub fn build(self) -> Collection { let mut builder = nexus_inventory::CollectionBuilder::new(self.boards.test_name); @@ -682,6 +911,7 @@ impl<'a> TestBoardCollectionBuilder<'a> { for board in &self.boards.boards { let &TestBoard { id: sp_id, + sled_id, serial, sp_board: caboose_sp_board, rot_board: caboose_rot_board, @@ -695,7 +925,12 @@ impl<'a> TestBoardCollectionBuilder<'a> { }; let baseboard_id = builder - .found_sp_state("test", sp_id.type_, sp_id.slot, sp_state) + .found_sp_state( + "test", + sp_id.type_, + sp_id.slot, + sp_state.clone(), + ) .unwrap(); let sp_active_version = self .sp_active_version_exceptions @@ -817,12 +1052,132 @@ impl<'a> TestBoardCollectionBuilder<'a> { ) .unwrap(); } + + if board.id.type_ == SpType::Sled { + let phase_1_active_artifact = self + .host_exceptions + .get(&board.id.slot) + .map(|ex| ex.phase_1) + .unwrap_or(self.host_phase_1_active_artifact); + let phase_2_active_artifact = self + .host_exceptions + .get(&board.id.slot) + .map(|ex| ex.phase_2) + .unwrap_or(self.host_phase_2_active_artifact); + builder + .found_host_phase_1_active_slot( + &baseboard_id, + "test", + M2Slot::A, + ) + .unwrap(); + builder + .found_host_phase_1_flash_hash( + &baseboard_id, + M2Slot::A, + "test", + phase_1_active_artifact, + ) + .unwrap(); + builder + .found_host_phase_1_flash_hash( + &baseboard_id, + M2Slot::B, + "test", + self.host_phase_1_inactive_artifact, + ) + .unwrap(); + let fake_sled_config = OmicronSledConfig { + generation: Generation::new(), + disks: IdMap::new(), + datasets: IdMap::new(), + zones: IdMap::new(), + 
remove_mupdate_override: None, + host_phase_2: HostPhase2DesiredSlots::current_contents(), + }; + + // The only sled-agent fields that matter for the purposes of + // update testing are: + // + // * `sled_id` (used to validate expected phase 2 changes) + // * `baseboard` (must match this fake SP's) + // * `last_reconciliation` (must contain a valid boot disk and + // active slot phase 2 hash) + let fake_phase_2_header = BootImageHeader { + flags: 0, + data_size: 0, + image_size: 0, + target_size: 0, + sha256: [0; 32], + image_name: "fake image for planner tests".to_string(), + }; + let boot_partitions = BootPartitionContents { + boot_disk: Ok(M2Slot::A), + slot_a: Ok(BootPartitionDetails { + header: fake_phase_2_header.clone(), + artifact_hash: phase_2_active_artifact, + artifact_size: 0, + }), + slot_b: Ok(BootPartitionDetails { + header: fake_phase_2_header.clone(), + artifact_hash: self.host_phase_2_inactive_artifact, + artifact_size: 0, + }), + }; + builder + .found_sled_inventory( + "test", + Inventory { + // fields we care about + sled_id, + baseboard: Baseboard::Gimlet { + identifier: sp_state.serial_number.clone(), + model: sp_state.model.clone(), + revision: 0, + }, + last_reconciliation: Some( + ConfigReconcilerInventory { + last_reconciled_config: fake_sled_config + .clone(), + external_disks: BTreeMap::new(), + datasets: BTreeMap::new(), + orphaned_datasets: IdOrdMap::new(), + zones: BTreeMap::new(), + boot_partitions, + remove_mupdate_override: None, + }, + ), + // fields we never inspect; filled in with dummy data + sled_agent_address: "[::1]:0".parse().unwrap(), + sled_role: SledRole::Gimlet, + usable_hardware_threads: 0, + usable_physical_ram: 0.into(), + cpu_family: SledCpuFamily::Unknown, + reservoir_size: 0.into(), + disks: vec![], + zpools: vec![], + datasets: vec![], + ledgered_sled_config: Some(fake_sled_config), + reconciler_status: + ConfigReconcilerInventoryStatus::NotYetRun, + zone_image_resolver: + ZoneImageResolverInventory::new_fake(), + }, + ) + .unwrap(); + } } builder.build() } } +#[derive(Debug, Clone, Copy)] +struct HostOsException { + phase_1: ArtifactHash, + phase_2: ArtifactHash, +} + fn test_artifact_for_board(board: &str) -> ArtifactHash { match board { "gimlet-d" => ARTIFACT_HASH_SP_GIMLET_D, diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index 4a38be0ce0..d9633258d0 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -15,6 +15,7 @@ use crate::blueprint_builder::Operation; use crate::blueprint_editor::DisksEditError; use crate::blueprint_editor::SledEditError; use crate::mgs_updates::ImpossibleUpdatePolicy; +use crate::mgs_updates::PlannedMgsUpdates; use crate::mgs_updates::plan_mgs_updates; use crate::planner::image_source::NoopConvertZoneStatus; use crate::planner::omicron_zone_placement::PlacementError; @@ -187,7 +188,7 @@ impl<'a> Planner<'a> { // Only plan MGS-based updates updates if there are no outstanding // MUPdate overrides. let mgs_updates = if plan_mupdate_override_res.is_empty() { - self.do_plan_mgs_updates() + self.do_plan_mgs_updates()? } else { PlanningMgsUpdatesStepReport::new(PendingMgsUpdates::new()) }; @@ -1117,7 +1118,9 @@ impl<'a> Planner<'a> { /// Update at most one MGS-managed device (SP, RoT, etc.), if any are out of /// date. - fn do_plan_mgs_updates(&mut self) -> PlanningMgsUpdatesStepReport { + fn do_plan_mgs_updates( + &mut self, + ) -> Result { // Determine which baseboards we will consider updating. 
// // Sleds may be present but not adopted as part of the control plane. @@ -1162,27 +1165,30 @@ impl<'a> Planner<'a> { } else { ImpossibleUpdatePolicy::Reevaluate }; - let next = plan_mgs_updates( - &self.log, - &self.inventory, - &included_baseboards, - current_updates, - current_artifacts, - NUM_CONCURRENT_MGS_UPDATES, - impossible_update_policy, - ); - if next != *current_updates { + let PlannedMgsUpdates { pending_updates, pending_host_phase_2_changes } = + plan_mgs_updates( + &self.log, + &self.inventory, + &included_baseboards, + current_updates, + current_artifacts, + NUM_CONCURRENT_MGS_UPDATES, + impossible_update_policy, + ); + if pending_updates != *current_updates { // This will only add comments if our set of updates changed _and_ // we have at least one update. If we went from "some updates" to // "no updates", that's not really comment-worthy; presumably we'll // do something else comment-worthy in a subsequent step. - for update in next.iter() { + for update in pending_updates.iter() { self.blueprint.comment(update.description()); } } + self.blueprint + .apply_pending_host_phase_2_changes(pending_host_phase_2_changes)?; - self.blueprint.pending_mgs_updates_replace_all(next.clone()); - PlanningMgsUpdatesStepReport::new(next) + self.blueprint.pending_mgs_updates_replace_all(pending_updates.clone()); + Ok(PlanningMgsUpdatesStepReport::new(pending_updates)) } /// Update at most one existing zone to use a new image source. diff --git a/nexus/reconfigurator/planning/src/system.rs b/nexus/reconfigurator/planning/src/system.rs index 00f007c838..c26801e5a3 100644 --- a/nexus/reconfigurator/planning/src/system.rs +++ b/nexus/reconfigurator/planning/src/system.rs @@ -449,10 +449,7 @@ impl SystemDescription { sled_id: SledUuid, sled_config: OmicronSledConfig, ) -> anyhow::Result<&mut Self> { - let sled = self.sleds.get_mut(&sled_id).with_context(|| { - format!("attempted to access sled {} not found in system", sled_id) - })?; - let sled = Arc::make_mut(sled); + let sled = self.get_sled_mut(sled_id)?; sled.inventory_sled_agent.ledgered_sled_config = Some(sled_config.clone()); @@ -475,10 +472,8 @@ impl SystemDescription { sled_id: SledUuid, policy: SledPolicy, ) -> anyhow::Result<&mut Self> { - let sled = self.sleds.get_mut(&sled_id).with_context(|| { - format!("attempted to access sled {} not found in system", sled_id) - })?; - Arc::make_mut(sled).policy = policy; + let sled = self.get_sled_mut(sled_id)?; + sled.policy = policy; Ok(self) } @@ -490,11 +485,9 @@ impl SystemDescription { sled_id: SledUuid, visibility: SledInventoryVisibility, ) -> anyhow::Result { - let sled = self.sleds.get_mut(&sled_id).with_context(|| { - format!("attempted to access sled {} not found in system", sled_id) - })?; - let prev = Arc::make_mut(sled).inventory_visibility; - Arc::make_mut(sled).inventory_visibility = visibility; + let sled = self.get_sled_mut(sled_id)?; + let prev = sled.inventory_visibility; + sled.inventory_visibility = visibility; Ok(prev) } @@ -507,10 +500,7 @@ impl SystemDescription { stage0_version: Option, stage0_next_version: Option, ) -> anyhow::Result<&mut Self> { - let sled = self.sleds.get_mut(&sled_id).with_context(|| { - format!("attempted to access sled {} not found in system", sled_id) - })?; - let sled = Arc::make_mut(sled); + let sled = self.get_sled_mut(sled_id)?; sled.set_rot_bootloader_versions(stage0_version, stage0_next_version); Ok(self) } @@ -544,24 +534,48 @@ impl SystemDescription { active_version: Option, inactive_version: Option, ) -> anyhow::Result<&mut Self> { - 
let sled = self.sleds.get_mut(&sled_id).with_context(|| {
-            format!("attempted to access sled {} not found in system", sled_id)
-        })?;
-        let sled = Arc::make_mut(sled);
+        let sled = self.get_sled_mut(sled_id)?;
         sled.set_sp_versions(active_version, inactive_version);
         Ok(self)
     }
 
+    /// Update the host OS phase 1 artifacts reported for a sled.
+    ///
+    /// Where `None` is provided, no changes are made.
+    pub fn sled_update_host_phase_1_artifacts(
+        &mut self,
+        sled_id: SledUuid,
+        active: Option<M2Slot>,
+        slot_a: Option<ArtifactHash>,
+        slot_b: Option<ArtifactHash>,
+    ) -> anyhow::Result<&mut Self> {
+        let sled = self.get_sled_mut(sled_id)?;
+        sled.set_host_phase_1_artifacts(active, slot_a, slot_b);
+        Ok(self)
+    }
+
+    /// Update the host OS phase 2 artifacts reported for a sled.
+    ///
+    /// Where `None` is provided, no changes are made.
+    pub fn sled_update_host_phase_2_artifacts(
+        &mut self,
+        sled_id: SledUuid,
+        boot_disk: Option<M2Slot>,
+        slot_a: Option<ArtifactHash>,
+        slot_b: Option<ArtifactHash>,
+    ) -> anyhow::Result<&mut Self> {
+        let sled = self.get_sled_mut(sled_id)?;
+        sled.set_host_phase_2_artifacts(boot_disk, slot_a, slot_b);
+        Ok(self)
+    }
+
     /// Set the zone manifest for a sled from a provided `TufRepoDescription`.
     pub fn sled_set_zone_manifest(
         &mut self,
         sled_id: SledUuid,
         boot_inventory: Result,
     ) -> anyhow::Result<&mut Self> {
-        let sled = self.sleds.get_mut(&sled_id).with_context(|| {
-            format!("attempted to access sled {} not found in system", sled_id)
-        })?;
-        let sled = Arc::make_mut(sled);
+        let sled = self.get_sled_mut(sled_id)?;
         sled.set_zone_manifest(boot_inventory);
         Ok(self)
     }
@@ -715,10 +729,7 @@ impl SystemDescription {
         sled_id: SledUuid,
         mupdate_override: Option,
     ) -> anyhow::Result, String>> {
-        let sled = self.sleds.get_mut(&sled_id).with_context(|| {
-            format!("attempted to access sled {} not found in system", sled_id)
-        })?;
-        let sled = Arc::make_mut(sled);
+        let sled = self.get_sled_mut(sled_id)?;
         Ok(sled.set_mupdate_override(Ok(mupdate_override)))
     }
@@ -730,10 +741,7 @@ impl SystemDescription {
         sled_id: SledUuid,
         message: String,
     ) -> anyhow::Result, String>> {
-        let sled = self.sleds.get_mut(&sled_id).with_context(|| {
-            format!("attempted to access sled {} not found in system", sled_id)
-        })?;
-        let sled = Arc::make_mut(sled);
+        let sled = self.get_sled_mut(sled_id)?;
         Ok(sled.set_mupdate_override(Err(message)))
     }
@@ -1726,6 +1734,68 @@ impl Sled {
         }
     }
 
+    /// Update the reported host OS phase 1 artifacts
+    ///
+    /// If any field is `None`, that field is _unchanged_.
+    // Note that this means there's no way to _unset_ a value.
+    fn set_host_phase_1_artifacts(
+        &mut self,
+        active: Option<M2Slot>,
+        slot_a: Option<ArtifactHash>,
+        slot_b: Option<ArtifactHash>,
+    ) {
+        if let Some(active) = active {
+            self.sp_host_phase_1_active_slot = Some(active);
+        }
+
+        if let Some(slot_a) = slot_a {
+            self.sp_host_phase_1_hash_flash.insert(M2Slot::A, slot_a);
+        }
+
+        if let Some(slot_b) = slot_b {
+            self.sp_host_phase_1_hash_flash.insert(M2Slot::B, slot_b);
+        }
+    }
+
+    /// Update the reported host OS phase 2 artifacts
+    ///
+    /// If any field is `None`, that field is _unchanged_.
+    // Note that this means there's no way to _unset_ a value.
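+    // For example, `set_host_phase_2_artifacts(Some(M2Slot::B), None,
+    // Some(hash))` records that slot B now contains `hash` and is the boot
+    // disk, leaving slot A untouched.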
+ fn set_host_phase_2_artifacts( + &mut self, + boot_disk: Option, + slot_a: Option, + slot_b: Option, + ) { + let last_reconciliation = self + .inventory_sled_agent + .last_reconciliation + .as_mut() + .expect("simulated system populates last reconciliation"); + + if let Some(boot_disk) = boot_disk { + last_reconciliation.boot_partitions.boot_disk = Ok(boot_disk); + } + + if let Some(slot_a) = slot_a { + last_reconciliation + .boot_partitions + .slot_a + .as_mut() + .expect("simulated system populates OS slots") + .artifact_hash = slot_a; + } + + if let Some(slot_b) = slot_b { + last_reconciliation + .boot_partitions + .slot_b + .as_mut() + .expect("simulated system populates OS slots") + .artifact_hash = slot_b; + } + } + fn default_rot_bootloader_caboose(version: String) -> Caboose { let board = sp_sim::SIM_ROT_STAGE0_BOARD.to_string(); Caboose {