From 718ded78cf811b8500aa51deec5699fdb16c6e83 Mon Sep 17 00:00:00 2001 From: "Andrew J. Stone" Date: Wed, 27 Aug 2025 15:35:13 -0400 Subject: [PATCH 01/38] TQ: Add support for reconfiguration (#8741) Builds upon #8682 This PR implements the ability to reconfigure the trust quorum after a commit. This includes the ability to fetch shares for the most recently committed configuration to recompute the rack secret and then include that in an encrypted form in the new configuration for key rotation purposes. The cluster proptest was enhanced to allow this, and it generates enough races - even without crashing and restarting nodes - that it forced the handling of `CommitAdvance` messages to be implemented. This implementation includes the ability to construct key shares for a new configuration when a node misses a prepare and commit for that configuration. This required adding a `KeyShareComputer` which collects key shares for the configuration returned in a `CommitAdvance` so that it can construct its own key share and commit the newly learned configuration. Importantly, constructing a key share and coordinating a reconfiguration are mutually exclusive, and so a new invariant was added to the cluster test. We also start keeping track of expunged nodes in the cluster test, although we don't yet inform them that they are expunged if they reach out to other nodes. There are a few places in the code where a runtime invariant is violated and an error message is logged. This always occurs on message receipt and we don't want to panic at runtime because of an errant message and take down the sled-agent. However, we'd like to be able to report these upstream. The first step here is to be able to report when these situations are hit and put the node in an `Alarm` state such that it is stuck until remedied via support. We should *never* see an Alarm in practice, but since the states are possible to reach, we should manage them appropriately. This will come in a follow up PR and be similar to what I implemented in #8062. --- trust-quorum/src/compute_key_share.rs | 149 ++++++++++ trust-quorum/src/coordinator_state.rs | 304 +++++++++++++++++--- trust-quorum/src/crypto.rs | 2 +- trust-quorum/src/lib.rs | 7 + trust-quorum/src/node.rs | 289 +++++++++++++++++-- trust-quorum/src/persistent_state.rs | 14 + trust-quorum/src/validators.rs | 9 +- trust-quorum/tests/cluster.rs | 397 ++++++++++++++++++++++---- 8 files changed, 1067 insertions(+), 104 deletions(-) create mode 100644 trust-quorum/src/compute_key_share.rs diff --git a/trust-quorum/src/compute_key_share.rs b/trust-quorum/src/compute_key_share.rs new file mode 100644 index 00000000000..2bee03abbea --- /dev/null +++ b/trust-quorum/src/compute_key_share.rs @@ -0,0 +1,149 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Mechanism to track and compute this node's key share for a configuration +//! +//! When a node learns of a committed configuration but does not have a key +//! share for that configuration it must collect a threshold of key shares from +//! other nodes so that it can compute its own key share. 
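+//!
+//! As an illustrative sketch only (not part of the implementation below, and
+//! assuming `shamir::compute_share` accepts a slice of shares, as it is used
+//! later in this module), the recomputation step amounts to interpolating at
+//! this node's x-coordinate, which is its member index plus one, since index
+//! zero evaluates to the rack secret itself:
+//!
+//! ```ignore
+//! use gfss::gf256::Gf256;
+//! use gfss::shamir::{self, Share};
+//!
+//! // Hypothetical helper: derive this node's own share from a threshold of
+//! // shares collected from other members.
+//! fn recompute_own_share(
+//!     collected: &[Share],
+//!     member_index: usize,
+//! ) -> Option<Share> {
+//!     // x = member_index + 1; x = 0 is reserved for the rack secret.
+//!     let x = Gf256::new(u8::try_from(member_index + 1).ok()?);
+//!     shamir::compute_share(collected, x).ok()
+//! }
+//! ```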
+
+use crate::crypto::Sha3_256Digest;
+use crate::{Configuration, Epoch, NodeHandlerCtx, PeerMsgKind, PlatformId};
+use gfss::gf256::Gf256;
+use gfss::shamir::{self, Share};
+use slog::{Logger, error, o, warn};
+use std::collections::BTreeMap;
+
+/// In memory state that tracks retrieval of key shares in order to compute
+/// this node's key share for a given configuration.
+pub struct KeyShareComputer {
+    log: Logger,
+
+    // A copy of the configuration stored in persistent state
+    config: Configuration,
+
+    collected_shares: BTreeMap<PlatformId, Share>,
+}
+
+impl KeyShareComputer {
+    pub fn new(
+        log: &Logger,
+        ctx: &mut impl NodeHandlerCtx,
+        config: Configuration,
+    ) -> KeyShareComputer {
+        let log = log.new(o!("component" => "tq-key-share-computer"));
+
+        for id in config.members.keys() {
+            if ctx.connected().contains(id) {
+                ctx.send(id.clone(), PeerMsgKind::GetShare(config.epoch));
+            }
+        }
+
+        KeyShareComputer { log, config, collected_shares: BTreeMap::new() }
+    }
+
+    pub fn config(&self) -> &Configuration {
+        &self.config
+    }
+
+    pub fn on_connect(
+        &mut self,
+        ctx: &mut impl NodeHandlerCtx,
+        peer: PlatformId,
+    ) {
+        if !self.collected_shares.contains_key(&peer) {
+            ctx.send(peer, PeerMsgKind::GetShare(self.config.epoch));
+        }
+    }
+
+    /// We received a key share
+    ///
+    /// Return true if we have computed and saved our key share to the
+    /// persistent state, false otherwise.
+    pub fn handle_share(
+        &mut self,
+        ctx: &mut impl NodeHandlerCtx,
+        from: PlatformId,
+        epoch: Epoch,
+        share: Share,
+    ) -> bool {
+        // Are we trying to retrieve shares for `epoch`?
+        if epoch != self.config.epoch {
+            warn!(
+                self.log,
+                "Received Share from node with wrong epoch";
+                "received_epoch" => %epoch,
+                "from" => %from
+            );
+            return false;
+        }
+
+        // Is the sender a member of the configuration at `epoch`?
+        let Some(expected_digest) = self.config.members.get(&from) else {
+            warn!(
+                self.log,
+                "Received Share from unexpected node";
+                "epoch" => %epoch,
+                "from" => %from
+            );
+            return false;
+        };
+
+        // Does the share hash match what we expect?
+        let mut digest = Sha3_256Digest::default();
+        share.digest::<sha3::Sha3_256>(&mut digest.0);
+        if digest != *expected_digest {
+            error!(
+                self.log,
+                "Received share with invalid digest";
+                "epoch" => %epoch,
+                "from" => %from
+            );
+        }
+
+        // A valid share was received. Is it new?
+        if self.collected_shares.insert(from, share).is_some() {
+            return false;
+        }
+
+        // Do we have enough shares to compute our rack share?
+        if self.collected_shares.len() < self.config.threshold.0 as usize {
+            return false;
+        }
+
+        // What index are we in the configuration? This is our "x-coordinate"
+        // for our key share calculation. We always start indexing from 1, since
+        // 0 is the rack secret.
+        let index = self
+            .config
+            .members
+            .keys()
+            .position(|id| id == ctx.platform_id())
+            .expect("node exists");
+        let x_coordinate =
+            Gf256::new(u8::try_from(index + 1).expect("index fits in u8"));
+
+        let shares: Vec<_> = self.collected_shares.values().cloned().collect();
+
+        match shamir::compute_share(&shares, x_coordinate) {
+            Ok(our_share) => {
+                ctx.update_persistent_state(|ps| {
+                    let inserted_share =
+                        ps.shares.insert(epoch, our_share).is_none();
+                    let inserted_commit = ps.commits.insert(epoch);
+                    inserted_share || inserted_commit
+                });
+                true
+            }
+            Err(e) => {
+                // TODO: put the node into an `Alarm` state similar to
+                // https://github.com/oxidecomputer/omicron/pull/8062 once we
+                // have alarms?
+ error!(self.log, "Failed to compute share: {}", e); + false + } + } + } +} diff --git a/trust-quorum/src/coordinator_state.rs b/trust-quorum/src/coordinator_state.rs index ee3ebb6229f..78e8c8b1254 100644 --- a/trust-quorum/src/coordinator_state.rs +++ b/trust-quorum/src/coordinator_state.rs @@ -5,12 +5,15 @@ //! State of a reconfiguration coordinator inside a [`crate::Node`] use crate::NodeHandlerCtx; -use crate::crypto::{LrtqShare, Sha3_256Digest, ShareDigestLrtq}; +use crate::crypto::{ + LrtqShare, PlaintextRackSecrets, Sha3_256Digest, ShareDigestLrtq, +}; use crate::validators::{ReconfigurationError, ValidatedReconfigureMsg}; -use crate::{Configuration, Epoch, PeerMsgKind, PlatformId}; +use crate::{Configuration, Epoch, PeerMsgKind, PlatformId, RackSecret}; use gfss::shamir::Share; -use slog::{Logger, o, warn}; +use slog::{Logger, error, info, o, warn}; use std::collections::{BTreeMap, BTreeSet}; +use std::mem; /// The state of a reconfiguration coordinator. /// @@ -68,9 +71,16 @@ impl CoordinatorState { } let op = CoordinatorOperation::Prepare { prepares, - prepare_acks: BTreeSet::new(), + // Always include ourself + prepare_acks: BTreeSet::from([msg.coordinator_id().clone()]), }; + info!( + log, + "Starting coordination on uninitialized node"; + "epoch" => %config.epoch + ); + let state = CoordinatorState::new(log, msg, config.clone(), op); // Safety: Construction of a `ValidatedReconfigureMsg` ensures that @@ -83,16 +93,28 @@ impl CoordinatorState { pub fn new_reconfiguration( log: Logger, msg: ValidatedReconfigureMsg, - last_committed_config: &Configuration, + latest_committed_config: &Configuration, + our_latest_committed_share: Share, ) -> Result { let (config, new_shares) = Configuration::new(&msg)?; - // We must collect shares from the last configuration - // so we can recompute the old rack secret. + info!( + log, + "Starting coordination on existing node"; + "epoch" => %config.epoch, + "last_committed_epoch" => %latest_committed_config.epoch + ); + + // We must collect shares from the last committed configuration so we + // can recompute the old rack secret. let op = CoordinatorOperation::CollectShares { - epoch: last_committed_config.epoch, - members: last_committed_config.members.clone(), - collected_shares: BTreeMap::new(), + // We save this so we can grab the old configuration + old_epoch: latest_committed_config.epoch, + // Always include ourself + old_collected_shares: BTreeMap::from([( + msg.coordinator_id().clone(), + our_latest_committed_share, + )]), new_shares, }; @@ -117,7 +139,7 @@ impl CoordinatorState { } } - // Return the `ValidatedReconfigureMsg` that started this reconfiguration + /// Return the `ValidatedReconfigureMsg` that started this reconfiguration pub fn reconfigure_msg(&self) -> &ValidatedReconfigureMsg { &self.reconfigure_msg } @@ -126,24 +148,34 @@ impl CoordinatorState { &self.op } - // Send any required messages as a reconfiguration coordinator - // - // This varies depending upon the current `CoordinatorState`. - // - // In some cases a `PrepareMsg` will be added locally to the - // `PersistentState`, requiring persistence from the caller. In this case we - // will return a copy of it. - // - // This method is "in progress" - allow unused parameters for now + /// Send any required messages as a reconfiguration coordinator + /// + /// This varies depending upon the current `CoordinatorState`. 
pub fn send_msgs(&mut self, ctx: &mut impl NodeHandlerCtx) { match &self.op { - #[expect(unused)] CoordinatorOperation::CollectShares { - epoch, - members, - collected_shares, + old_epoch, + old_collected_shares, .. - } => {} + } => { + // Send to all connected members in the last committed + // configuration that we haven't yet collected shares from. + let destinations: Vec<_> = ctx + .persistent_state() + .configuration(*old_epoch) + .expect("config exists") + .members + .keys() + .filter(|&m| { + !old_collected_shares.contains_key(m) + && ctx.connected().contains(m) + }) + .cloned() + .collect(); + for to in destinations { + ctx.send(to, PeerMsgKind::GetShare(*old_epoch)); + } + } #[expect(unused)] CoordinatorOperation::CollectLrtqShares { members, shares } => {} CoordinatorOperation::Prepare { prepares, .. } => { @@ -171,9 +203,8 @@ impl CoordinatorState { ) { match &self.op { CoordinatorOperation::CollectShares { - epoch, - members, - collected_shares, + old_epoch, + old_collected_shares, .. } => {} CoordinatorOperation::CollectLrtqShares { members, shares } => {} @@ -225,15 +256,224 @@ impl CoordinatorState { } } } + + pub fn handle_share( + &mut self, + ctx: &mut impl NodeHandlerCtx, + from: PlatformId, + epoch: Epoch, + share: Share, + ) { + match &mut self.op { + CoordinatorOperation::CollectShares { + old_epoch, + old_collected_shares, + new_shares, + } => { + // SAFETY: We started coordinating by looking up the last + // committed configuration, which gave us `old_epoch`. Therefore + // the configuration must exist. + let old_config = ctx + .persistent_state() + .configuration(*old_epoch) + .expect("config exists"); + + let new_epoch = self.configuration.epoch; + + let log = self.log.new(o!( + "last_committed_epoch" => old_epoch.to_string(), + "new_epoch" => new_epoch.to_string() + )); + + // Are we trying to retrieve shares for `epoch`? + if *old_epoch != epoch { + warn!( + log, + "Received Share from node with wrong epoch"; + "received_epoch" => %epoch, + "from" => %from + ); + return; + } + + // Was the sender a member of the configuration at `old_epoch`? + let Some(expected_digest) = old_config.members.get(&from) + else { + warn!( + log, + "Received Share from unexpected node"; + "received_epoch" => %epoch, + "from" => %from + ); + return; + }; + + // Does the share hash match what we expect? + let mut digest = Sha3_256Digest::default(); + share.digest::(&mut digest.0); + if digest != *expected_digest { + error!( + log, + "Received share with invalid digest"; + "received_epoch" => %epoch, + "from" => %from + ); + } + + // A valid share was received. Is it new? + if old_collected_shares.insert(from, share).is_some() { + return; + } + + // Do we have enough shares to recompute the old rack secret? + if old_collected_shares.len() < old_config.threshold.0 as usize + { + return; + } + + // Reconstruct the old rack secret from the shares we collected. + let shares: Vec<_> = + old_collected_shares.values().cloned().collect(); + let old_rack_secret = match RackSecret::reconstruct(&shares) { + Ok(secret) => { + info!( + log, + "Successfully reconstructed old rack secret" + ); + secret + } + Err(err) => { + error!( + log, + "Failed to reconstruct old rack secret"; + &err + ); + return; + } + }; + + // Reconstruct the new rack secret from the shares we created + // at coordination start time. 
+ let shares: Vec<_> = new_shares.values().cloned().collect(); + let new_rack_secret = match RackSecret::reconstruct(&shares) { + Ok(secret) => { + info!( + log, + "Successfully reconstructed new rack secret" + ); + secret + } + Err(err) => { + error!( + log, + "Failed to reconstruct new rack secret"; + &err + ); + return; + } + }; + + // Decrypt the encrypted rack secrets from the old config so + // that we can add `old_rack_secret` to that set for use in the + // new configuration. + let mut plaintext_secrets = if let Some(encrypted_secrets) = + &old_config.encrypted_rack_secrets + { + match encrypted_secrets.decrypt( + old_config.rack_id, + old_config.epoch, + &old_rack_secret, + ) { + Ok(plaintext) => plaintext, + Err(err) => { + error!(log, "Rack secrets decryption error"; &err); + return; + } + } + } else { + PlaintextRackSecrets::new() + }; + plaintext_secrets.insert(*old_epoch, old_rack_secret); + + // Now encrypt the set of old rack secrets with the new rack + // secret. + let new_encrypted_rack_secrets = match plaintext_secrets + .encrypt( + self.configuration.rack_id, + new_epoch, + &new_rack_secret, + ) { + Ok(ciphertext) => ciphertext, + Err(_) => { + error!(log, "Failed to encrypt plaintext rack secrets"); + return; + } + }; + + // Save the encrypted rack secrets in the current configuration + assert!(self.configuration.encrypted_rack_secrets.is_none()); + self.configuration.encrypted_rack_secrets = + Some(new_encrypted_rack_secrets); + + // Take `new_shares` out of `self.op` so we can include them in + // `Prepare` messages; + let mut new_shares = mem::take(new_shares); + + // Update our persistent state + // + // We remove ourself because we don't send a `Prepare` message + // to ourself. + // + // SAFETY: our share already exists at this point and has been + // validated as part of the `Configuration` construction. + let share = new_shares + .remove(ctx.platform_id()) + .expect("my share exists"); + ctx.update_persistent_state(|ps| { + ps.shares.insert(new_epoch, share); + ps.configs + .insert_unique(self.configuration.clone()) + .expect("no existing configuration"); + true + }); + + // Now transition to `CoordinatorOperation::Prepare` + let prepares: BTreeMap<_, _> = new_shares + .into_iter() + .map(|(id, share)| { + (id, (self.configuration.clone(), share)) + }) + .collect(); + self.op = CoordinatorOperation::Prepare { + prepares, + // Always include ourself + prepare_acks: BTreeSet::from([ctx.platform_id().clone()]), + }; + + info!(log, "Starting to prepare after collecting shares"); + self.send_msgs(ctx); + } + op => { + warn!( + self.log, + "Share received when coordinator is not expecting it"; + "op" => op.name(), + "epoch" => %epoch, + "from" => %from + ); + } + } + } } /// What should the coordinator be doing? 
pub enum CoordinatorOperation { - // We haven't started implementing this yet CollectShares { - epoch: Epoch, - members: BTreeMap, - collected_shares: BTreeMap, + old_epoch: Epoch, + old_collected_shares: BTreeMap, + + // These are new shares that the coordinator created that we carry along + // until we get to `CoordinatorOperation::Prepare` new_shares: BTreeMap, }, // We haven't started implementing this yet diff --git a/trust-quorum/src/crypto.rs b/trust-quorum/src/crypto.rs index 8ae9e8d11a1..69d33c6cd66 100644 --- a/trust-quorum/src/crypto.rs +++ b/trust-quorum/src/crypto.rs @@ -271,7 +271,7 @@ pub struct EncryptedRackSecrets { data: Box<[u8]>, } -#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)] +#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error, SlogInlineError)] pub enum DecryptionError { // An opaque error indicating decryption failed #[error("Failed to decrypt rack secrets")] diff --git a/trust-quorum/src/lib.rs b/trust-quorum/src/lib.rs index 4fef1c8a5a7..39418714296 100644 --- a/trust-quorum/src/lib.rs +++ b/trust-quorum/src/lib.rs @@ -12,6 +12,7 @@ use derive_more::Display; use serde::{Deserialize, Serialize}; +mod compute_key_share; mod configuration; mod coordinator_state; pub(crate) mod crypto; @@ -46,6 +47,12 @@ pub use persistent_state::{PersistentState, PersistentStateSummary}; )] pub struct Epoch(pub u64); +impl Epoch { + pub fn next(&self) -> Epoch { + Epoch(self.0.checked_add(1).expect("fewer than 2^64 epochs")) + } +} + /// The number of shares required to reconstruct the rack secret /// /// Typically referred to as `k` in the docs diff --git a/trust-quorum/src/node.rs b/trust-quorum/src/node.rs index 8e821fe2645..72d38a5b674 100644 --- a/trust-quorum/src/node.rs +++ b/trust-quorum/src/node.rs @@ -15,6 +15,7 @@ //! levels. Fortunately, tracking is easier with async code, which drives this //! Node, and so this should not be problematic. +use crate::compute_key_share::KeyShareComputer; use crate::validators::{ MismatchedRackIdError, ReconfigurationError, ValidatedReconfigureMsg, }; @@ -36,6 +37,10 @@ pub struct Node { /// In memory state for when this node is coordinating a reconfiguration coordinator_state: Option, + + /// In memory state for when this node is trying to compute its own key + /// share for a committed epoch. + key_share_computer: Option, } impl Node { @@ -43,12 +48,12 @@ impl Node { let id_str = format!("{:?}", ctx.platform_id()); let log = log.new(o!("component" => "trust-quorum", "platform_id" => id_str)); - Node { log, coordinator_state: None } + Node { log, coordinator_state: None, key_share_computer: None } } /// Start coordinating a reconfiguration /// - /// On success, puts messages that need sending to other nodes in `outbox` + /// On success, queues messages that need sending to other nodes via `ctx` /// and returns a `PersistentState` which the caller must write to disk. /// /// For upgrading from LRTQ, use `coordinate_upgrade_from_lrtq` @@ -69,11 +74,31 @@ impl Node { return Ok(()); }; + if let Some(kcs) = &self.key_share_computer { + // We know from our `ValidatedReconfigureMsg` that we haven't seen a newer + // configuration and we have the correct last committed configuration. Therefore if we are computing a key share, + // we must be doing it for a stale commit and should cancel it. + // + // I don't think it's actually possible to hit this condition, but + // we check anyway. + info!( + self.log, + "Reconfiguration started. 
Cancelling key share compute"; + "reconfiguration_epoch" => %validated_msg.epoch(), + "key_share_compute_epoch" => %kcs.config().epoch + ); + self.key_share_computer = None; + } + self.set_coordinator_state(ctx, validated_msg)?; self.send_coordinator_msgs(ctx); Ok(()) } + pub fn is_computing_key_share(&self) -> bool { + self.key_share_computer.is_some() + } + /// Commit a configuration /// /// This is triggered by a message from Nexus for each node in the @@ -110,6 +135,12 @@ impl Node { // Is this an idempotent or stale request? if let Some(config) = ps.latest_committed_configuration() { if config.epoch >= epoch { + info!( + self.log, + "Received stale or idempotent commit from Nexus"; + "latest_committed_epoch" => %config.epoch, + "received_epoch" => %epoch + ); return Ok(()); } } @@ -143,13 +174,15 @@ impl Node { // Are we currently coordinating for this epoch? // Stop coordinating if we are. - if self.coordinator_state.is_some() { - info!( - self.log, - "Stopping coordination due to commit"; - "epoch" => %epoch - ); - self.coordinator_state = None; + if let Some(cs) = &self.coordinator_state { + if cs.reconfigure_msg().epoch() == epoch { + info!( + self.log, + "Stopping coordination due to commit"; + "epoch" => %epoch + ); + self.coordinator_state = None; + } } Ok(()) @@ -162,7 +195,10 @@ impl Node { peer: PlatformId, ) { ctx.add_connection(peer.clone()); - self.send_coordinator_msgs_to(ctx, peer); + self.send_coordinator_msgs_to(ctx, peer.clone()); + if let Some(ksc) = &mut self.key_share_computer { + ksc.on_connect(ctx, peer); + } } /// A peer node has disconnected from this one @@ -198,6 +234,15 @@ impl Node { PeerMsgKind::Prepare { config, share } => { self.handle_prepare(ctx, from, config, share); } + PeerMsgKind::GetShare(epoch) => { + self.handle_get_share(ctx, from, epoch); + } + PeerMsgKind::Share { epoch, share } => { + self.handle_share(ctx, from, epoch, share); + } + PeerMsgKind::CommitAdvance(config) => { + self.handle_commit_advance(ctx, from, config) + } _ => todo!( "cannot handle message variant yet - not implemented: {msg:?}" ), @@ -237,6 +282,211 @@ impl Node { } } + fn handle_commit_advance( + &mut self, + ctx: &mut impl NodeHandlerCtx, + from: PlatformId, + config: Configuration, + ) { + // We may have already advanced by the time we receive this message. + // Let's check. + if ctx.persistent_state().commits.contains(&config.epoch) { + info!( + self.log, + "Received CommitAdvance, but already committed"; + "from" => %from, + "epoch" => %config.epoch + ); + return; + } + if ctx.persistent_state().has_prepared(config.epoch) { + // Go ahead and commit + info!( + self.log, + "Received CommitAdvance. Already prepared, now committing"; + "from" => %from, + "epoch" => %config.epoch + ); + ctx.update_persistent_state(|ps| ps.commits.insert(config.epoch)); + } + + // Do we have the configuration in our persistent state? If not save it. + ctx.update_persistent_state(|ps| { + if let Err(e) = ps.configs.insert_unique(config.clone()) { + let existing = + e.duplicates().first().expect("duplicate exists"); + if *existing != &config { + error!( + self.log, + "Received a configuration mismatch"; + "from" => %from, + "existing_config" => #?existing, + "received_config" => #?config + ); + // TODO: Alarm + } + false + } else { + true + } + }); + + // Are we coordinating for an older epoch? If so, cancel. 
+ if let Some(cs) = &self.coordinator_state { + let coordinating_epoch = cs.reconfigure_msg().epoch(); + if coordinating_epoch < config.epoch { + info!( + self.log, + "Received CommitAdvance. Cancelling stale coordination"; + "from" => %from, + "coordinating_epoch" => %coordinating_epoch, + "received_epoch" => %config.epoch + ); + self.coordinator_state = None; + } else if coordinating_epoch == config.epoch { + error!( + self.log, + "Received CommitAdvance while coordinating for same epoch!"; + "from" => %from, + "epoch" => %config.epoch + ); + // TODO: Alarm + return; + } else { + info!( + self.log, + "Received CommitAdvance for stale epoch while coordinating"; + "from" => %from, + "received_epoch" => %config.epoch, + "coordinating_epoch" => %coordinating_epoch + ); + return; + } + } + + // Are we already trying to compute our share for this config? + if let Some(ksc) = &mut self.key_share_computer { + if ksc.config().epoch > config.epoch { + let msg = concat!( + "Received stale CommitAdvance. ", + "Already computing for later epoch" + ); + info!( + self.log, + "{msg}"; + "from" => %from, + "epoch" => %ksc.config().epoch, + "received_epoch" => %config.epoch + ); + return; + } else if ksc.config().epoch == config.epoch { + info!( + self.log, + "Received CommitAdvance while already computing share"; + "from" => %from, + "epoch" => %config.epoch + ); + return; + } else { + info!( + self.log, + "Received CommitAdvance while computing share for old epoch"; + "from" => %from, + "epoch" => %ksc.config().epoch, + "received_epoch" => %config.epoch + ); + // Intentionally fall through + } + } + + // We either were collectiong shares for an old epoch or haven't started yet. + self.key_share_computer = + Some(KeyShareComputer::new(&self.log, ctx, config)); + } + + fn handle_get_share( + &mut self, + ctx: &mut impl NodeHandlerCtx, + from: PlatformId, + epoch: Epoch, + ) { + if let Some(latest_committed_config) = + ctx.persistent_state().latest_committed_configuration() + { + if latest_committed_config.epoch > epoch { + info!( + self.log, + concat!( + "Received 'GetShare'` from stale node. ", + "Responded with 'CommitAdvance'." + ); + "from" => %from, + "latest_committed_epoch" => %latest_committed_config.epoch, + "requested_epoch" => %epoch + ); + ctx.send( + from, + PeerMsgKind::CommitAdvance(latest_committed_config.clone()), + ); + return; + } + } + + // If we have the share for the requested epoch, we always return it. We + // know that it is at least as new as the last committed epoch. We might + // not have learned about the configuration being committed yet, but + // other nodes have and may need the share to unlock when the control + // plane is not up yet. + // + // See RFD 238 section 5.3.3 + // + if let Some(share) = ctx.persistent_state().shares.get(&epoch) { + info!( + self.log, + "Received 'GetShare'. Responded with 'Share'."; + "from" => %from, + "epoch" => %epoch + ); + ctx.send(from, PeerMsgKind::Share { epoch, share: share.clone() }); + } else { + // TODO: We may want to return a `NoSuchShare(epoch)` reply if we don't + // have the share, but it's not strictly necessary. It would only be for + // logging/debugging purposes at the requester. 
+ info!( + self.log, + "Received 'GetShare', but it's missing."; + "from" => %from, + "epoch" => %epoch + ); + } + } + + fn handle_share( + &mut self, + ctx: &mut impl NodeHandlerCtx, + from: PlatformId, + epoch: Epoch, + share: Share, + ) { + if let Some(cs) = &mut self.coordinator_state { + cs.handle_share(ctx, from, epoch, share); + } else if let Some(ksc) = &mut self.key_share_computer { + if ksc.handle_share(ctx, from, epoch, share) { + // We're have completed computing our share and saved it to + // our persistent state. We have also marked the configuration + // committed. + self.key_share_computer = None; + } + } else { + warn!( + self.log, + "Received share when not coordinating or computing share"; + "from" => %from, + "epoch" => %epoch + ); + } + } + fn handle_prepare( &mut self, ctx: &mut impl NodeHandlerCtx, @@ -333,9 +583,7 @@ impl Node { } } - /// Set the coordinator state and conditionally set and return the - /// persistent state depending upon whether the node is currently - /// coordinating and what its persistent state is. + /// Start coordinating a reconfiguration /// /// By the time we get here, we know that we are not upgrading from LRTQ as /// we have a `ValidatedReconfigureMsg`. @@ -344,10 +592,12 @@ impl Node { ctx: &mut impl NodeHandlerCtx, msg: ValidatedReconfigureMsg, ) -> Result<(), ReconfigurationError> { + let log = self.log.new(o!("component" => "tq-coordinator-state")); + // We have no committed configuration or lrtq ledger if ctx.persistent_state().is_uninitialized() { let (coordinator_state, my_config, my_share) = - CoordinatorState::new_uninitialized(self.log.clone(), msg)?; + CoordinatorState::new_uninitialized(log, msg)?; self.coordinator_state = Some(coordinator_state); ctx.update_persistent_state(move |ps| { ps.shares.insert(my_config.epoch, my_share); @@ -359,13 +609,16 @@ impl Node { } // We have a committed configuration that is not LRTQ - let config = - ctx.persistent_state().latest_committed_configuration().unwrap(); + let (config, our_share) = ctx + .persistent_state() + .latest_committed_config_and_share() + .expect("committed configuration exists"); self.coordinator_state = Some(CoordinatorState::new_reconfiguration( - self.log.clone(), + log, msg, - &config, + config, + our_share.clone(), )?); Ok(()) diff --git a/trust-quorum/src/persistent_state.rs b/trust-quorum/src/persistent_state.rs index 5a6e35fd516..ba6d1306272 100644 --- a/trust-quorum/src/persistent_state.rs +++ b/trust-quorum/src/persistent_state.rs @@ -22,6 +22,8 @@ pub struct PersistentState { // data it read from disk. This allows us to upgrade from LRTQ. 
pub lrtq: Option, pub configs: IdOrdMap, + + // Our own key shares per configuration pub shares: BTreeMap, pub commits: BTreeSet, @@ -83,6 +85,18 @@ impl PersistentState { }) } + pub fn latest_committed_config_and_share( + &self, + ) -> Option<(&Configuration, &Share)> { + self.latest_committed_epoch().map(|epoch| { + // There *must* be a configuration and share if we have a commit + ( + self.configs.get(&epoch).expect("latest config exists"), + self.shares.get(&epoch).expect("latest share exists"), + ) + }) + } + /// Return the key share for lrtq if one exists pub fn lrtq_key_share(&self) -> Option { self.lrtq.as_ref().map(|p| p.share.clone().into()) diff --git a/trust-quorum/src/validators.rs b/trust-quorum/src/validators.rs index 9e4f3e99158..aaf045d3aa6 100644 --- a/trust-quorum/src/validators.rs +++ b/trust-quorum/src/validators.rs @@ -288,12 +288,11 @@ impl ValidatedReconfigureMsg { }); } - // Ensure that we haven't seen a prepare message for a newer - // configuration. - if let Some(last_prepared_epoch) = persistent_state.latest_config { - if msg.epoch <= last_prepared_epoch { + // Ensure that we haven't seen a newer configuration + if let Some(latest_epoch) = persistent_state.latest_config { + if msg.epoch <= latest_epoch { return Err(ReconfigurationError::PreparedEpochMismatch { - existing: last_prepared_epoch, + existing: latest_epoch, new: msg.epoch, }); } diff --git a/trust-quorum/tests/cluster.rs b/trust-quorum/tests/cluster.rs index ff45396d7a1..9dec6000640 100644 --- a/trust-quorum/tests/cluster.rs +++ b/trust-quorum/tests/cluster.rs @@ -10,8 +10,9 @@ use iddqd::{IdOrdItem, IdOrdMap, id_upcast}; use omicron_test_utils::dev::test_setup_log; use omicron_uuid_kinds::RackUuid; use prop::sample::Index; -use proptest::collection::btree_set; +use proptest::collection::{btree_set, size_range}; use proptest::prelude::*; +use proptest::sample::Selector; use slog::{Logger, info, o}; use std::collections::{BTreeMap, BTreeSet}; use test_strategy::{Arbitrary, proptest}; @@ -165,6 +166,19 @@ impl NexusState { pub fn latest_config_mut(&mut self) -> RefMut<'_, NexusConfig> { self.configs.iter_mut().last().expect("at least one config") } + + pub fn last_committed_config(&self) -> Option<&NexusConfig> { + // IdOrdMap doesn't allow reverse iteration. + // We therefore iterate through all configs to find the latest committed one. + // We could track this out of band but that leaves more room for error. + let mut found: Option<&NexusConfig> = None; + for c in &self.configs { + if c.op == NexusOp::Committed { + found = Some(c) + } + } + found + } } /// Faults in our system. It's useful to keep these self contained and not @@ -257,6 +271,18 @@ struct TestState { /// If an epoch got skipped due to a crashed coordinator then there will not /// be a configuration for that epoch. pub all_coordinated_configs: IdOrdMap, + + /// Expunged nodes cannot be added to a cluster. We never reuse nodes in + /// this test. We include nodes here that may not know yet that they have + /// been expunged in the `Sut`. + pub expunged: BTreeSet, + + /// Keep track of the number of generated `Action`s that get skipped + /// + /// Because we generate actions up front, we don't know if they are valid or + /// not to run during the test. This results in quite a few discards, and we + /// track them for help in refining the test. 
+ pub skipped_actions: usize, } impl TestState { @@ -271,6 +297,8 @@ impl TestState { member_universe: member_universe(), faults: Faults::default(), all_coordinated_configs: IdOrdMap::new(), + expunged: BTreeSet::new(), + skipped_actions: 0, } } @@ -321,11 +349,12 @@ impl TestState { } } - /// Send the first `ReconfigureMsg` from `Nexus` to the coordinator node + /// Send the latest `ReconfigureMsg` from `Nexus` to the coordinator node /// /// If the node is not available, then abort the configuration at nexus - pub fn send_initial_reconfigure_msg(&mut self) { + pub fn send_reconfigure_msg(&mut self) { let (coordinator, msg) = self.nexus.reconfigure_msg_for_latest_config(); + let epoch_to_config = msg.epoch; if self.faults.crashed_nodes.contains(coordinator) { // We must abort the configuration. This mimics a timeout. self.nexus.abort_reconfiguration(); @@ -335,8 +364,23 @@ impl TestState { .nodes .get_mut(coordinator) .expect("coordinator exists"); + node.coordinate_reconfiguration(ctx, msg) .expect("valid configuration"); + + // Do we have a `Configuration` for this epoch yet? + // + // For most reconfigurations, shares for the last committed + // configuration must be retrieved before the configuration is + // generated and saved in the persistent state. + let latest_persisted_config = + ctx.persistent_state().latest_config().expect("config exists"); + if latest_persisted_config.epoch == epoch_to_config { + // Save the configuration for later + self.all_coordinated_configs + .insert_unique(latest_persisted_config.clone()) + .expect("unique"); + } } } @@ -383,16 +427,6 @@ impl TestState { assert!(ctx.persistent_state_change_check_and_reset()); assert!(ctx.persistent_state().has_prepared(msg.epoch)); assert!(ctx.persistent_state().latest_committed_epoch().is_none()); - - // Save the configuration for later - self.all_coordinated_configs - .insert_unique( - ctx.persistent_state() - .latest_config() - .expect("config exists") - .clone(), - ) - .expect("unique"); } Ok(()) @@ -423,33 +457,44 @@ impl TestState { actions: Vec, ) -> Result<(), TestCaseError> { for action in actions { - match action { + let skipped = match action { Action::DeliverEnvelopes(indices) => { self.action_deliver_envelopes(indices) } - Action::PollPrepareAcks => { - self.action_poll_prepare_acks(); - } - Action::Commit(indices) => { - self.action_commit(indices); - } + Action::PollPrepareAcks => self.action_poll_prepare_acks(), + Action::Commit(indices) => self.action_commit(indices), Action::DeliverNexusReplies(n) => { - self.action_deliver_nexus_replies(n); + self.action_deliver_nexus_replies(n) } + Action::Reconfigure { + num_added_nodes, + removed_nodes, + threshold, + coordinator, + } => self.action_reconfigure( + num_added_nodes, + removed_nodes, + threshold, + coordinator, + ), + }; + + if skipped { + self.skipped_actions += 1; + } else { + self.check_invariants()?; } - - self.check_invariants()?; } Ok(()) } // Deliver network messages to generated destinations - fn action_deliver_envelopes(&mut self, indices: Vec) { + fn action_deliver_envelopes(&mut self, indices: Vec) -> bool { let destinations: Vec<_> = self.bootstrap_network.keys().cloned().collect(); if destinations.is_empty() { // nothing to do - return; + return true; } for index in indices { let id = index.get(&destinations); @@ -460,6 +505,38 @@ impl TestState { self.sut.nodes.get_mut(id).expect("destination exists"); node.handle(ctx, envelope.from, envelope.msg); + // If this is the first time we've seen a configuration, track it + // + // We have to 
do this here because for reconfigurations, shares + // for the last committed reconfiguration are gathered before + // the config is created. We don't know exactly when config + // generation occurs, but know that it happens after envelopes + // are delivered, except for configurations that don't have + // a last committed config. This is normally the initial + // configuration, but can be later ones if the initial config + // is aborted. + if ctx.persistent_state_change_check_and_reset() { + if let Some(latest_config) = + ctx.persistent_state().latest_config() + { + if !self + .all_coordinated_configs + .contains_key(&latest_config.epoch) + { + // The coordinator must be the first node to create + // the configuration. + assert_eq!( + &latest_config.coordinator, + ctx.platform_id() + ); + + self.all_coordinated_configs + .insert_unique(latest_config.clone()) + .expect("unique config"); + } + } + } + // Send any messages as a result of handling this message send_envelopes(ctx, &mut self.bootstrap_network); } @@ -467,15 +544,17 @@ impl TestState { // Remove any destinations with zero messages in-flight self.bootstrap_network.retain(|_, msgs| !msgs.is_empty()); + + false } // Call `Node::commit_reconfiguration` for nodes that have prepared and have // not yet acked their commit. - fn action_commit(&mut self, indices: Vec) { + fn action_commit(&mut self, indices: Vec) -> bool { let rack_id = self.nexus.rack_id; let latest_config = self.nexus.latest_config(); if latest_config.op != NexusOp::Committed { - return; + return true; } let committable: Vec<_> = latest_config .prepared_members @@ -484,7 +563,8 @@ impl TestState { if committable.is_empty() { // All members have committed - return; + self.skipped_actions += 1; + return true; } // We shouldn't be calling commit twice or sending multiple replies @@ -508,9 +588,10 @@ impl TestState { for from in committed { self.underlay_network.push(NexusReply::CommitAck { from, epoch }); } + false } - fn action_deliver_nexus_replies(&mut self, n: usize) { + fn action_deliver_nexus_replies(&mut self, n: usize) -> bool { let mut config = self.nexus.latest_config_mut(); let n = usize::min(n, self.underlay_network.len()); for reply in self.underlay_network.drain(0..n) { @@ -522,15 +603,16 @@ impl TestState { } } } + false } /// Poll the coordinator for acks if nexus is preparing, and commit /// if enough acks have been received. - fn action_poll_prepare_acks(&mut self) { + fn action_poll_prepare_acks(&mut self) -> bool { let mut latest_config = self.nexus.latest_config_mut(); if latest_config.op != NexusOp::Preparing { // No point in checking. Commit or abort has occurred. - return; + return true; } // If the coordinator has crashed then Nexus should abort. @@ -540,7 +622,6 @@ impl TestState { } // Lookup the coordinator node - // let (coordinator, ctx) = self .sut .nodes @@ -554,7 +635,7 @@ impl TestState { .latest_config() .map_or(Epoch(0), |c| c.epoch); if coordinator_epoch != latest_config.epoch { - return; + return true; } // Poll the coordinator for acks. 
@@ -581,7 +662,166 @@ impl TestState { ); latest_config.op = NexusOp::Committed; + + let new_members = latest_config.members.clone(); + let new_epoch = latest_config.epoch; + + // Expunge any removed nodes from the last committed configuration + if let Some(last_committed_epoch) = + latest_config.last_committed_epoch + { + // Release our mutable borrow + drop(latest_config); + + let last_committed_config = self + .nexus + .configs + .get(&last_committed_epoch) + .expect("config exists"); + + let expunged = last_committed_config + .members + .difference(&new_members) + .cloned(); + + for e in expunged { + info!( + self.log, + "expunged node"; + "epoch" => %new_epoch, + "platform_id" => %e); + self.expunged.insert(e); + } + } } + false + } + + fn action_reconfigure( + &mut self, + num_added_nodes: usize, + removed_nodes: Vec, + threshold: Index, + coordinator: Selector, + ) -> bool { + let latest_epoch = self.nexus.latest_config().epoch; + let last_committed_config = self.nexus.last_committed_config(); + // We must leave at least one node available to coordinate between the + // new and old configurations. + let (new_members, coordinator) = match last_committed_config { + Some(c) => { + let possible_num_nodes_to_add = usize::min( + num_added_nodes, + MAX_CLUSTER_SIZE - c.members.len(), + ); + + // How many nodes can we add taking into account expunged nodes + // and the existing cluster? + let num_nodes_to_add = usize::min( + MEMBER_UNIVERSE_SIZE + - c.members.len() + - self.expunged.len(), + possible_num_nodes_to_add, + ); + + // What is the max number of nodes that we can remove such that: + // * 1 node is in both the new and old cluster + // * The total number of nodes in the new cluster is + // >= MIN_CLUSTER_SIZE + let max_nodes_to_remove = (c.members.len() - 1 + + num_nodes_to_add) + .saturating_sub(MIN_CLUSTER_SIZE); + + // Find a potential coordinator node + // + // We can only start a reconfiguration if Nexus has an + // acknowledgement that at least one node has seen the commit. + if c.committed_members.is_empty() { + return true; + } + let coordinator = + coordinator.select(c.committed_members.iter()); + + // First, find the set of nodes to remove + let mut nodes_to_remove = BTreeSet::new(); + for s in removed_nodes { + // The same selection can be chosen more than once. so we + // must add the extra check rather than shrinking the length + // of the `removed_nodes` iterator with `take`.; + if nodes_to_remove.len() == max_nodes_to_remove { + break; + } + let node = s.select(c.members.iter()); + if node != coordinator { + nodes_to_remove.insert(node.clone()); + } + } + + // Then find the set of nodes to add + // + // Just pick the first set of nodes in `member_universe` + // that are not in the current membership and not expunged. + let mut nodes_to_add = BTreeSet::new(); + for id in self.member_universe.iter() { + if nodes_to_add.len() == num_nodes_to_add { + break; + } + if !self.expunged.contains(id) && !c.members.contains(id) { + nodes_to_add.insert(id.clone()); + } + } + + // Finally, create our new membership + let mut new_members = c.members.clone(); + for id in nodes_to_remove { + new_members.remove(&id); + } + for id in nodes_to_add { + new_members.insert(id); + } + (new_members, coordinator.clone()) + } + None => { + // We are generating a new config + if num_added_nodes < MIN_CLUSTER_SIZE { + // Nothing to do here. 
+ return true; + } + // Pick the first `num_added_nodes` from member_universe + // It's as good a choice as any and deterministic + let new_members: BTreeSet<_> = self + .member_universe + .iter() + .take(num_added_nodes) + .cloned() + .collect(); + + // There is no last committed configuration then we can go ahead and + // choose any node. We just pick the first one for simplicity and determinism. + let coordinator = + coordinator.select(new_members.iter()).clone(); + (new_members, coordinator) + } + }; + let threshold = + Threshold(usize::max(2, threshold.index(new_members.len())) as u8); + let epoch = latest_epoch.next(); + + // Find a coordinator from the last committed configurarion that has + // acknowledged the commit from Nexus. If there isn't a node that + // acknowledged the commit, then we can't reconfigure yet. + // + let last_committed_epoch = last_committed_config.map(|c| c.epoch); + let nexus_config = NexusConfig::new( + epoch, + last_committed_epoch, + coordinator, + new_members, + threshold, + ); + self.nexus.configs.insert_unique(nexus_config).expect("new config"); + self.send_reconfigure_msg(); + false } /// At every point during the running of the test, invariants over the system @@ -593,6 +833,7 @@ impl TestState { self.invariant_all_nodes_have_same_configuration_per_epoch()?; self.invariant_nodes_have_prepared_if_coordinator_has_acks()?; self.invariant_nodes_have_committed_if_nexus_has_acks()?; + self.invariant_nodes_not_coordinating_and_computing_key_share_simultaneously()?; Ok(()) } @@ -636,19 +877,26 @@ impl TestState { .nodes .get(&latest_config.coordinator) .expect("node exists"); - let acked = - node.get_coordinator_state().map_or(BTreeSet::new(), |cs| { + + let (acked, coordinating_epoch) = node + .get_coordinator_state() + .map_or((BTreeSet::new(), Epoch(0)), |cs| { if let CoordinatorOperation::Prepare { prepare_acks, .. } = cs.op() { - prepare_acks.clone() + (prepare_acks.clone(), cs.reconfigure_msg().epoch()) } else { - BTreeSet::new() + (BTreeSet::new(), Epoch(0)) } }); + // Make sure the coordinator actually is coordinating for this epoch + if coordinating_epoch != latest_config.epoch { + return Ok(()); + } (acked, latest_config.epoch) }; + // Make sure the coordinator actually is coordinating for this epoch for id in acked { let (_, ctx) = self.sut.nodes.get(&id).expect("node exists"); @@ -681,6 +929,30 @@ impl TestState { Ok(()) } + + // A node cannot be coordinating a reconfiguration and computing a key share + // at the same time. The following two conditions based on local knowledge + // are mutually exclusive. + // + // * If a node is coordinating a reconfiguration then they are creating the + // key shares for the newest configuration and have a key share for the + // latest committed configuration. + // * If a node is computing a key share it means that they don't have the a + // key share for the latest committed configuration that they know of. + fn invariant_nodes_not_coordinating_and_computing_key_share_simultaneously( + &self, + ) -> Result<(), TestCaseError> { + for (id, (node, _)) in &self.sut.nodes { + prop_assert!( + !(node.get_coordinator_state().is_some() + && node.is_computing_key_share()), + "Coordinating and computing key share on node {}", + id + ); + } + + Ok(()) + } } /// Broken out of `TestState` to alleviate borrow checker woes @@ -697,6 +969,7 @@ fn send_envelopes( // A high-level set of generated actions to drive the test forward. 
#[derive(Debug, Arbitrary)] +#[allow(clippy::large_enum_variant)] pub enum Action { /// For each indexed member deliver an in-flight bootstrap network msg if /// there is one. @@ -705,8 +978,10 @@ pub enum Action { /// `test_state.bootstrap_network`. /// /// We may deliver more than one message to each member. - #[weight(50)] - DeliverEnvelopes(Vec), + #[weight(4)] + DeliverEnvelopes( + #[any(size_range(1..MAX_DELIVERED_ENVELOPES).lift())] Vec, + ), /// Have Nexus poll the coordinator for the latest configuration if it is /// still being prepared. @@ -715,25 +990,44 @@ pub enum Action { /// simulates recording this information in CRDB. If Nexus has witnessed /// that enough nodes have acked prepares then it changes the config /// operation to committed. - #[weight(10)] + #[weight(4)] PollPrepareAcks, /// If the current configuration at nexus is marked `NexusOp::Committed` /// then call `Node::commit_configuration` for each indexed /// node in `NexusConfig::prepared_members` that is not also in /// `NexusConfig::committed_members`. - #[weight(5)] - Commit(Vec), + #[weight(4)] + Commit(#[any(size_range(1..MAX_CONCURRENT_COMMITS).lift())] Vec), /// Deliver in-flight messages to Nexus from the underlay network - #[weight(5)] + #[weight(4)] DeliverNexusReplies(#[strategy(1..10usize)] usize), + + /// Generate a new configuration by adding a number of *new* (non-expunged) + /// nodes to the cluster from `member_universe` and removing the specific + /// nodes in the current cluster given by the indices `removed_nodes`. + #[weight(1)] + Reconfigure { + #[strategy(0..MAX_ADDED_NODES)] + num_added_nodes: usize, + #[any(size_range(0..MAX_REMOVED_NODES).lift())] + removed_nodes: Vec, + threshold: Index, + coordinator: Selector, + }, } const MIN_CLUSTER_SIZE: usize = 3; -const MAX_CLUSTER_SIZE: usize = 32; -const MEMBER_UNIVERSE_SIZE: usize = 64; +const MAX_CLUSTER_SIZE: usize = 20; +const MEMBER_UNIVERSE_SIZE: usize = 40; const MAX_INITIAL_DOWN_NODES: usize = 5; +const MAX_ADDED_NODES: usize = 5; +const MAX_REMOVED_NODES: usize = 3; +const MAX_DELIVERED_ENVELOPES: usize = 20; +const MAX_CONCURRENT_COMMITS: usize = 10; +const MIN_ACTIONS: usize = 100; +const MAX_ACTIONS: usize = 1000; /// Information about configurations used at test generation time #[derive(Debug, Clone, Arbitrary)] @@ -779,19 +1073,20 @@ pub struct TestInput { // disconnected from every other node. 
#[strategy(btree_set(0..=MEMBER_UNIVERSE_SIZE, 0..MAX_INITIAL_DOWN_NODES))] initial_down_nodes: BTreeSet, + #[any(size_range(MIN_ACTIONS..MAX_ACTIONS).lift())] actions: Vec, } #[proptest] -fn test_coordinator_behavior_from_empty_state(input: TestInput) { - let logctx = test_setup_log("coordinator_behavior_from_empty_state"); +fn test_trust_quorum_protocol(input: TestInput) { + let logctx = test_setup_log("test_trust_quorum_protocol"); let mut state = TestState::new(logctx.log.clone()); // Perform the initial setup state.create_nexus_initial_config(input.initial_config); state.setup_initial_connections(input.initial_down_nodes); - state.send_initial_reconfigure_msg(); + state.send_reconfigure_msg(); // Check the results of the initial setup state.postcondition_initial_configuration()?; @@ -802,5 +1097,11 @@ fn test_coordinator_behavior_from_empty_state(input: TestInput) { // Start executing the actions state.run_actions(input.actions)?; + info!( + state.log, + "Test complete"; + "skipped_actions" => state.skipped_actions + ); + logctx.cleanup_successful(); } From 58ee63beeb24da1e447b83651e54205012707eaa Mon Sep 17 00:00:00 2001 From: Rain Date: Wed, 27 Aug 2025 13:35:26 -0700 Subject: [PATCH 02/38] [1/n] [reconfigurator-planning] move determination of add/update zones into a separate method (#8920) I'd like to determine condition 4 of whether to perform updates (all deployment units are at known versions) by looking at the blueprint after noop conversions have been applied. We could either try and guess what would happen by looking at the noop info, or just move this determination to after noop updates have been applied. Also make the planning report store the reasons zone adds and updates are blocked, and print them as part of the planning report. There are no behavior changes in this PR. 
--- .../output/cmds-mupdate-update-flow-stdout | 72 ++++++++++++------- .../output/cmds-noop-image-source-stdout | 14 ++-- nexus/reconfigurator/planning/src/planner.rs | 49 +++++++------ nexus/types/src/deployment/planning_report.rs | 50 ++++++++----- openapi/nexus-internal.json | 23 +++--- 5 files changed, 129 insertions(+), 79 deletions(-) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout index 407c97e9ba5..6a3a05676b0 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout @@ -505,9 +505,12 @@ planning report for blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736: chicken switches: add zones with mupdate override: false -* waiting on MUPdate overrides -* MUPdate overrides exist -* zone updates waiting on MUPdate overrides +* zone adds waiting on blockers +* zone adds and updates are blocked: + - current target release generation (2) is lower than minimum required by blueprint (3) + - sleds have remove mupdate override set in blueprint: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, d81c6a84-79b8-4958-ae41-ea46c9b19763 + - sleds have mupdate override errors: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c +* zone updates waiting on zone add blockers @@ -880,9 +883,11 @@ planning report for blueprint 626487fa-7139-45ec-8416-902271fc730b: chicken switches: add zones with mupdate override: false -* waiting on MUPdate overrides -* MUPdate overrides exist -* zone updates waiting on MUPdate overrides +* zone adds waiting on blockers +* zone adds and updates are blocked: + - sleds have remove mupdate override set in blueprint: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, d81c6a84-79b8-4958-ae41-ea46c9b19763 + - sleds have mupdate override errors: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c +* zone updates waiting on zone add blockers > blueprint-diff latest @@ -1104,9 +1109,12 @@ chicken switches: add zones with mupdate override: false * noop converting 6/6 install-dataset zones to artifact store on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 -* waiting on MUPdate overrides -* MUPdate overrides exist -* zone updates waiting on MUPdate overrides +* zone adds waiting on blockers +* zone adds and updates are blocked: + - current target release generation (3) is lower than minimum required by blueprint (4) + - sleds have remove mupdate override set in blueprint: d81c6a84-79b8-4958-ae41-ea46c9b19763 + - sleds have mupdate override errors: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c +* zone updates waiting on zone add blockers @@ -1393,9 +1401,11 @@ chicken switches: add zones with mupdate override: false * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 6 zones are already from artifacts -* waiting on MUPdate overrides -* MUPdate overrides exist -* zone updates waiting on MUPdate overrides +* zone adds waiting on blockers +* zone adds and updates are blocked: + - current target release generation (3) is lower than minimum required by blueprint (4) + - sleds have mupdate override errors: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c +* zone updates waiting on zone add blockers > blueprint-show latest @@ -1568,9 +1578,11 @@ chicken switches: add zones with mupdate override: false * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 6 zones are already from artifacts -* waiting on MUPdate overrides -* MUPdate overrides exist -* zone updates waiting on MUPdate 
overrides +* zone adds waiting on blockers +* zone adds and updates are blocked: + - current target release generation (3) is lower than minimum required by blueprint (4) + - sleds have mupdate override errors: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c +* zone updates waiting on zone add blockers @@ -1811,9 +1823,10 @@ chicken switches: * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 6 zones are already from artifacts * noop converting 6/6 install-dataset zones to artifact store on sled d81c6a84-79b8-4958-ae41-ea46c9b19763 -* waiting on MUPdate overrides -* MUPdate overrides exist -* zone updates waiting on MUPdate overrides +* zone adds waiting on blockers +* zone adds and updates are blocked: + - sleds have mupdate override errors: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c +* zone updates waiting on zone add blockers > blueprint-show latest @@ -1987,9 +2000,10 @@ chicken switches: * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 6 zones are already from artifacts * noop converting 6/6 install-dataset zones to artifact store on sled d81c6a84-79b8-4958-ae41-ea46c9b19763 -* waiting on MUPdate overrides -* MUPdate overrides exist -* zone updates waiting on MUPdate overrides +* zone adds waiting on blockers +* zone adds and updates are blocked: + - sleds have mupdate override errors: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c +* zone updates waiting on zone add blockers @@ -2962,9 +2976,11 @@ chicken switches: * skipping noop zone image source check on sled c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b: all 0 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 6 zones are already from artifacts -* waiting on MUPdate overrides -* MUPdate overrides exist -* zone updates waiting on MUPdate overrides +* zone adds waiting on blockers +* zone adds and updates are blocked: + - current target release generation (4) is lower than minimum required by blueprint (5) + - sleds have remove mupdate override set in blueprint: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 +* zone updates waiting on zone add blockers > blueprint-diff latest @@ -3191,11 +3207,13 @@ generated blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c based on parent bluepri planning report for blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c: * skipping noop zone image source check on sled c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b: all 0 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 6 zones are already from artifacts -* MUPdate overrides exist -* adding zones despite MUPdate override, as specified by the `add_zones_with_mupdate_override` chicken switch +* zone adds and updates are blocked: + - current target release generation (4) is lower than minimum required by blueprint (5) + - sleds have remove mupdate override set in blueprint: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 +* adding zones despite being blocked, as specified by the `add_zones_with_mupdate_override` chicken switch * discretionary zone placement waiting for NTP zones on sleds: c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b * missing NTP zone on sled c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b -* zone updates waiting on MUPdate overrides +* zone updates waiting on zone add blockers > blueprint-diff latest diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout index e9b59689616..3bd4b1ddc65 100644 --- 
a/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout @@ -183,9 +183,10 @@ chicken switches: * noop converting 6/6 install-dataset zones to artifact store on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 * noop converting 5/6 install-dataset zones to artifact store on sled aff6c093-197d-42c5-ad80-9f10ba051a34 -* waiting on MUPdate overrides -* MUPdate overrides exist -* zone updates waiting on MUPdate overrides +* zone adds waiting on blockers +* zone adds and updates are blocked: + - sleds have remove mupdate override set in blueprint: d81c6a84-79b8-4958-ae41-ea46c9b19763 +* zone updates waiting on zone add blockers @@ -549,9 +550,10 @@ chicken switches: * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 6 zones are already from artifacts * noop converting 2/2 install-dataset zones to artifact store on sled e96e226f-4ed9-4c01-91b9-69a9cd076c9e -* waiting on MUPdate overrides -* MUPdate overrides exist -* zone updates waiting on MUPdate overrides +* zone adds waiting on blockers +* zone adds and updates are blocked: + - sleds have remove mupdate override set in blueprint: d81c6a84-79b8-4958-ae41-ea46c9b19763 +* zone updates waiting on zone add blockers diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index 505646d8458..0731ba5350e 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -174,8 +174,7 @@ impl<'a> Planner<'a> { let mut noop_info = NoopConvertInfo::new(self.input, self.inventory, &self.blueprint)?; - let plan_mupdate_override_res = - self.do_plan_mupdate_override(&mut noop_info)?; + let actions_by_sled = self.do_plan_mupdate_override(&mut noop_info)?; // Log noop-convert results after do_plan_mupdate_override, because this // step might alter noop_info. @@ -183,31 +182,32 @@ impl<'a> Planner<'a> { // Within `do_plan_noop_image_source`, we plan noop image sources on // sleds other than those currently affected by mupdate overrides. This - // means that we don't have to wait for the `plan_mupdate_override_res` - // result for that step. + // means that we don't have to consider anything + // `do_plan_mupdate_override` does for this step. let noop_image_source = self.do_plan_noop_image_source(noop_info)?; + let add_update_blocked_reasons = + self.should_plan_add_or_update(&actions_by_sled)?; + // Only plan MGS-based updates updates if there are no outstanding // MUPdate overrides. - let mgs_updates = if plan_mupdate_override_res.is_empty() { + let mgs_updates = if add_update_blocked_reasons.is_empty() { self.do_plan_mgs_updates()? } else { PlanningMgsUpdatesStepReport::new(PendingMgsUpdates::new()) }; // Likewise for zone additions, unless overridden with the chicken switch. - let has_mupdate_override = !plan_mupdate_override_res.is_empty(); let add_zones_with_mupdate_override = self.input.chicken_switches().add_zones_with_mupdate_override; - let mut add = - if !has_mupdate_override || add_zones_with_mupdate_override { - self.do_plan_add(&mgs_updates)? - } else { - PlanningAddStepReport::waiting_on( - ZoneAddWaitingOn::MupdateOverrides, - ) - }; - add.has_mupdate_override = has_mupdate_override; + let mut add = if add_update_blocked_reasons.is_empty() + || add_zones_with_mupdate_override + { + self.do_plan_add(&mgs_updates)? 
+ } else { + PlanningAddStepReport::waiting_on(ZoneAddWaitingOn::Blockers) + }; + add.add_update_blocked_reasons = add_update_blocked_reasons; add.add_zones_with_mupdate_override = add_zones_with_mupdate_override; let zone_updates = if add.any_discretionary_zones_placed() { @@ -223,10 +223,10 @@ impl<'a> Planner<'a> { PlanningZoneUpdatesStepReport::waiting_on( ZoneUpdatesWaitingOn::PendingMgsUpdates, ) - } else if !plan_mupdate_override_res.is_empty() { - // ... or if there are pending MUPdate overrides. + } else if !add.add_update_blocked_reasons.is_empty() { + // ... or if there are pending zone add blockers. PlanningZoneUpdatesStepReport::waiting_on( - ZoneUpdatesWaitingOn::MupdateOverrides, + ZoneUpdatesWaitingOn::ZoneAddBlockers, ) } else { self.do_plan_zone_updates(&mgs_updates)? @@ -1528,10 +1528,12 @@ impl<'a> Planner<'a> { Ok(report) } + /// Perform planning for mupdate overrides, returning a map of sleds to + /// actions taken. fn do_plan_mupdate_override( &mut self, noop_info: &mut NoopConvertInfo, - ) -> Result, Error> { + ) -> Result, Error> { // For each sled, compare what's in the inventory to what's in the // blueprint. let mut actions_by_sled = BTreeMap::new(); @@ -1638,7 +1640,14 @@ impl<'a> Planner<'a> { } } - // Now we need to determine whether to also perform other actions like + Ok(actions_by_sled) + } + + fn should_plan_add_or_update( + &self, + actions_by_sled: &BTreeMap, + ) -> Result, Error> { + // We need to determine whether to also perform other actions like // updating or adding zones. We have to be careful here: // // * We may have moved existing zones with an Artifact source to using diff --git a/nexus/types/src/deployment/planning_report.rs b/nexus/types/src/deployment/planning_report.rs index 04d1fde21ee..1ed30442abc 100644 --- a/nexus/types/src/deployment/planning_report.rs +++ b/nexus/types/src/deployment/planning_report.rs @@ -12,6 +12,7 @@ use super::PendingMgsUpdates; use super::PlannerChickenSwitches; use daft::Diffable; +use indent_write::fmt::IndentWriter; use omicron_common::policy::COCKROACHDB_REDUNDANCY; use omicron_uuid_kinds::BlueprintUuid; use omicron_uuid_kinds::MupdateOverrideUuid; @@ -26,6 +27,7 @@ use serde::Serialize; use std::collections::BTreeMap; use std::collections::BTreeSet; use std::fmt; +use std::fmt::Write; /// A full blueprint planning report. Other than the blueprint ID, each /// field corresponds to a step in the update planner, i.e., a subroutine @@ -531,14 +533,15 @@ pub struct PlanningAddSufficientZonesExist { )] #[serde(rename_all = "snake_case", tag = "type")] pub enum ZoneAddWaitingOn { - /// Waiting on one or more MUPdate overrides to clear. - MupdateOverrides, + /// Waiting on one or more blockers (typically MUPdate-related reasons) to + /// clear. + Blockers, } impl ZoneAddWaitingOn { pub fn as_str(&self) -> &'static str { match self { - Self::MupdateOverrides => "MUPdate overrides", + Self::Blockers => "blockers", } } } @@ -550,10 +553,14 @@ pub struct PlanningAddStepReport { /// What are we waiting on to start zone additions? pub waiting_on: Option, - /// Are there any outstanding MUPdate overrides? - pub has_mupdate_override: bool, + /// Reasons why zone adds and any updates are blocked. + /// + /// This is typically a list of MUPdate-related reasons. + pub add_update_blocked_reasons: Vec, - /// The value of the homonymous chicken switch. + /// The value of the homonymous chicken switch. (What this really means is + /// that zone adds happen despite being blocked by one or more + /// MUPdate-related reasons.) 
pub add_zones_with_mupdate_override: bool, pub sleds_without_ntp_zones_in_inventory: BTreeSet, @@ -580,7 +587,7 @@ impl PlanningAddStepReport { pub fn new() -> Self { Self { waiting_on: None, - has_mupdate_override: true, + add_update_blocked_reasons: Vec::new(), add_zones_with_mupdate_override: false, sleds_without_ntp_zones_in_inventory: BTreeSet::new(), sleds_without_zpools_for_ntp_zones: BTreeSet::new(), @@ -602,6 +609,7 @@ impl PlanningAddStepReport { pub fn is_empty(&self) -> bool { self.waiting_on.is_none() + && self.add_update_blocked_reasons.is_empty() && self.sleds_without_ntp_zones_in_inventory.is_empty() && self.sleds_without_zpools_for_ntp_zones.is_empty() && self.sleds_waiting_for_ntp_zone.is_empty() @@ -664,10 +672,10 @@ impl PlanningAddStepReport { } impl fmt::Display for PlanningAddStepReport { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, mut f: &mut fmt::Formatter) -> fmt::Result { let Self { waiting_on, - has_mupdate_override, + add_update_blocked_reasons, add_zones_with_mupdate_override, sleds_without_ntp_zones_in_inventory, sleds_without_zpools_for_ntp_zones, @@ -681,17 +689,27 @@ impl fmt::Display for PlanningAddStepReport { } = self; if let Some(waiting_on) = waiting_on { - writeln!(f, "* waiting on {}", waiting_on.as_str())?; + writeln!(f, "* zone adds waiting on {}", waiting_on.as_str())?; } - if *has_mupdate_override { - writeln!(f, "* MUPdate overrides exist")?; + if !add_update_blocked_reasons.is_empty() { + // If zone adds are blocked on a set of reasons, zone updates are + // blocked on the same reason. Make that clear by saying "zone adds + // and updates are blocked" rather than just "zone adds are + // blocked". + writeln!(f, "* zone adds and updates are blocked:")?; + for reason in add_update_blocked_reasons { + let mut indent_writer = + IndentWriter::new_skip_initial(" ", f); + writeln!(indent_writer, " - {}", reason)?; + f = indent_writer.into_inner(); + } } if *add_zones_with_mupdate_override { writeln!( f, - "* adding zones despite MUPdate override, \ + "* adding zones despite being blocked, \ as specified by the `add_zones_with_mupdate_override` \ chicken switch" )?; @@ -958,8 +976,8 @@ pub enum ZoneUpdatesWaitingOn { /// Waiting on updates to RoT / SP / Host OS / etc. PendingMgsUpdates, - /// Waiting on one or more MUPdate overrides to clear. - MupdateOverrides, + /// Waiting on the same set of blockers zone adds are waiting on. + ZoneAddBlockers, } impl ZoneUpdatesWaitingOn { @@ -969,7 +987,7 @@ impl ZoneUpdatesWaitingOn { Self::PendingMgsUpdates => { "pending MGS updates (RoT / SP / Host OS / etc.)" } - Self::MupdateOverrides => "MUPdate overrides", + Self::ZoneAddBlockers => "zone add blockers", } } } diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index 82331b85632..0bc5543a0b8 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -6573,8 +6573,15 @@ "PlanningAddStepReport": { "type": "object", "properties": { + "add_update_blocked_reasons": { + "description": "Reasons why zone adds and any updates are blocked.\n\nThis is typically a list of MUPdate-related reasons.", + "type": "array", + "items": { + "type": "string" + } + }, "add_zones_with_mupdate_override": { - "description": "The value of the homonymous chicken switch.", + "description": "The value of the homonymous chicken switch. 
(What this really means is that zone adds happen despite being blocked by one or more MUPdate-related reasons.)", "type": "boolean" }, "discretionary_zones_placed": { @@ -6587,10 +6594,6 @@ } } }, - "has_mupdate_override": { - "description": "Are there any outstanding MUPdate overrides?", - "type": "boolean" - }, "out_of_eligible_sleds": { "description": "Discretionary zone kind → (placed, wanted to place)", "type": "object", @@ -6660,9 +6663,9 @@ } }, "required": [ + "add_update_blocked_reasons", "add_zones_with_mupdate_override", "discretionary_zones_placed", - "has_mupdate_override", "out_of_eligible_sleds", "sleds_getting_ntp_and_discretionary_zones", "sleds_missing_crucible_zone", @@ -9122,13 +9125,13 @@ "ZoneAddWaitingOn": { "oneOf": [ { - "description": "Waiting on one or more MUPdate overrides to clear.", + "description": "Waiting on one or more blockers (typically MUPdate-related reasons) to clear.", "type": "object", "properties": { "type": { "type": "string", "enum": [ - "mupdate_overrides" + "blockers" ] } }, @@ -9264,13 +9267,13 @@ ] }, { - "description": "Waiting on one or more MUPdate overrides to clear.", + "description": "Waiting on the same set of blockers zone adds are waiting on.", "type": "object", "properties": { "type": { "type": "string", "enum": [ - "mupdate_overrides" + "zone_add_blockers" ] } }, From 347bf8d7112a5abd88e70160883ad5d323a73efb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karen=20C=C3=A1rcamo?= Date: Wed, 27 Aug 2025 14:12:46 -0700 Subject: [PATCH 03/38] [mgs-updates] Ensure boot info is available after an RoT reset (#8905) By waiting for boot info to be available, a `sprot: timeout` error will be near impossible. --- nexus/mgs-updates/src/driver_update.rs | 7 +- .../mgs-updates/src/rot_bootloader_updater.rs | 134 ++++++------------ nexus/mgs-updates/src/rot_updater.rs | 109 +++++++++++++- 3 files changed, 158 insertions(+), 92 deletions(-) diff --git a/nexus/mgs-updates/src/driver_update.rs b/nexus/mgs-updates/src/driver_update.rs index 02dd3b4500a..554ea3f3e16 100644 --- a/nexus/mgs-updates/src/driver_update.rs +++ b/nexus/mgs-updates/src/driver_update.rs @@ -667,13 +667,14 @@ fn post_update_timeout(update: &PendingMgsUpdate) -> Duration { } } PendingMgsUpdateDetails::Rot { .. } => { - // Resetting the RoT should be quick (a few seconds). - Duration::from_secs(60) + // Resetting the RoT should be quick (a few seconds), but we wait + // for boot info after the reset. + Duration::from_secs(90) } PendingMgsUpdateDetails::RotBootloader { .. } => { // Resetting the bootloader requires multiple RoT resets; give this // a longer timeout. - Duration::from_secs(180) + Duration::from_secs(210) } PendingMgsUpdateDetails::HostPhase1(..) 
=> { // Resetting a sled takes several minutes (mostly DRAM training); diff --git a/nexus/mgs-updates/src/rot_bootloader_updater.rs b/nexus/mgs-updates/src/rot_bootloader_updater.rs index 72dc57362e6..6a87fc3a508 100644 --- a/nexus/mgs-updates/src/rot_bootloader_updater.rs +++ b/nexus/mgs-updates/src/rot_bootloader_updater.rs @@ -12,26 +12,21 @@ use crate::common_sp_update::PrecheckError; use crate::common_sp_update::PrecheckStatus; use crate::common_sp_update::error_means_caboose_is_invalid; use crate::mgs_clients::GatewayClientError; +use crate::rot_updater::WAIT_FOR_BOOT_INFO_TIMEOUT; +use crate::rot_updater::wait_for_boot_info; use futures::FutureExt; use futures::future::BoxFuture; use gateway_client::SpComponent; -use gateway_client::types::GetRotBootInfoParams; use gateway_client::types::RotImageError; use gateway_client::types::RotState; use gateway_client::types::SpComponentFirmwareSlot; use gateway_client::types::SpType; -use gateway_messages::RotBootInfo; use nexus_types::deployment::PendingMgsUpdate; use nexus_types::deployment::PendingMgsUpdateRotBootloaderDetails; use slog::Logger; -use slog::{debug, error, info}; +use slog::{debug, error}; use slog_error_chain::InlineErrorChain; use std::time::Duration; -use std::time::Instant; - -const WAIT_FOR_BOOT_INFO_TIMEOUT: Duration = Duration::from_secs(120); - -const WAIT_FOR_BOOT_INFO_INTERVAL: Duration = Duration::from_secs(10); pub struct ReconfiguratorRotBootloaderUpdater { details: PendingMgsUpdateRotBootloaderDetails, @@ -193,10 +188,6 @@ impl SpComponentUpdateHelperImpl for ReconfiguratorRotBootloaderUpdater { // We now retrieve boot info from the RoT to verify the reset // has completed and signature checks done. - debug!( - log, - "attempting to retrieve boot info to verify image validity" - ); let stage0next_error = wait_for_stage0_next_image_check( log, mgs_clients, @@ -252,7 +243,10 @@ impl SpComponentUpdateHelperImpl for ReconfiguratorRotBootloaderUpdater { }) .await?; - debug!(log, "attempting to reset device to set to new RoT bootloader version"); + debug!( + log, + "attempting to reset the device to set a new RoT bootloader version", + ); mgs_clients .try_all_serially(log, move |mgs_client| async move { mgs_client @@ -265,6 +259,15 @@ impl SpComponentUpdateHelperImpl for ReconfiguratorRotBootloaderUpdater { }) .await?; + // We wait for boot info to ensure a successful reset + wait_for_boot_info( + log, + mgs_clients, + update.sp_type, + update.slot_id, + WAIT_FOR_BOOT_INFO_TIMEOUT, + ) + .await?; Ok(()) } .boxed() @@ -272,7 +275,7 @@ impl SpComponentUpdateHelperImpl for ReconfiguratorRotBootloaderUpdater { } /// Poll the RoT asking for its boot information. This is used to check -/// state after RoT bootloader updates +/// the state for RoT bootloader image errors after RoT is reset async fn wait_for_stage0_next_image_check( log: &Logger, mgs_clients: &mut MgsClients, @@ -280,81 +283,38 @@ async fn wait_for_stage0_next_image_check( sp_slot: u16, timeout: Duration, ) -> Result, PostUpdateError> { - let before = Instant::now(); - loop { - match mgs_clients - .try_all_serially(log, |mgs_client| async move { - mgs_client - .sp_rot_boot_info( - sp_type, - sp_slot, - SpComponent::ROT.const_as_str(), - &GetRotBootInfoParams { - version: RotBootInfo::HIGHEST_KNOWN_VERSION, - }, - ) - .await - }) - .await - { - Ok(state) => match state.into_inner() { - // The minimum we will ever return is v3. 
- // Additionally, V2 does not report image errors, so we cannot - // know with certainty if a signature check came back with errors - RotState::V2 { .. } => { - let error = "unexpected RoT version: 2".to_string(); - error!( - log, - "failed to get RoT boot info"; - "error" => &error - ); - return Err(PostUpdateError::FatalError { error }); - } - RotState::V3 { stage0next_error, .. } => { - return Ok(stage0next_error); - } - // The RoT is probably still booting - RotState::CommunicationFailed { message } => { - if before.elapsed() >= timeout { - error!( - log, - "failed to get RoT boot info"; - "error" => %message - ); - return Err(PostUpdateError::FatalError { - error: message, - }); - } - - info!( - log, - "failed getting RoT boot info (will retry)"; - "error" => %message, - ); - tokio::time::sleep(WAIT_FOR_BOOT_INFO_INTERVAL).await; - } - }, - // The RoT might still be booting - Err(error) => { - let e = InlineErrorChain::new(&error); - if before.elapsed() >= timeout { - error!( - log, - "failed to get RoT boot info"; - &e, - ); - return Err(PostUpdateError::FatalError { - error: e.to_string(), - }); - } - - info!( + debug!(log, "attempting to verify image validity"); + match wait_for_boot_info(log, mgs_clients, sp_type, sp_slot, timeout).await + { + Ok(state) => match state { + // The minimum we will ever return is v3. + // Additionally, V2 does not report image errors, so we cannot + // know with certainty if a signature check came back with errors + RotState::V2 { .. } => { + let error = "unexpected RoT version: 2".to_string(); + error!( log, - "failed getting RoT boot info (will retry)"; - e, + "failed to get RoT boot info"; + "error" => &error ); - tokio::time::sleep(WAIT_FOR_BOOT_INFO_INTERVAL).await; + return Err(PostUpdateError::FatalError { error }); } - } + RotState::V3 { stage0next_error, .. } => { + debug!(log, "successfully completed an image signature check"); + return Ok(stage0next_error); + } + // This is unreachable because wait_for_boot_info loops for some + // time if it encounters `CommunicationFailed`, and if it hits the + // timeout, it will return an error. 
+ RotState::CommunicationFailed { message } => { + error!( + log, + "failed to get RoT boot info"; + "error" => %message + ); + return Err(PostUpdateError::FatalError { error: message }); + } + }, + Err(error) => return Err(error), } } diff --git a/nexus/mgs-updates/src/rot_updater.rs b/nexus/mgs-updates/src/rot_updater.rs index f784d4b2633..7bf2ffd66c6 100644 --- a/nexus/mgs-updates/src/rot_updater.rs +++ b/nexus/mgs-updates/src/rot_updater.rs @@ -14,11 +14,22 @@ use crate::common_sp_update::error_means_caboose_is_invalid; use futures::FutureExt; use futures::future::BoxFuture; use gateway_client::SpComponent; +use gateway_client::types::GetRotBootInfoParams; use gateway_client::types::RotState; use gateway_client::types::SpComponentFirmwareSlot; +use gateway_client::types::SpType; +use gateway_messages::RotBootInfo; use nexus_types::deployment::PendingMgsUpdate; use nexus_types::deployment::PendingMgsUpdateRotDetails; -use slog::{debug, info}; +use slog::Logger; +use slog::{debug, error, info}; +use slog_error_chain::InlineErrorChain; +use std::time::Duration; +use std::time::Instant; + +pub const WAIT_FOR_BOOT_INFO_TIMEOUT: Duration = Duration::from_secs(120); + +const WAIT_FOR_BOOT_INFO_INTERVAL: Duration = Duration::from_secs(10); type GatewayClientError = gateway_client::Error; @@ -223,7 +234,10 @@ impl SpComponentUpdateHelperImpl for ReconfiguratorRotUpdater { }) .await?; - debug!(log, "attempting to reset device"); + debug!( + log, + "attempting to reset the device to set a new RoT version" + ); mgs_clients .try_all_serially(log, move |mgs_client| async move { mgs_client @@ -235,8 +249,99 @@ impl SpComponentUpdateHelperImpl for ReconfiguratorRotUpdater { .await }) .await?; + + // We wait for boot info to ensure a successful reset + wait_for_boot_info( + log, + mgs_clients, + update.sp_type, + update.slot_id, + WAIT_FOR_BOOT_INFO_TIMEOUT, + ) + .await?; Ok(()) } .boxed() } } + +/// Poll the RoT asking for its boot information. This confirms that the RoT has +/// been succesfully reset +pub async fn wait_for_boot_info( + log: &Logger, + mgs_clients: &mut MgsClients, + sp_type: SpType, + sp_slot: u16, + timeout: Duration, +) -> Result { + let before = Instant::now(); + loop { + debug!(log, "waiting for boot info to confirm a successful reset"); + match mgs_clients + .try_all_serially(log, |mgs_client| async move { + mgs_client + .sp_rot_boot_info( + sp_type, + sp_slot, + SpComponent::ROT.const_as_str(), + &GetRotBootInfoParams { + version: RotBootInfo::HIGHEST_KNOWN_VERSION, + }, + ) + .await + }) + .await + { + Ok(state) => match state.clone() { + // The minimum we will ever return is v3. + // Additionally, V2 does not report image errors, so we cannot + // know with certainty if a signature check came back with errors + RotState::V2 { .. } | RotState::V3 { .. 
} => { + debug!(log, "successfuly retrieved boot info"); + return Ok(state.into_inner()); + } + // The RoT is probably still booting + RotState::CommunicationFailed { message } => { + if before.elapsed() >= timeout { + error!( + log, + "failed to get RoT boot info"; + "error" => %message + ); + return Err(PostUpdateError::FatalError { + error: message, + }); + } + + info!( + log, + "failed getting RoT boot info (will retry)"; + "error" => %message, + ); + tokio::time::sleep(WAIT_FOR_BOOT_INFO_INTERVAL).await; + } + }, + // The RoT might still be booting + Err(error) => { + let e = InlineErrorChain::new(&error); + if before.elapsed() >= timeout { + error!( + log, + "failed to get RoT boot info"; + &e, + ); + return Err(PostUpdateError::FatalError { + error: e.to_string(), + }); + } + + info!( + log, + "failed getting RoT boot info (will retry)"; + e, + ); + tokio::time::sleep(WAIT_FOR_BOOT_INFO_INTERVAL).await; + } + } + } +} From d4df3f791c5201501f97b7d021bd6106f486a59c Mon Sep 17 00:00:00 2001 From: "Andrew J. Stone" Date: Wed, 27 Aug 2025 19:09:56 -0400 Subject: [PATCH 04/38] TQ: Add support for alarms in the protocol (#8753) This builds on #8741 An alarm represents a protocol invariant violation. It's unclear exactly what should be done about these other than recording them and allowing them to be reported upstack, which is what is done in this PR. An argument could be made for "freezing" the state machine such that trust quorum nodes stop working and the only thing they can do is report alarm status. However, that would block the trust quorum from operating at all, and it's unclear if this should cause an outage on that node. I'm also somewhat hesitant to put the alarms into the persistent state as that would prevent unlock in the case of a sled/rack reboot. On the flip side of just recording is the possible danger resulting from operating with an invariant violation. This could potentially be risky, and since we shouldn't ever see these maybe pausing for a support call is the right thing. TBD, once more work is done on the protocol. --- trust-quorum/gfss/src/shamir.rs | 12 ++- trust-quorum/src/alarm.rs | 36 +++++++++ trust-quorum/src/compute_key_share.rs | 36 ++++++--- trust-quorum/src/configuration.rs | 4 +- trust-quorum/src/lib.rs | 2 + trust-quorum/src/node.rs | 76 +++++++++++++------ trust-quorum/src/node_ctx.rs | 20 ++++- .../tests/cluster.proptest-regressions | 7 ++ trust-quorum/tests/cluster.rs | 15 ++++ 9 files changed, 171 insertions(+), 37 deletions(-) create mode 100644 trust-quorum/src/alarm.rs create mode 100644 trust-quorum/tests/cluster.proptest-regressions diff --git a/trust-quorum/gfss/src/shamir.rs b/trust-quorum/gfss/src/shamir.rs index 091e2040c91..2da11b83bad 100644 --- a/trust-quorum/gfss/src/shamir.rs +++ b/trust-quorum/gfss/src/shamir.rs @@ -23,7 +23,17 @@ pub enum SplitError { TooFewTotalShares { n: u8, k: u8 }, } -#[derive(Debug, Clone, thiserror::Error, PartialEq, Eq)] +#[derive( + Debug, + Clone, + thiserror::Error, + PartialEq, + Eq, + PartialOrd, + Ord, + Serialize, + Deserialize, +)] pub enum CombineError { #[error("must be at least 2 shares to combine")] TooFewShares, diff --git a/trust-quorum/src/alarm.rs b/trust-quorum/src/alarm.rs new file mode 100644 index 00000000000..0580c024d34 --- /dev/null +++ b/trust-quorum/src/alarm.rs @@ -0,0 +1,36 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +//! Mechanism for reporting protocol invariant violations + +use serde::{Deserialize, Serialize}; + +use crate::{Configuration, Epoch, PlatformId}; + +#[allow(clippy::large_enum_variant)] +#[derive( + Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, +)] +pub enum Alarm { + /// Different configurations found for the same epoch + /// + /// Reason: Nexus creates configurations and stores them in CRDB before + /// sending them to a coordinator of its choosing. Nexus will not send the + /// same reconfiguration request to different coordinators. If it does those + /// coordinators will generate different key shares. However, since Nexus + /// will not tell different nodes to coordinate the same configuration, this + /// state should be impossible to reach. + MismatchedConfigurations { + config1: Configuration, + config2: Configuration, + from: PlatformId, + }, + + /// The `keyShareComputer` could not compute this node's share + /// + /// Reason: A threshold of valid key shares were received based on the the + /// share digests in the Configuration. However, computation of the share + /// still failed. This should be impossible. + ShareComputationFailed { epoch: Epoch, err: gfss::shamir::CombineError }, +} diff --git a/trust-quorum/src/compute_key_share.rs b/trust-quorum/src/compute_key_share.rs index 2bee03abbea..8cc780f752e 100644 --- a/trust-quorum/src/compute_key_share.rs +++ b/trust-quorum/src/compute_key_share.rs @@ -9,7 +9,9 @@ //! other nodes so that it can compute its own key share. use crate::crypto::Sha3_256Digest; -use crate::{Configuration, Epoch, NodeHandlerCtx, PeerMsgKind, PlatformId}; +use crate::{ + Alarm, Configuration, Epoch, NodeHandlerCtx, PeerMsgKind, PlatformId, +}; use gfss::gf256::Gf256; use gfss::shamir::{self, Share}; use slog::{Logger, error, o, warn}; @@ -101,6 +103,7 @@ impl KeyShareComputer { "epoch" => %epoch, "from" => %from ); + return false; } // A valid share was received. Is it new? @@ -116,12 +119,23 @@ impl KeyShareComputer { // What index are we in the configuration? This is our "x-coordinate" // for our key share calculation. We always start indexing from 1, since // 0 is the rack secret. - let index = self - .config - .members - .keys() - .position(|id| id == ctx.platform_id()) - .expect("node exists"); + let index = + self.config.members.keys().position(|id| id == ctx.platform_id()); + + let Some(index) = index else { + let msg = concat!( + "Failed to get index for ourselves in current configuration. ", + "We are not a member, and must have been expunged." + ); + error!( + self.log, + "{msg}"; + "platform_id" => %ctx.platform_id(), + "config" => ?self.config + ); + return false; + }; + let x_coordinate = Gf256::new(u8::try_from(index + 1).expect("index fits in u8")); @@ -137,11 +151,9 @@ impl KeyShareComputer { }); true } - Err(e) => { - // TODO: put the node into into an `Alarm` state similar to - // https://github.com/oxidecomputer/omicron/pull/8062 once we - // have alarms? - error!(self.log, "Failed to compute share: {}", e); + Err(err) => { + error!(self.log, "Failed to compute share: {}", err); + ctx.raise_alarm(Alarm::ShareComputationFailed { epoch, err }); false } } diff --git a/trust-quorum/src/configuration.rs b/trust-quorum/src/configuration.rs index 23b4f6f3c8d..a6057c62ed1 100644 --- a/trust-quorum/src/configuration.rs +++ b/trust-quorum/src/configuration.rs @@ -30,7 +30,9 @@ pub enum ConfigurationError { /// The configuration for a given epoch. 
/// /// Only valid for non-lrtq configurations -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive( + Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, +)] pub struct Configuration { /// Unique Id of the rack pub rack_id: RackUuid, diff --git a/trust-quorum/src/lib.rs b/trust-quorum/src/lib.rs index 39418714296..8bb8d8de5d3 100644 --- a/trust-quorum/src/lib.rs +++ b/trust-quorum/src/lib.rs @@ -23,7 +23,9 @@ mod persistent_state; mod validators; pub use configuration::Configuration; pub use coordinator_state::{CoordinatorOperation, CoordinatorState}; +mod alarm; +pub use alarm::Alarm; pub use crypto::RackSecret; pub use messages::*; pub use node::Node; diff --git a/trust-quorum/src/node.rs b/trust-quorum/src/node.rs index 72d38a5b674..a6613f9062f 100644 --- a/trust-quorum/src/node.rs +++ b/trust-quorum/src/node.rs @@ -20,7 +20,7 @@ use crate::validators::{ MismatchedRackIdError, ReconfigurationError, ValidatedReconfigureMsg, }; use crate::{ - Configuration, CoordinatorState, Epoch, NodeHandlerCtx, PlatformId, + Alarm, Configuration, CoordinatorState, Epoch, NodeHandlerCtx, PlatformId, messages::*, }; use gfss::shamir::Share; @@ -308,28 +308,33 @@ impl Node { "epoch" => %config.epoch ); ctx.update_persistent_state(|ps| ps.commits.insert(config.epoch)); + return; } // Do we have the configuration in our persistent state? If not save it. - ctx.update_persistent_state(|ps| { - if let Err(e) = ps.configs.insert_unique(config.clone()) { - let existing = - e.duplicates().first().expect("duplicate exists"); - if *existing != &config { - error!( - self.log, - "Received a configuration mismatch"; - "from" => %from, - "existing_config" => #?existing, - "received_config" => #?config - ); - // TODO: Alarm - } - false - } else { - true + if let Some(existing) = + ctx.persistent_state().configuration(config.epoch) + { + if existing != &config { + error!( + self.log, + "Received a configuration mismatch"; + "from" => %from, + "existing_config" => #?existing, + "received_config" => #?config + ); + ctx.raise_alarm(Alarm::MismatchedConfigurations { + config1: (*existing).clone(), + config2: config.clone(), + from: from.clone(), + }); } - }); + } else { + ctx.update_persistent_state(|ps| { + ps.configs.insert_unique(config.clone()).expect("new config"); + true + }); + } // Are we coordinating for an older epoch? If so, cancel. if let Some(cs) = &self.coordinator_state { @@ -343,14 +348,14 @@ impl Node { "received_epoch" => %config.epoch ); self.coordinator_state = None; + // Intentionally fall through } else if coordinating_epoch == config.epoch { - error!( + info!( self.log, "Received CommitAdvance while coordinating for same epoch!"; "from" => %from, "epoch" => %config.epoch ); - // TODO: Alarm return; } else { info!( @@ -399,7 +404,8 @@ impl Node { } } - // We either were collectiong shares for an old epoch or haven't started yet. + // We either were collectiong shares for an old epoch or haven't started + // yet. 
self.key_share_computer = Some(KeyShareComputer::new(&self.log, ctx, config)); } @@ -414,6 +420,18 @@ impl Node { ctx.persistent_state().latest_committed_configuration() { if latest_committed_config.epoch > epoch { + if !latest_committed_config.members.contains_key(&from) { + info!( + self.log, + "Received a GetShare message from expunged node"; + "from" => %from, + "latest_committed_epoch" => + %latest_committed_config.epoch, + "requested_epoch" => %epoch + ); + // TODO: Send an expunged message + return; + } info!( self.log, concat!( @@ -432,6 +450,20 @@ impl Node { } } + // Do we have the configuration? Is the requesting peer a member? + if let Some(config) = ctx.persistent_state().configuration(epoch) { + if !config.members.contains_key(&from) { + info!( + self.log, + "Received a GetShare message from expunged node"; + "from" => %from, + "epoch" => %epoch + ); + // TODO: Send an expunged message + return; + } + } + // If we have the share for the requested epoch, we always return it. We // know that it is at least as new as the last committed epoch. We might // not have learned about the configuration being committed yet, but diff --git a/trust-quorum/src/node_ctx.rs b/trust-quorum/src/node_ctx.rs index 30cfe15b174..e3a4f7fed32 100644 --- a/trust-quorum/src/node_ctx.rs +++ b/trust-quorum/src/node_ctx.rs @@ -4,7 +4,9 @@ //! Parameter to Node API calls that allows interaction with the system at large -use crate::{Envelope, PeerMsg, PeerMsgKind, PersistentState, PlatformId}; +use crate::{ + Alarm, Envelope, PeerMsg, PeerMsgKind, PersistentState, PlatformId, +}; use std::collections::BTreeSet; /// An API shared by [`NodeCallerCtx`] and [`NodeHandlerCtx`] @@ -12,6 +14,7 @@ pub trait NodeCommonCtx { fn platform_id(&self) -> &PlatformId; fn persistent_state(&self) -> &PersistentState; fn connected(&self) -> &BTreeSet; + fn alarms(&self) -> &BTreeSet; } /// An API for an [`NodeCtx`] usable from a [`crate::Node`] @@ -54,6 +57,9 @@ pub trait NodeHandlerCtx: NodeCommonCtx { /// Remove a peer from the connected set fn remove_connection(&mut self, id: &PlatformId); + + /// Record (in-memory) that an alarm has occurred + fn raise_alarm(&mut self, alarm: Alarm); } /// Common parameter to [`crate::Node`] methods @@ -79,6 +85,9 @@ pub struct NodeCtx { /// Connected peer nodes connected: BTreeSet, + + /// Any alarms that have occurred + alarms: BTreeSet, } impl NodeCtx { @@ -89,6 +98,7 @@ impl NodeCtx { persistent_state_changed: false, outgoing: Vec::new(), connected: BTreeSet::new(), + alarms: BTreeSet::new(), } } } @@ -105,6 +115,10 @@ impl NodeCommonCtx for NodeCtx { fn connected(&self) -> &BTreeSet { &self.connected } + + fn alarms(&self) -> &BTreeSet { + &self.alarms + } } impl NodeHandlerCtx for NodeCtx { @@ -138,6 +152,10 @@ impl NodeHandlerCtx for NodeCtx { fn remove_connection(&mut self, id: &PlatformId) { self.connected.remove(id); } + + fn raise_alarm(&mut self, alarm: Alarm) { + self.alarms.insert(alarm); + } } impl NodeCallerCtx for NodeCtx { diff --git a/trust-quorum/tests/cluster.proptest-regressions b/trust-quorum/tests/cluster.proptest-regressions new file mode 100644 index 00000000000..bb7577eca65 --- /dev/null +++ b/trust-quorum/tests/cluster.proptest-regressions @@ -0,0 +1,7 @@ +# Seeds for failure cases proptest has generated in the past. It is +# automatically read and these particular cases re-run before any +# novel cases are generated. +# +# It is recommended to check this file in to source control so that +# everyone who runs the test benefits from these saved cases. 
+cc aa229dbfa63db6ed21d624e6c474f3591be97204fc051fe2b7b6d1af78409532 # shrinks to input = _TestTrustQuorumProtocolArgs { input: TestInput { initial_config: GeneratedConfiguration { members: {2, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 21}, threshold: Index(0) }, initial_down_nodes: {}, actions: [DeliverEnvelopes([Index(14493870343628933413), Index(0), Index(2635249153387079100), Index(7659274142974149207), Index(10538101999560721780), Index(9052169730812637246), Index(4315275857016173272), Index(1740015673248333392), Index(13987162340212539423)]), DeliverEnvelopes([Index(9041798398182823156), Index(13868876044032355650)]), DeliverEnvelopes([Index(3011348768397116341), Index(8660180482978775225), Index(9182847551331361735), Index(16121357191531813301), Index(3788564878545978377), Index(17393195961508650460), Index(3346167418243659948), Index(3573581286569374339), Index(13250013466448051467), Index(4820013139542767201), Index(8039736122411843390), Index(17013067520142601277), Index(16122004323767661676), Index(17569103901503114628), Index(1932949711347229750), Index(4319513761709523737), Index(4584565250890182062), Index(1279075436995347802), Index(17062300948886540969)]), PollPrepareAcks, Commit([Index(17389235045933586108), Index(8560515657089253499), Index(10855801671237462872), Index(13083943140650726535), Index(6596360928255073579), Index(12912936435579810120), Index(13555346217578137976), Index(1669092046242131079)]), DeliverNexusReplies(6), Reconfigure { num_added_nodes: 3, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(1703742760083928913), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Reconfigure { num_added_nodes: 0, removed_nodes: [], threshold: Index(5259489828371974487), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Reconfigure { num_added_nodes: 0, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(14446131236589207074), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelopes([Index(2073236698708178332)]), DeliverEnvelopes([Index(3453196791926166099), Index(568387599084033335), Index(3409840665184802609), Index(12286030226091071672), Index(7286708483759199339), Index(2055658592347030945), Index(1173213978658236675), Index(8649978366863725550)]), Reconfigure { num_added_nodes: 2, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(9996579049375553061), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Reconfigure { num_added_nodes: 2, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector 
{ rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(17841859598948908161), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Reconfigure { num_added_nodes: 1, removed_nodes: [], threshold: Index(8689652795643676030), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelopes([Index(13933299526419321128), Index(5225873483252317482), Index(5244472839316863196), Index(15065206300363858421), Index(1331991639281083654), Index(9969299029824142306), Index(10418763353833126365)]), Reconfigure { num_added_nodes: 2, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(17441105865747581233), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelopes([Index(9629417235951452701), Index(17699594351241716570), Index(6446358179225870872), Index(10477494232845674894), Index(226357282448375442), Index(17511469032238010586), Index(5868458999694820988), Index(9008597573607186863), Index(14889469521169823547), Index(16216562580497901897), Index(10976472819558126038), Index(8297592251652111615)]), DeliverEnvelopes([Index(6225179597717370182), Index(3456377812674314165), Index(4870110218414913904), Index(12933525926916895630), Index(5251263308628043457), Index(14960131063828769882), Index(954293712144926568), Index(15613441814815191176), Index(7681056590698508795), Index(3497512649074481205)]), DeliverEnvelopes([Index(5458890086829541581), Index(4300690563898456475), Index(469725690344733484), Index(3627582562296635119), Index(13485721932604488035), Index(838877244787605597), Index(17616669406954758185), Index(10912008557925580957), Index(2064843088888151834), Index(13592137105530054033), Index(464925174084213053), Index(12477077518224200179), Index(7062021569203679845)]), DeliverEnvelopes([Index(15629509651933908728), Index(7593448817552421286), Index(6069697258859901799), Index(7447724974852314797), Index(6976391078682036797), Index(15954111707567866812), Index(9739269646592130920), Index(7840844524173616953), Index(17251031726274733868), Index(3236370306236870045), Index(10198745931145551826), Index(11526777721718675946), Index(7855592439162647423), Index(17024415010144483949), Index(4229155328323504098), Index(5472111026499192469)]), DeliverEnvelopes([Index(4411786472941543389), Index(2352452285251361371), Index(7822068502251264399), Index(10423488690252370246), Index(6800625973591759468), Index(14891503574606637490), Index(4929072166198014740), Index(15521754316309705239), Index(8420972048384534331), Index(14271222805435196129), Index(15731413232976718648), Index(7620446100066998584), Index(16071224481319847817), Index(3144698519065113997), Index(2809650099793229888), Index(5232005717265660268), Index(7349929989771864370), Index(5385953262323237136), Index(5995048832340625664)]), DeliverEnvelopes([Index(17995955460911334433), Index(4708641667603374303), Index(7732664601632819026), 
Index(17423877575181254121), Index(13237365779440508178), Index(16219013419076737978)]), DeliverEnvelopes([Index(8304946145816664660), Index(862057291908146042), Index(15725440490586181656), Index(16995826471578986413), Index(2215118704959889028), Index(5868186664753940774), Index(8206076788005089132), Index(5701603538374868810), Index(7882633833049924931), Index(14920895499854492495), Index(9744657067126826847), Index(17217199825680194648)]), DeliverEnvelopes([Index(8037762189481305143), Index(7161454571813702078), Index(16624697899890776024), Index(6776753215496123550), Index(3552663706596513637), Index(13168137737732814449), Index(17773039112791415335), Index(5357529117839942678), Index(11360548594432633510), Index(17152936725964058392), Index(3735622760167456433), Index(3729434446292991499), Index(8239146570926600189), Index(17213753850553118615), Index(3586505624037292033), Index(5676170410763085337), Index(7854336347370863376), Index(3296193835517592123)]), DeliverEnvelopes([Index(7450515981053560543), Index(7281049720524507073), Index(11805621530410984780), Index(14212481354680952709), Index(11677201894475474096), Index(15579047847775346789), Index(8933612206363673270), Index(14589715969816048496), Index(9146028479816005688), Index(15678520830606285653), Index(13646715682213052692), Index(16542748546042997802), Index(3018629145641526806), Index(3279754668523137850), Index(9190080933222987746)]), DeliverEnvelopes([Index(4442754352407199000), Index(6726639451905550261), Index(276325053358083205), Index(8309639208446119622), Index(17071310980996705540), Index(13681920258353201127), Index(6824753331681591316), Index(9564449636752909493), Index(11781421339321328484), Index(17060651525797945236), Index(6256326521108731728), Index(882364767508101600), Index(18155289916698303405), Index(3604801106996280475), Index(18030768360505361161), Index(10828708533993573006), Index(10140811484372325170)]), DeliverEnvelopes([Index(14675779565613765883), Index(7875237876508366946), Index(1080598541644522468), Index(9795208880339871790), Index(16788173384975729322), Index(7862287353069942075), Index(18287036425108952119), Index(438554228774108801), Index(16936278258143324901), Index(17297858047970493225), Index(16280794788037943206)]), DeliverEnvelopes([Index(14318968306187897375), Index(15260180391589155604), Index(15065168303490293135), Index(16283608916485249870), Index(13275033248234956364), Index(2830029696975378334), Index(10826126602511348625), Index(3503489386636778134), Index(4143173347019462961), Index(14194382779242307605), Index(6222922790114912969), Index(4040121466287304927), Index(9113628229529655615), Index(5415084019849945730), Index(1626553538397648371), Index(15501687776809538824)]), DeliverEnvelopes([Index(5723406985380791968), Index(1155733174009260305), Index(7032746625126254072), Index(4743835759260085268), Index(7266303314157497289), Index(9385508013014315790), Index(17143596343958976419), Index(12019102422355865994), Index(17164688997776033771), Index(5601248314369099348), Index(8909509441657159134), Index(434646333244277371), Index(2059402500670808177), Index(10728320671205025035), Index(10839709495944012941), Index(13074991387519599432), Index(16684428541064080728), Index(1516391557911492208)]), PollPrepareAcks, Commit([Index(10338703738028056944), Index(18153730223737876722), Index(7878439710336920970), Index(16934827903768804991)]), DeliverNexusReplies(7), DeliverEnvelopes([Index(12043925503899430601), Index(13756482947028862137), Index(16512953710007648247), 
Index(17789811749467591337), Index(10320232143017481771), Index(1459556606276719356), Index(845781064116242142), Index(1956519909910251783), Index(3164835970372175816), Index(14120105518532170713), Index(10524629999594340195), Index(2646948465709084646), Index(15782024841931074195), Index(4857968548620380892), Index(10321553606781845802), Index(2234980294207553151), Index(14471054211349921145)]), DeliverEnvelopes([Index(3767742994858916851), Index(5436520998904493278), Index(10088257053026246598), Index(1498263247328214760), Index(14106058185302211861), Index(6061739775064231445), Index(2023011050214169455), Index(7250794870201811289), Index(17600053360168057888), Index(1740642731188709568), Index(2663072436983654497), Index(809252200146375679), Index(1869788575371162759), Index(4781549348184833023), Index(3523664822500907563)]), Commit([Index(14652573856526224379), Index(5781738428227077912), Index(12854715630867365273), Index(1911123227362573689)]), DeliverEnvelopes([Index(5483103026978400083), Index(14279025710451447544), Index(4937116848351103172), Index(16927987341169693780), Index(9896521330207311715)]), DeliverEnvelopes([Index(16187414459998690745), Index(65365848735262627), Index(16029309594081455593), Index(8738644556875519057), Index(8210105897167225670), Index(15476129597523288716), Index(5183379879833502204), Index(11717624468492776210), Index(15229030068062309892), Index(11851728872298706575), Index(10702820350636537827), Index(2687836824532707096), Index(2996236953690420395)]), Commit([Index(10707814609213298068), Index(12699776013897419563)]), Commit([Index(10157815118387593249), Index(5123540499752415756), Index(15808738534837068446)]), DeliverEnvelopes([Index(1541961047234018357), Index(1525243190754021203), Index(12707227425511543885), Index(1363732177719220534), Index(6262127490926569596), Index(15087875812043424072), Index(7757884893278869289), Index(11575998173089334269), Index(7651957407489583015), Index(384152082221813754), Index(13402749590934510602), Index(9025488436292943022)]), Commit([Index(2522165368668848218), Index(13296047016915538553), Index(15622931071258404786), Index(3114595942384622948), Index(12412617090084599671), Index(11664985786937734966)]), PollPrepareAcks, Commit([Index(13180524526118109937), Index(17193218842005402908), Index(17790813705747893301), Index(739262928517866423)]), PollPrepareAcks, PollPrepareAcks, DeliverNexusReplies(3), DeliverEnvelopes([Index(652695315058809065), Index(727847806737003567), Index(5404780742271511029), Index(16694185347801040155), Index(4098481543012673009), Index(7791187420263002075)]), Reconfigure { num_added_nodes: 1, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(199832759506365010), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelopes([Index(9168374719730985799), Index(10794102811332111465), Index(17028470677432611953)]), DeliverNexusReplies(3), Reconfigure { num_added_nodes: 1, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(18233548140847603442), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, 
DeliverNexusReplies(7), DeliverNexusReplies(6), PollPrepareAcks, Reconfigure { num_added_nodes: 4, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(6273134768375120567), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReplies(1), DeliverNexusReplies(8), Reconfigure { num_added_nodes: 4, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(15266709888734590000), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelopes([Index(9098454520105484266), Index(5401214547661529702), Index(729613142155688864), Index(18165312717926349510), Index(13596594098232187840), Index(17864609912832060176), Index(6576389962946327472), Index(6748861532334916574), Index(6270337184347778455), Index(4621189873065888654), Index(1329701404467837166), Index(15253391592315247523), Index(7548879285352433967), Index(1519898158722201861), Index(9221399647181265582), Index(14775495632976400799)]), DeliverNexusReplies(7), DeliverNexusReplies(7), DeliverEnvelopes([Index(6844824367736706288), Index(15644179546212865649), Index(4684448243389276339), Index(7196508453975880576)]), PollPrepareAcks, PollPrepareAcks, DeliverNexusReplies(8), DeliverEnvelopes([Index(5940056868932490196), Index(7905305070747045242), Index(18010824367591471345)]), PollPrepareAcks, DeliverEnvelopes([Index(11313344465270829281), Index(14770881005951866888), Index(14791521919982503413), Index(13986963003913738122), Index(15146722185811966483), Index(5670599236968951456), Index(17258904022024285117), Index(9970983405711455637), Index(1037749372107185535), Index(14590287308375860226), Index(14401361711946635290), Index(11544751816992154044)]), DeliverNexusReplies(2), DeliverNexusReplies(5), PollPrepareAcks, Commit([Index(4768387033456628201), Index(4474917877175302218), Index(16571851457975840247), Index(15255821624041417534), Index(16757765128366937769), Index(2630722755438967257), Index(9285796362852384853), Index(14015858312178579018)]), DeliverEnvelopes([Index(15871463961649635696), Index(18163198373939775702), Index(2600226515950816111)]), DeliverEnvelopes([Index(3137454651926879610), Index(4284065310722833297), Index(16055853410816331071), Index(15166506491941155629), Index(12590336857066018677)]), Commit([Index(5641127885216290645), Index(11167110485586573589), Index(1014364951816464696), Index(12696711847665095026), Index(17978099696654320596)]), DeliverNexusReplies(8), Commit([Index(4029921677534804644), Index(15185718783597090882), Index(11314695466132516141), Index(5987253657617228904), Index(12519437426641924797), Index(6263639313484088578), Index(1682708770096528814), Index(17514314903812047682), Index(11457587421741508124)]), Commit([Index(9774639841375717942), Index(10038533130474007843), Index(17060633271273876314), Index(7000629506295319285)]), DeliverEnvelopes([Index(15061341259133073864), Index(5027422415066345386), Index(9379255978562524739), Index(6150067533282387651), Index(213696060201653414), 
Index(16059755076226721578), Index(16824493282550752097), Index(15722153504680899440), Index(5759912639700558277), Index(4924300060842152804), Index(9327426990216358558), Index(17734827167724690660), Index(1357167295908663567), Index(13405304061122455105)]), Commit([Index(17426666830188603729), Index(12810851899054823276), Index(5888927766144618223), Index(18098665668806060067), Index(8508378395275761614), Index(1650853648920734436), Index(5663558801920224835)]), DeliverEnvelopes([Index(15295993199711016882), Index(3859998607405058373), Index(16246966108341368602), Index(2383124365549873480), Index(16408321263194864709), Index(14444156841149526789), Index(6495747943129340003), Index(17772779185757377240), Index(14604635448037405545), Index(4847143012976409235), Index(1145991671159544728), Index(6507514046328470924), Index(13464350535917834973), Index(14483501505596509725), Index(4511687336665279669), Index(9620101944261959773), Index(484390403697139876), Index(9857456322741422009), Index(3341222595136110266)]), PollPrepareAcks, DeliverNexusReplies(5), PollPrepareAcks, PollPrepareAcks, DeliverEnvelopes([Index(213757553854772440), Index(3794994835931277050), Index(1230995856688641619), Index(615222509626393634), Index(11235657579233697266), Index(380444266263585305), Index(17306335717947557462), Index(8806641371157136569), Index(544411057042609898), Index(14217584839742171129), Index(17179942378194078060), Index(18111746867895443277), Index(4705170374898011720), Index(3617427108480506863), Index(4184498722027448066), Index(17681194391468303007), Index(12378580499825910547), Index(1587437098921461308), Index(5646864724861626575)]), Commit([Index(2895265222191031484), Index(17950221019931177667), Index(9263853737651325899), Index(3842185010035964272), Index(12827291054333377474)]), Commit([Index(15759430573635913179), Index(9798702836106401259), Index(5201468168437920159), Index(2657315818353075169), Index(8828036642617900104), Index(16884723312357945697), Index(14437294211944068336)]), DeliverNexusReplies(5), DeliverEnvelopes([Index(7768905154491618039), Index(3438302915553662917), Index(14091186660758608634)]), Commit([Index(17866948588368032403), Index(11604953794048654451)]), DeliverNexusReplies(2), DeliverNexusReplies(7), Commit([Index(13712612038145219747), Index(12085627743433809088), Index(8699014604701166458), Index(2994093657804205227), Index(16225639718416622872), Index(6900536451203328895), Index(11227963432608776486), Index(13049675711087060279), Index(5214683319822193770)]), DeliverNexusReplies(4), PollPrepareAcks, DeliverNexusReplies(2), DeliverEnvelopes([Index(6044528710470823771), Index(7296331177112754699), Index(9457525726669633929), Index(5079497433348808932), Index(17348870535556670336), Index(512738896472721452), Index(12256764748176290216), Index(7604437520746076905), Index(6769996048460626115), Index(16864410288957109112), Index(3088590075318437038), Index(2095012960284709716)]), Commit([Index(15776963438323035828), Index(16592172527267608874), Index(4574510404731010758), Index(9801172050530399304), Index(12591565060149844827), Index(17758031771693043328), Index(2356648783237251688), Index(2403981669904249702), Index(4639941478079293934)])] } } diff --git a/trust-quorum/tests/cluster.rs b/trust-quorum/tests/cluster.rs index 9dec6000640..9bc7da94c65 100644 --- a/trust-quorum/tests/cluster.rs +++ b/trust-quorum/tests/cluster.rs @@ -834,6 +834,7 @@ impl TestState { self.invariant_nodes_have_prepared_if_coordinator_has_acks()?; 
self.invariant_nodes_have_committed_if_nexus_has_acks()?; self.invariant_nodes_not_coordinating_and_computing_key_share_simultaneously()?; + self.invariant_no_alarms()?; Ok(()) } @@ -953,6 +954,20 @@ impl TestState { Ok(()) } + + // Ensure there has been no alarm at any node + fn invariant_no_alarms(&self) -> Result<(), TestCaseError> { + for (id, (_, ctx)) in &self.sut.nodes { + let alarms = ctx.alarms(); + prop_assert!( + alarms.is_empty(), + "Alarms found for {}: {:#?}", + id, + alarms + ); + } + Ok(()) + } } /// Broken out of `TestState` to alleviate borrow checker woes From 99e958412011a28c97da56f9054d998314c9dfe8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karen=20C=C3=A1rcamo?= Date: Wed, 27 Aug 2025 16:52:07 -0700 Subject: [PATCH 05/38] [reconfigurator-cli] Fixes and improvements for RoT update testing (#8904) Follow up to https://github.com/oxidecomputer/omicron/pull/8893 --- dev-tools/reconfigurator-cli/src/lib.rs | 61 +- .../tests/input/cmds-target-release.txt | 37 +- .../reconfigurator-cli/tests/input/cmds.txt | 8 + .../tests/output/cmds-stdout | 204 +++++- .../tests/output/cmds-target-release-stdout | 655 +++++++++++++++++- nexus/reconfigurator/planning/src/system.rs | 75 +- 6 files changed, 972 insertions(+), 68 deletions(-) diff --git a/dev-tools/reconfigurator-cli/src/lib.rs b/dev-tools/reconfigurator-cli/src/lib.rs index 43be0311aee..fc998013ca7 100644 --- a/dev-tools/reconfigurator-cli/src/lib.rs +++ b/dev-tools/reconfigurator-cli/src/lib.rs @@ -10,6 +10,7 @@ use chrono::{DateTime, Utc}; use clap::{ArgAction, ValueEnum}; use clap::{Args, Parser, Subcommand}; use daft::Diffable; +use gateway_types::rot::RotSlot; use iddqd::IdOrdMap; use indent_write::fmt::IndentWriter; use internal_dns_types::diff::DnsDiff; @@ -22,7 +23,7 @@ use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; use nexus_reconfigurator_planning::example::ExampleSystemBuilder; use nexus_reconfigurator_planning::planner::Planner; use nexus_reconfigurator_planning::system::{ - SledBuilder, SledInventoryVisibility, SystemDescription, + RotStateOverrides, SledBuilder, SledInventoryVisibility, SystemDescription, }; use nexus_reconfigurator_simulation::{BlueprintId, CollectionId, SimState}; use nexus_reconfigurator_simulation::{SimStateBuilder, SimTufRepoSource}; @@ -553,6 +554,27 @@ struct SledUpdateRotArgs { /// sets the version reported for the RoT slot b #[clap(long, required_unless_present_any = &["slot_a"])] slot_b: Option, + + /// sets whether we expect the "A" or "B" slot to be active + #[clap(long)] + active_slot: Option, + + /// sets the persistent boot preference written into the current + /// authoritative CFPA page (ping or pong). 
+ #[clap(long)] + persistent_boot_preference: Option, + + /// sets the pending persistent boot preference written into the CFPA + /// scratch page that will become the persistent boot preference in the + /// authoritative CFPA page upon reboot, unless CFPA update of the + /// authoritative page fails for some reason + #[clap(long, num_args(0..=1))] + pending_persistent_boot_preference: Option>, + + /// sets the transient boot preference, which overrides persistent + /// preference selection for a single boot (unimplemented) + #[clap(long, num_args(0..=1),)] + transient_boot_preference: Option>, } #[derive(Debug, Args)] @@ -1706,6 +1728,31 @@ fn cmd_sled_update_rot( if let Some(slot_b) = &args.slot_b { labels.push(format!("slot b -> {}", slot_b)); } + if let Some(active_slot) = &args.active_slot { + labels.push(format!("active slot -> {}", active_slot)); + } + + if let Some(persistent_boot_preference) = &args.persistent_boot_preference { + labels.push(format!( + "persistent boot preference -> {}", + persistent_boot_preference + )); + } + + if let Some(pending_persistent_boot_preference) = + &args.pending_persistent_boot_preference + { + labels.push(format!( + "pending persistent boot preference -> {:?}", + pending_persistent_boot_preference + )); + } + if let Some(transient_boot_preference) = &args.transient_boot_preference { + labels.push(format!( + "transient boot preference -> {:?}", + transient_boot_preference + )); + } assert!( !labels.is_empty(), @@ -1717,8 +1764,16 @@ fn cmd_sled_update_rot( let sled_id = args.sled_id.to_sled_id(system.description())?; system.description_mut().sled_update_rot_versions( sled_id, - args.slot_a, - args.slot_b, + RotStateOverrides { + active_slot_override: args.active_slot, + slot_a_version_override: args.slot_a, + slot_b_version_override: args.slot_b, + persistent_boot_preference_override: args + .persistent_boot_preference, + pending_persistent_boot_preference_override: args + .pending_persistent_boot_preference, + transient_boot_preference_override: args.transient_boot_preference, + }, )?; sim.commit_and_bump( diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt b/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt index 7cf40d7261e..5b16cf68076 100644 --- a/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt +++ b/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt @@ -52,7 +52,7 @@ blueprint-diff latest # reflect that update has completed as well. Like before, collect inventory from # it and use that collection for the next step. This should report that the # update completed, remove that update, and add one for another sled. -sled-update-rot 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 --slot-a 1.0.0 +sled-update-rot 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 --slot-b 1.0.0 --active-slot b --persistent-boot-preference b inventory-generate blueprint-plan latest latest blueprint-diff latest @@ -127,7 +127,7 @@ blueprint-diff latest # Now simulate the update completing successfully. # Like before, we should see a pending SP update for this sled. -sled-update-rot 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c --slot-a 1.0.0 +sled-update-rot 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c --slot-b 1.0.0 --active-slot b --persistent-boot-preference b inventory-generate blueprint-plan latest latest blueprint-diff latest @@ -175,9 +175,38 @@ inventory-generate blueprint-plan latest latest blueprint-diff latest -# Update the RoT on the last sled. 
+# Now let's simulate an RoT update that hasn't completed by setting the persistent +# boot preference to a different slot than the active one. This should cause the +# planner to mark it as impossible, and replace it. +set ignore-impossible-mgs-updates-since now +sled-update-rot d81c6a84-79b8-4958-ae41-ea46c9b19763 --slot-b 1.0.0 --active-slot a --persistent-boot-preference b +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +# An RoT update cannot continue if pending persistent boot preference is set. +# The slot B is set to a different version so it does not appear to be active +# and on the correct version, and the corresponding checks are triggered. +# Like before, this should cause the planner to mark it as impossible, and +# replace it. +set ignore-impossible-mgs-updates-since now +sled-update-rot d81c6a84-79b8-4958-ae41-ea46c9b19763 --slot-b 1.1.0 --active-slot b --persistent-boot-preference b --pending-persistent-boot-preference b +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +# We do the same for transient boot preference. Note that we need to set the +# pending-persistent-boot-preference back to None +set ignore-impossible-mgs-updates-since now +sled-update-rot d81c6a84-79b8-4958-ae41-ea46c9b19763 --slot-b 1.1.0 --active-slot b --persistent-boot-preference b --transient-boot-preference b --pending-persistent-boot-preference +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +# Update the RoT on the last sled. Note that we need to set the +# transient-boot-preference back to None # There should be one last pending SP update. -sled-update-rot d81c6a84-79b8-4958-ae41-ea46c9b19763 --slot-a 1.0.0 +sled-update-rot d81c6a84-79b8-4958-ae41-ea46c9b19763 --slot-b 1.0.0 --active-slot b --persistent-boot-preference b --transient-boot-preference inventory-generate blueprint-plan latest latest blueprint-diff latest diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds.txt b/dev-tools/reconfigurator-cli/tests/input/cmds.txt index 5eeb7d07793..f78d2e6c5c0 100644 --- a/dev-tools/reconfigurator-cli/tests/input/cmds.txt +++ b/dev-tools/reconfigurator-cli/tests/input/cmds.txt @@ -24,6 +24,14 @@ sled-update-rot dde1c0e2-b10d-4621-b420-f179f7a7a00a --slot-a 4.0.0 --slot-b inv sled-show dde1c0e2-b10d-4621-b420-f179f7a7a00a sled-update-rot dde1c0e2-b10d-4621-b420-f179f7a7a00a --slot-a 4.0.0 --slot-b 5.0.0 sled-show dde1c0e2-b10d-4621-b420-f179f7a7a00a +sled-update-rot dde1c0e2-b10d-4621-b420-f179f7a7a00a --slot-b 6.0.0 --active-slot b +sled-show dde1c0e2-b10d-4621-b420-f179f7a7a00a +sled-update-rot dde1c0e2-b10d-4621-b420-f179f7a7a00a --slot-b 6.0.0 --active-slot b --persistent-boot-preference b +sled-show dde1c0e2-b10d-4621-b420-f179f7a7a00a +sled-update-rot dde1c0e2-b10d-4621-b420-f179f7a7a00a --slot-b 6.0.0 --active-slot a --persistent-boot-preference a --pending-persistent-boot-preference b --transient-boot-preference a +sled-show dde1c0e2-b10d-4621-b420-f179f7a7a00a +sled-update-rot dde1c0e2-b10d-4621-b420-f179f7a7a00a --slot-a 7.0.0 --pending-persistent-boot-preference --transient-boot-preference +sled-show dde1c0e2-b10d-4621-b420-f179f7a7a00a sled-update-sp dde1c0e2-b10d-4621-b420-f179f7a7a00a sled-update-sp dde1c0e2-b10d-4621-b420-f179f7a7a00a --active 1.0.0 diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-stdout index 52b0cc3f939..9fa0f4469bc 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-stdout +++ 
b/dev-tools/reconfigurator-cli/tests/output/cmds-stdout @@ -276,6 +276,166 @@ zpools (10): SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-e0f5c287-3296-4a35-b597-7452283ff329" }, disk_id: 0f13d3dd-1830-4a06-b664-e6f0473ba704 (physical_disk), policy: InService, state: Active } +> sled-update-rot dde1c0e2-b10d-4621-b420-f179f7a7a00a --slot-b 6.0.0 --active-slot b +set sled dde1c0e2-b10d-4621-b420-f179f7a7a00a RoT settings: slot b -> 6.0.0, active slot -> B + +> sled-show dde1c0e2-b10d-4621-b420-f179f7a7a00a +sled dde1c0e2-b10d-4621-b420-f179f7a7a00a (in service, active) +serial serial0 +subnet fd00:1122:3344:101::/64 +SP active version: Some("0.0.1") +SP inactive version: None +RoT bootloader stage 0 version: Some("0.0.1") +RoT bootloader stage 0 next version: None +RoT active slot: B +RoT slot A version: Some("4.0.0") +RoT slot B version: Some("6.0.0") +RoT persistent boot preference: A +RoT pending persistent boot preference: None +RoT transient boot preference: None +zpools (10): + 674c6591-11be-44f2-9df1-db3bb663ec01 (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-674c6591-11be-44f2-9df1-db3bb663ec01" }, disk_id: a52a7c57-7fd0-4139-8293-bda299523c53 (physical_disk), policy: InService, state: Active } + 677dd944-6761-4a89-8606-4d7fe485a63c (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-677dd944-6761-4a89-8606-4d7fe485a63c" }, disk_id: fcf54220-3ff4-463e-b4a2-58447f51b68c (physical_disk), policy: InService, state: Active } + 70e81eac-6ed4-4c2d-b16a-fabe2aec56fc (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-70e81eac-6ed4-4c2d-b16a-fabe2aec56fc" }, disk_id: 42643377-e4d1-41a0-ac32-38d6e56cb22a (physical_disk), policy: InService, state: Active } + 7b26c659-bf8f-4c60-ab75-fd2dd8ef5866 (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-7b26c659-bf8f-4c60-ab75-fd2dd8ef5866" }, disk_id: 5b2df08c-ea6a-4771-8363-80031249c97b (physical_disk), policy: InService, state: Active } + 8e0008d0-9313-4caf-bc20-305ccce29846 (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-8e0008d0-9313-4caf-bc20-305ccce29846" }, disk_id: 0f5e7fc1-8d87-45f8-a00e-f5127b7a3905 (physical_disk), policy: InService, state: Active } + 929e328a-dd25-447d-9af7-6e2216adf4aa (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-929e328a-dd25-447d-9af7-6e2216adf4aa" }, disk_id: f62e3201-e89b-4667-9707-e49f86b9df07 (physical_disk), policy: InService, state: Active } + 9a25ff89-5446-4233-bf58-20a24c80aa58 (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-9a25ff89-5446-4233-bf58-20a24c80aa58" }, disk_id: 49b87668-e08b-4939-91f7-a82612e2ebff (physical_disk), policy: InService, state: Active } + a9cd1fe6-f1ba-4227-bff7-978992c3d6ad (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-a9cd1fe6-f1ba-4227-bff7-978992c3d6ad" }, disk_id: dcde393a-3ac6-4e98-8833-012787e73e15 (physical_disk), policy: InService, state: Active } + b3ede1e1-3264-4b21-8c7d-9ea5d3715210 (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-b3ede1e1-3264-4b21-8c7d-9ea5d3715210" }, 
disk_id: 4863117c-b77d-4dbc-996d-d18ddf0f5ff7 (physical_disk), policy: InService, state: Active } + e0f5c287-3296-4a35-b597-7452283ff329 (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-e0f5c287-3296-4a35-b597-7452283ff329" }, disk_id: 0f13d3dd-1830-4a06-b664-e6f0473ba704 (physical_disk), policy: InService, state: Active } + + +> sled-update-rot dde1c0e2-b10d-4621-b420-f179f7a7a00a --slot-b 6.0.0 --active-slot b --persistent-boot-preference b +set sled dde1c0e2-b10d-4621-b420-f179f7a7a00a RoT settings: slot b -> 6.0.0, active slot -> B, persistent boot preference -> B + +> sled-show dde1c0e2-b10d-4621-b420-f179f7a7a00a +sled dde1c0e2-b10d-4621-b420-f179f7a7a00a (in service, active) +serial serial0 +subnet fd00:1122:3344:101::/64 +SP active version: Some("0.0.1") +SP inactive version: None +RoT bootloader stage 0 version: Some("0.0.1") +RoT bootloader stage 0 next version: None +RoT active slot: B +RoT slot A version: Some("4.0.0") +RoT slot B version: Some("6.0.0") +RoT persistent boot preference: B +RoT pending persistent boot preference: None +RoT transient boot preference: None +zpools (10): + 674c6591-11be-44f2-9df1-db3bb663ec01 (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-674c6591-11be-44f2-9df1-db3bb663ec01" }, disk_id: a52a7c57-7fd0-4139-8293-bda299523c53 (physical_disk), policy: InService, state: Active } + 677dd944-6761-4a89-8606-4d7fe485a63c (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-677dd944-6761-4a89-8606-4d7fe485a63c" }, disk_id: fcf54220-3ff4-463e-b4a2-58447f51b68c (physical_disk), policy: InService, state: Active } + 70e81eac-6ed4-4c2d-b16a-fabe2aec56fc (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-70e81eac-6ed4-4c2d-b16a-fabe2aec56fc" }, disk_id: 42643377-e4d1-41a0-ac32-38d6e56cb22a (physical_disk), policy: InService, state: Active } + 7b26c659-bf8f-4c60-ab75-fd2dd8ef5866 (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-7b26c659-bf8f-4c60-ab75-fd2dd8ef5866" }, disk_id: 5b2df08c-ea6a-4771-8363-80031249c97b (physical_disk), policy: InService, state: Active } + 8e0008d0-9313-4caf-bc20-305ccce29846 (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-8e0008d0-9313-4caf-bc20-305ccce29846" }, disk_id: 0f5e7fc1-8d87-45f8-a00e-f5127b7a3905 (physical_disk), policy: InService, state: Active } + 929e328a-dd25-447d-9af7-6e2216adf4aa (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-929e328a-dd25-447d-9af7-6e2216adf4aa" }, disk_id: f62e3201-e89b-4667-9707-e49f86b9df07 (physical_disk), policy: InService, state: Active } + 9a25ff89-5446-4233-bf58-20a24c80aa58 (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-9a25ff89-5446-4233-bf58-20a24c80aa58" }, disk_id: 49b87668-e08b-4939-91f7-a82612e2ebff (physical_disk), policy: InService, state: Active } + a9cd1fe6-f1ba-4227-bff7-978992c3d6ad (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-a9cd1fe6-f1ba-4227-bff7-978992c3d6ad" }, disk_id: dcde393a-3ac6-4e98-8833-012787e73e15 (physical_disk), policy: InService, state: Active } + b3ede1e1-3264-4b21-8c7d-9ea5d3715210 (zpool) + SledDisk { 
disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-b3ede1e1-3264-4b21-8c7d-9ea5d3715210" }, disk_id: 4863117c-b77d-4dbc-996d-d18ddf0f5ff7 (physical_disk), policy: InService, state: Active } + e0f5c287-3296-4a35-b597-7452283ff329 (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-e0f5c287-3296-4a35-b597-7452283ff329" }, disk_id: 0f13d3dd-1830-4a06-b664-e6f0473ba704 (physical_disk), policy: InService, state: Active } + + +> sled-update-rot dde1c0e2-b10d-4621-b420-f179f7a7a00a --slot-b 6.0.0 --active-slot a --persistent-boot-preference a --pending-persistent-boot-preference b --transient-boot-preference a +set sled dde1c0e2-b10d-4621-b420-f179f7a7a00a RoT settings: slot b -> 6.0.0, active slot -> A, persistent boot preference -> A, pending persistent boot preference -> Some(B), transient boot preference -> Some(A) + +> sled-show dde1c0e2-b10d-4621-b420-f179f7a7a00a +sled dde1c0e2-b10d-4621-b420-f179f7a7a00a (in service, active) +serial serial0 +subnet fd00:1122:3344:101::/64 +SP active version: Some("0.0.1") +SP inactive version: None +RoT bootloader stage 0 version: Some("0.0.1") +RoT bootloader stage 0 next version: None +RoT active slot: A +RoT slot A version: Some("4.0.0") +RoT slot B version: Some("6.0.0") +RoT persistent boot preference: A +RoT pending persistent boot preference: Some(B) +RoT transient boot preference: Some(A) +zpools (10): + 674c6591-11be-44f2-9df1-db3bb663ec01 (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-674c6591-11be-44f2-9df1-db3bb663ec01" }, disk_id: a52a7c57-7fd0-4139-8293-bda299523c53 (physical_disk), policy: InService, state: Active } + 677dd944-6761-4a89-8606-4d7fe485a63c (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-677dd944-6761-4a89-8606-4d7fe485a63c" }, disk_id: fcf54220-3ff4-463e-b4a2-58447f51b68c (physical_disk), policy: InService, state: Active } + 70e81eac-6ed4-4c2d-b16a-fabe2aec56fc (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-70e81eac-6ed4-4c2d-b16a-fabe2aec56fc" }, disk_id: 42643377-e4d1-41a0-ac32-38d6e56cb22a (physical_disk), policy: InService, state: Active } + 7b26c659-bf8f-4c60-ab75-fd2dd8ef5866 (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-7b26c659-bf8f-4c60-ab75-fd2dd8ef5866" }, disk_id: 5b2df08c-ea6a-4771-8363-80031249c97b (physical_disk), policy: InService, state: Active } + 8e0008d0-9313-4caf-bc20-305ccce29846 (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-8e0008d0-9313-4caf-bc20-305ccce29846" }, disk_id: 0f5e7fc1-8d87-45f8-a00e-f5127b7a3905 (physical_disk), policy: InService, state: Active } + 929e328a-dd25-447d-9af7-6e2216adf4aa (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-929e328a-dd25-447d-9af7-6e2216adf4aa" }, disk_id: f62e3201-e89b-4667-9707-e49f86b9df07 (physical_disk), policy: InService, state: Active } + 9a25ff89-5446-4233-bf58-20a24c80aa58 (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-9a25ff89-5446-4233-bf58-20a24c80aa58" }, disk_id: 49b87668-e08b-4939-91f7-a82612e2ebff (physical_disk), policy: InService, state: Active } + a9cd1fe6-f1ba-4227-bff7-978992c3d6ad (zpool) + SledDisk { 
disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-a9cd1fe6-f1ba-4227-bff7-978992c3d6ad" }, disk_id: dcde393a-3ac6-4e98-8833-012787e73e15 (physical_disk), policy: InService, state: Active } + b3ede1e1-3264-4b21-8c7d-9ea5d3715210 (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-b3ede1e1-3264-4b21-8c7d-9ea5d3715210" }, disk_id: 4863117c-b77d-4dbc-996d-d18ddf0f5ff7 (physical_disk), policy: InService, state: Active } + e0f5c287-3296-4a35-b597-7452283ff329 (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-e0f5c287-3296-4a35-b597-7452283ff329" }, disk_id: 0f13d3dd-1830-4a06-b664-e6f0473ba704 (physical_disk), policy: InService, state: Active } + + +> sled-update-rot dde1c0e2-b10d-4621-b420-f179f7a7a00a --slot-a 7.0.0 --pending-persistent-boot-preference --transient-boot-preference +set sled dde1c0e2-b10d-4621-b420-f179f7a7a00a RoT settings: slot a -> 7.0.0, pending persistent boot preference -> None, transient boot preference -> None + +> sled-show dde1c0e2-b10d-4621-b420-f179f7a7a00a +sled dde1c0e2-b10d-4621-b420-f179f7a7a00a (in service, active) +serial serial0 +subnet fd00:1122:3344:101::/64 +SP active version: Some("0.0.1") +SP inactive version: None +RoT bootloader stage 0 version: Some("0.0.1") +RoT bootloader stage 0 next version: None +RoT active slot: A +RoT slot A version: Some("7.0.0") +RoT slot B version: Some("6.0.0") +RoT persistent boot preference: A +RoT pending persistent boot preference: None +RoT transient boot preference: None +zpools (10): + 674c6591-11be-44f2-9df1-db3bb663ec01 (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-674c6591-11be-44f2-9df1-db3bb663ec01" }, disk_id: a52a7c57-7fd0-4139-8293-bda299523c53 (physical_disk), policy: InService, state: Active } + 677dd944-6761-4a89-8606-4d7fe485a63c (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-677dd944-6761-4a89-8606-4d7fe485a63c" }, disk_id: fcf54220-3ff4-463e-b4a2-58447f51b68c (physical_disk), policy: InService, state: Active } + 70e81eac-6ed4-4c2d-b16a-fabe2aec56fc (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-70e81eac-6ed4-4c2d-b16a-fabe2aec56fc" }, disk_id: 42643377-e4d1-41a0-ac32-38d6e56cb22a (physical_disk), policy: InService, state: Active } + 7b26c659-bf8f-4c60-ab75-fd2dd8ef5866 (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-7b26c659-bf8f-4c60-ab75-fd2dd8ef5866" }, disk_id: 5b2df08c-ea6a-4771-8363-80031249c97b (physical_disk), policy: InService, state: Active } + 8e0008d0-9313-4caf-bc20-305ccce29846 (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-8e0008d0-9313-4caf-bc20-305ccce29846" }, disk_id: 0f5e7fc1-8d87-45f8-a00e-f5127b7a3905 (physical_disk), policy: InService, state: Active } + 929e328a-dd25-447d-9af7-6e2216adf4aa (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-929e328a-dd25-447d-9af7-6e2216adf4aa" }, disk_id: f62e3201-e89b-4667-9707-e49f86b9df07 (physical_disk), policy: InService, state: Active } + 9a25ff89-5446-4233-bf58-20a24c80aa58 (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: 
"serial-9a25ff89-5446-4233-bf58-20a24c80aa58" }, disk_id: 49b87668-e08b-4939-91f7-a82612e2ebff (physical_disk), policy: InService, state: Active } + a9cd1fe6-f1ba-4227-bff7-978992c3d6ad (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-a9cd1fe6-f1ba-4227-bff7-978992c3d6ad" }, disk_id: dcde393a-3ac6-4e98-8833-012787e73e15 (physical_disk), policy: InService, state: Active } + b3ede1e1-3264-4b21-8c7d-9ea5d3715210 (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-b3ede1e1-3264-4b21-8c7d-9ea5d3715210" }, disk_id: 4863117c-b77d-4dbc-996d-d18ddf0f5ff7 (physical_disk), policy: InService, state: Active } + e0f5c287-3296-4a35-b597-7452283ff329 (zpool) + SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-e0f5c287-3296-4a35-b597-7452283ff329" }, disk_id: 0f13d3dd-1830-4a06-b664-e6f0473ba704 (physical_disk), policy: InService, state: Active } + + > sled-update-sp dde1c0e2-b10d-4621-b420-f179f7a7a00a @@ -291,8 +451,8 @@ SP inactive version: None RoT bootloader stage 0 version: Some("0.0.1") RoT bootloader stage 0 next version: None RoT active slot: A -RoT slot A version: Some("4.0.0") -RoT slot B version: Some("5.0.0") +RoT slot A version: Some("7.0.0") +RoT slot B version: Some("6.0.0") RoT persistent boot preference: A RoT pending persistent boot preference: None RoT transient boot preference: None @@ -331,8 +491,8 @@ SP inactive version: Some("2.0.0") RoT bootloader stage 0 version: Some("0.0.1") RoT bootloader stage 0 next version: None RoT active slot: A -RoT slot A version: Some("4.0.0") -RoT slot B version: Some("5.0.0") +RoT slot A version: Some("7.0.0") +RoT slot B version: Some("6.0.0") RoT persistent boot preference: A RoT pending persistent boot preference: None RoT transient boot preference: None @@ -371,8 +531,8 @@ SP inactive version: Some("2.0.0") RoT bootloader stage 0 version: Some("0.0.1") RoT bootloader stage 0 next version: None RoT active slot: A -RoT slot A version: Some("4.0.0") -RoT slot B version: Some("5.0.0") +RoT slot A version: Some("7.0.0") +RoT slot B version: Some("6.0.0") RoT persistent boot preference: A RoT pending persistent boot preference: None RoT transient boot preference: None @@ -411,8 +571,8 @@ SP inactive version: None RoT bootloader stage 0 version: Some("0.0.1") RoT bootloader stage 0 next version: None RoT active slot: A -RoT slot A version: Some("4.0.0") -RoT slot B version: Some("5.0.0") +RoT slot A version: Some("7.0.0") +RoT slot B version: Some("6.0.0") RoT persistent boot preference: A RoT pending persistent boot preference: None RoT transient boot preference: None @@ -451,8 +611,8 @@ SP inactive version: Some("5.0.0") RoT bootloader stage 0 version: Some("0.0.1") RoT bootloader stage 0 next version: None RoT active slot: A -RoT slot A version: Some("4.0.0") -RoT slot B version: Some("5.0.0") +RoT slot A version: Some("7.0.0") +RoT slot B version: Some("6.0.0") RoT persistent boot preference: A RoT pending persistent boot preference: None RoT transient boot preference: None @@ -494,8 +654,8 @@ SP inactive version: Some("5.0.0") RoT bootloader stage 0 version: Some("1.0.0") RoT bootloader stage 0 next version: None RoT active slot: A -RoT slot A version: Some("4.0.0") -RoT slot B version: Some("5.0.0") +RoT slot A version: Some("7.0.0") +RoT slot B version: Some("6.0.0") RoT persistent boot preference: A RoT pending persistent boot preference: None RoT transient boot preference: 
None @@ -534,8 +694,8 @@ SP inactive version: Some("5.0.0") RoT bootloader stage 0 version: Some("1.0.0") RoT bootloader stage 0 next version: Some("2.0.0") RoT active slot: A -RoT slot A version: Some("4.0.0") -RoT slot B version: Some("5.0.0") +RoT slot A version: Some("7.0.0") +RoT slot B version: Some("6.0.0") RoT persistent boot preference: A RoT pending persistent boot preference: None RoT transient boot preference: None @@ -574,8 +734,8 @@ SP inactive version: Some("5.0.0") RoT bootloader stage 0 version: Some("3.0.0") RoT bootloader stage 0 next version: Some("2.0.0") RoT active slot: A -RoT slot A version: Some("4.0.0") -RoT slot B version: Some("5.0.0") +RoT slot A version: Some("7.0.0") +RoT slot B version: Some("6.0.0") RoT persistent boot preference: A RoT pending persistent boot preference: None RoT transient boot preference: None @@ -614,8 +774,8 @@ SP inactive version: Some("5.0.0") RoT bootloader stage 0 version: Some("4.0.0") RoT bootloader stage 0 next version: None RoT active slot: A -RoT slot A version: Some("4.0.0") -RoT slot B version: Some("5.0.0") +RoT slot A version: Some("7.0.0") +RoT slot B version: Some("6.0.0") RoT persistent boot preference: A RoT pending persistent boot preference: None RoT transient boot preference: None @@ -654,8 +814,8 @@ SP inactive version: Some("5.0.0") RoT bootloader stage 0 version: Some("4.0.0") RoT bootloader stage 0 next version: Some("5.0.0") RoT active slot: A -RoT slot A version: Some("4.0.0") -RoT slot B version: Some("5.0.0") +RoT slot A version: Some("7.0.0") +RoT slot B version: Some("6.0.0") RoT persistent boot preference: A RoT pending persistent boot preference: None RoT transient boot preference: None @@ -726,8 +886,8 @@ SP inactive version: Some("5.0.0") RoT bootloader stage 0 version: Some("4.0.0") RoT bootloader stage 0 next version: Some("5.0.0") RoT active slot: A -RoT slot A version: Some("4.0.0") -RoT slot B version: Some("5.0.0") +RoT slot A version: Some("7.0.0") +RoT slot B version: Some("6.0.0") RoT persistent boot preference: A RoT pending persistent boot preference: None RoT transient boot preference: None diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout index 7ba66f41052..40014409995 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout @@ -772,8 +772,8 @@ external DNS: > # reflect that update has completed as well. Like before, collect inventory from > # it and use that collection for the next step. This should report that the > # update completed, remove that update, and add one for another sled. -> sled-update-rot 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 --slot-a 1.0.0 -set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 RoT settings: slot a -> 1.0.0 +> sled-update-rot 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 --slot-b 1.0.0 --active-slot b --persistent-boot-preference b +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 RoT settings: slot b -> 1.0.0, active slot -> B, persistent boot preference -> B > inventory-generate generated inventory collection 61f451b3-2121-4ed6-91c7-a550054f6c21 from configured sleds @@ -2582,8 +2582,8 @@ external DNS: > # Now simulate the update completing successfully. > # Like before, we should see a pending SP update for this sled. 
-> sled-update-rot 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c --slot-a 1.0.0 -set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c RoT settings: slot a -> 1.0.0 +> sled-update-rot 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c --slot-b 1.0.0 --active-slot b --persistent-boot-preference b +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c RoT settings: slot b -> 1.0.0, active slot -> B, persistent boot preference -> B > inventory-generate generated inventory collection 005f6a30-7f65-4593-9f78-ee68f766f42b from configured sleds @@ -3833,13 +3833,612 @@ external DNS: -> # Update the RoT on the last sled. +> # Now let's simulate an RoT update that hasn't completed by setting the persistent +> # boot preference to a different slot than the active one. This should cause the +> # planner to mark it as impossible, and replace it. +> set ignore-impossible-mgs-updates-since now +ignoring impossible MGS updates since + +> sled-update-rot d81c6a84-79b8-4958-ae41-ea46c9b19763 --slot-b 1.0.0 --active-slot a --persistent-boot-preference b +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 RoT settings: slot b -> 1.0.0, active slot -> A, persistent boot preference -> B + +> inventory-generate +generated inventory collection 34c3258c-b2ab-4da9-9720-41a3a703c3d7 from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b +INFO MGS-driven update impossible (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, component: rot, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 +INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: B, 
expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("1.0.0")), component: rot, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 +INFO reached maximum number of pending MGS-driven updates, max: 1 +generated blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 based on parent blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 +planning report for blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746: +chicken switches: + add zones with mupdate override: false + +* 1 pending MGS update: + * model2:serial2: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("1.0.0")), expected_persistent_boot_preference: B, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) +* zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) + + +> blueprint-diff latest +from: blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 +to: blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + PENDING MGS UPDATES: + + Pending MGS-managed updates (all baseboards): + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + sp_type slot part_number serial_number artifact_hash artifact_version details + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +* sled 2 model2 serial2 d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02 1.0.0 - Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) + └─ + Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("1.0.0")), expected_persistent_boot_preference: B, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) + AAAA fd00:1122:3344:101::27 + name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) + AAAA 
fd00:1122:3344:101::22 + name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) + AAAA fd00:1122:3344:103::22 + name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) + AAAA fd00:1122:3344:2::1 + name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) + AAAA fd00:1122:3344:102::22 + name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) + AAAA fd00:1122:3344:101::25 + name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) + AAAA fd00:1122:3344:102::21 + name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) + AAAA fd00:1122:3344:103::26 + name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) + AAAA fd00:1122:3344:103::24 + name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) + AAAA fd00:1122:3344:103::27 + name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) + AAAA fd00:1122:3344:101::23 + name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) + AAAA fd00:1122:3344:102::28 + name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) + AAAA fd00:1122:3344:101::1 + name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) + AAAA fd00:1122:3344:1::1 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + name: _clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse._tcp (records: 1) + SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal + name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) + SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal + name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) + SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal + name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) + SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal + name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) + SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal + name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) + SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal + name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) + SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal + name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) + SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal + name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) + SRV 
port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal + name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) + SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + name: _internal-ntp._tcp (records: 3) + SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal + name: _nexus._tcp (records: 3) + SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 3) + SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal + SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal + SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) + AAAA fd00:1122:3344:102::25 + name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) + AAAA fd00:1122:3344:101::24 + name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) + AAAA fd00:1122:3344:102::26 + name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) + AAAA fd00:1122:3344:103::1 + name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) + AAAA fd00:1122:3344:101::26 + name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) + AAAA fd00:1122:3344:102::27 + name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) + AAAA fd00:1122:3344:3::1 + name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) + AAAA fd00:1122:3344:103::21 + name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) + AAAA fd00:1122:3344:103::25 + name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) + AAAA fd00:1122:3344:103::23 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.2 + A 192.0.2.3 + A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + + +> # An RoT update cannot continue if pending persistent boot preference is set. +> # The slot B is set to a different version so it does not appear to be active +> # and on the correct version, and the corresponding checks are triggered. 
+> # Like before, this should cause the planner to mark it as impossible, and +> # replace it. +> set ignore-impossible-mgs-updates-since now +ignoring impossible MGS updates since + +> sled-update-rot d81c6a84-79b8-4958-ae41-ea46c9b19763 --slot-b 1.1.0 --active-slot b --persistent-boot-preference b --pending-persistent-boot-preference b +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 RoT settings: slot b -> 1.1.0, active slot -> B, persistent boot preference -> B, pending persistent boot preference -> Some(B) + +> inventory-generate +generated inventory collection 5e106b73-6a14-4955-b8a8-a4f8afed6405 from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b +INFO MGS-driven update impossible (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: B, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("1.0.0")), component: rot, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 +INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: Some(B), expected_persistent_boot_preference: B, expected_active_slot: ExpectedActiveRotSlot { slot: B, version: ArtifactVersion("1.1.0") }, expected_inactive_version: Version(ArtifactVersion("0.0.2")), component: rot, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 +INFO reached maximum number of pending MGS-driven updates, max: 1 +generated blueprint b82656b0-a9be-433d-83d0-e2bdf371777a based on parent blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 +planning report for blueprint b82656b0-a9be-433d-83d0-e2bdf371777a: +chicken switches: + add zones with mupdate override: false + +* 1 pending MGS update: + * model2:serial2: 
Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: B, version: ArtifactVersion("1.1.0") }, expected_inactive_version: Version(ArtifactVersion("0.0.2")), expected_persistent_boot_preference: B, expected_pending_persistent_boot_preference: Some(B), expected_transient_boot_preference: None }) +* zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) + + +> blueprint-diff latest +from: blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 +to: blueprint b82656b0-a9be-433d-83d0-e2bdf371777a + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + PENDING MGS UPDATES: + + Pending MGS-managed updates (all baseboards): + --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +* sled 2 model2 serial2 d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02 1.0.0 - Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("1.0.0")), expected_persistent_boot_preference: B, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) + └─ + Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: B, version: ArtifactVersion("1.1.0") }, expected_inactive_version: Version(ArtifactVersion("0.0.2")), expected_persistent_boot_preference: B, expected_pending_persistent_boot_preference: Some(B), expected_transient_boot_preference: None }) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) + AAAA fd00:1122:3344:101::27 + name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) + AAAA fd00:1122:3344:101::22 + name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) + AAAA fd00:1122:3344:103::22 + name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) + AAAA fd00:1122:3344:2::1 + name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) + AAAA fd00:1122:3344:102::22 + name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) + AAAA fd00:1122:3344:101::25 + 
name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) + AAAA fd00:1122:3344:102::21 + name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) + AAAA fd00:1122:3344:103::26 + name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) + AAAA fd00:1122:3344:103::24 + name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) + AAAA fd00:1122:3344:103::27 + name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) + AAAA fd00:1122:3344:101::23 + name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) + AAAA fd00:1122:3344:102::28 + name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) + AAAA fd00:1122:3344:101::1 + name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) + AAAA fd00:1122:3344:1::1 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + name: _clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse._tcp (records: 1) + SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal + name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) + SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal + name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) + SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal + name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) + SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal + name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) + SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal + name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) + SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal + name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) + SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal + name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) + SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal + name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) + SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal + name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) + SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + name: _internal-ntp._tcp 
(records: 3) + SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal + name: _nexus._tcp (records: 3) + SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 3) + SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal + SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal + SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) + AAAA fd00:1122:3344:102::25 + name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) + AAAA fd00:1122:3344:101::24 + name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) + AAAA fd00:1122:3344:102::26 + name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) + AAAA fd00:1122:3344:103::1 + name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) + AAAA fd00:1122:3344:101::26 + name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) + AAAA fd00:1122:3344:102::27 + name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) + AAAA fd00:1122:3344:3::1 + name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) + AAAA fd00:1122:3344:103::21 + name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) + AAAA fd00:1122:3344:103::25 + name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) + AAAA fd00:1122:3344:103::23 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.2 + A 192.0.2.3 + A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + + +> # We do the same for transient boot preference. 
Note that we need to set the +> # pending-persistent-boot-preference back to None +> set ignore-impossible-mgs-updates-since now +ignoring impossible MGS updates since + +> sled-update-rot d81c6a84-79b8-4958-ae41-ea46c9b19763 --slot-b 1.1.0 --active-slot b --persistent-boot-preference b --transient-boot-preference b --pending-persistent-boot-preference +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 RoT settings: slot b -> 1.1.0, active slot -> B, persistent boot preference -> B, pending persistent boot preference -> None, transient boot preference -> Some(B) + +> inventory-generate +generated inventory collection 36ef425f-a672-4bf4-8d29-14815a84ccad from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b +INFO MGS-driven update impossible (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: Some(B), expected_persistent_boot_preference: B, expected_active_slot: ExpectedActiveRotSlot { slot: B, version: ArtifactVersion("1.1.0") }, expected_inactive_version: Version(ArtifactVersion("0.0.2")), component: rot, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 +INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: Some(B), expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: B, expected_active_slot: ExpectedActiveRotSlot { slot: B, version: ArtifactVersion("1.1.0") }, expected_inactive_version: Version(ArtifactVersion("0.0.2")), component: rot, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 +INFO reached maximum number of pending MGS-driven updates, max: 1 +generated blueprint 31c84831-be52-4630-bc3f-128d72cd8f22 based on parent blueprint b82656b0-a9be-433d-83d0-e2bdf371777a +planning report for blueprint 31c84831-be52-4630-bc3f-128d72cd8f22: +chicken switches: + add zones with mupdate override: false + +* 1 
pending MGS update: + * model2:serial2: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: B, version: ArtifactVersion("1.1.0") }, expected_inactive_version: Version(ArtifactVersion("0.0.2")), expected_persistent_boot_preference: B, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: Some(B) }) +* zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) + + +> blueprint-diff latest +from: blueprint b82656b0-a9be-433d-83d0-e2bdf371777a +to: blueprint 31c84831-be52-4630-bc3f-128d72cd8f22 + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + PENDING MGS UPDATES: + + Pending MGS-managed updates (all baseboards): + --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +* sled 2 model2 serial2 d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02 1.0.0 - Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: B, version: ArtifactVersion("1.1.0") }, expected_inactive_version: Version(ArtifactVersion("0.0.2")), expected_persistent_boot_preference: B, expected_pending_persistent_boot_preference: Some(B), expected_transient_boot_preference: None }) + └─ + Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: B, version: ArtifactVersion("1.1.0") }, expected_inactive_version: Version(ArtifactVersion("0.0.2")), expected_persistent_boot_preference: B, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: Some(B) }) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) + AAAA fd00:1122:3344:101::27 + name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) + AAAA fd00:1122:3344:101::22 + name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) + AAAA fd00:1122:3344:103::22 + name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) + AAAA fd00:1122:3344:2::1 + name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) + AAAA fd00:1122:3344:102::22 + name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host 
(records: 1) + AAAA fd00:1122:3344:101::25 + name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) + AAAA fd00:1122:3344:102::21 + name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) + AAAA fd00:1122:3344:103::26 + name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) + AAAA fd00:1122:3344:103::24 + name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) + AAAA fd00:1122:3344:103::27 + name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) + AAAA fd00:1122:3344:101::23 + name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) + AAAA fd00:1122:3344:102::28 + name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) + AAAA fd00:1122:3344:101::1 + name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) + AAAA fd00:1122:3344:1::1 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + name: _clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse._tcp (records: 1) + SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal + name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) + SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal + name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) + SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal + name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) + SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal + name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) + SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal + name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) + SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal + name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) + SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal + name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) + SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal + name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) + SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal + name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) + SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 
f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + name: _internal-ntp._tcp (records: 3) + SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal + name: _nexus._tcp (records: 3) + SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 3) + SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal + SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal + SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) + AAAA fd00:1122:3344:102::25 + name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) + AAAA fd00:1122:3344:101::24 + name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) + AAAA fd00:1122:3344:102::26 + name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) + AAAA fd00:1122:3344:103::1 + name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) + AAAA fd00:1122:3344:101::26 + name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) + AAAA fd00:1122:3344:102::27 + name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) + AAAA fd00:1122:3344:3::1 + name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) + AAAA fd00:1122:3344:103::21 + name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) + AAAA fd00:1122:3344:103::25 + name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) + AAAA fd00:1122:3344:103::23 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.2 + A 192.0.2.3 + A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + + +> # Update the RoT on the last sled. Note that we need to set the +> # transient-boot-preference back to None > # There should be one last pending SP update. 
-> sled-update-rot d81c6a84-79b8-4958-ae41-ea46c9b19763 --slot-a 1.0.0 -set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 RoT settings: slot a -> 1.0.0 +> sled-update-rot d81c6a84-79b8-4958-ae41-ea46c9b19763 --slot-b 1.0.0 --active-slot b --persistent-boot-preference b --transient-boot-preference +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 RoT settings: slot b -> 1.0.0, active slot -> B, persistent boot preference -> B, transient boot preference -> None > inventory-generate -generated inventory collection 34c3258c-b2ab-4da9-9720-41a3a703c3d7 from configured sleds +generated inventory collection 70bea701-e212-4877-8e6c-925f1f73ddd2 from configured sleds > blueprint-plan latest latest INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 @@ -3849,11 +4448,11 @@ INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noo INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, component: rot, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 +INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: Some(B), expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: B, expected_active_slot: ExpectedActiveRotSlot { slot: B, version: ArtifactVersion("1.1.0") }, expected_inactive_version: Version(ArtifactVersion("0.0.2")), component: rot, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 68465b8e3f808f475510b525cfd62086d37ddd57688bd854184fdafb2b2198a4, expected_inactive_version: NoValidVersion, expected_active_version: 0.0.1, component: sp, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 based on parent blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 -planning report for blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746: +generated blueprint 778e3f3a-58b1-4a5e-acff-d23c5d7124c2 based on parent blueprint 31c84831-be52-4630-bc3f-128d72cd8f22 +planning report for blueprint 778e3f3a-58b1-4a5e-acff-d23c5d7124c2: chicken switches: add zones with mupdate override: false @@ -3863,8 +4462,8 @@ chicken switches: > blueprint-diff 
latest -from: blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 -to: blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 +from: blueprint 31c84831-be52-4630-bc3f-128d72cd8f22 +to: blueprint 778e3f3a-58b1-4a5e-acff-d23c5d7124c2 COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) @@ -3882,11 +4481,11 @@ to: blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 PENDING MGS UPDATES: Pending MGS-managed updates (all baseboards): - ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - sp_type slot part_number serial_number artifact_hash artifact_version details - ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -* sled 2 model2 serial2 - d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02 1.0.0 - Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) - └─ + 68465b8e3f808f475510b525cfd62086d37ddd57688bd854184fdafb2b2198a4 + Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) + ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +* sled 2 model2 serial2 - d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02 1.0.0 - Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: B, version: ArtifactVersion("1.1.0") }, expected_inactive_version: Version(ArtifactVersion("0.0.2")), expected_persistent_boot_preference: B, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: Some(B) }) + └─ + 68465b8e3f808f475510b525cfd62086d37ddd57688bd854184fdafb2b2198a4 + Sp(PendingMgsUpdateSpDetails { 
expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) internal DNS: @@ -4034,7 +4633,7 @@ external DNS: set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 SP versions: active -> 1.0.0 > inventory-generate -generated inventory collection 5e106b73-6a14-4955-b8a8-a4f8afed6405 from configured sleds +generated inventory collection 8187f847-81c7-4750-88ac-d691937461af from configured sleds > blueprint-plan latest latest INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 @@ -4047,8 +4646,8 @@ INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noo INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 68465b8e3f808f475510b525cfd62086d37ddd57688bd854184fdafb2b2198a4, expected_inactive_version: NoValidVersion, expected_active_version: 0.0.1, component: sp, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89, sled_agent_address: [fd00:1122:3344:103::1]:12345, expected_inactive_phase_2_hash: f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint b82656b0-a9be-433d-83d0-e2bdf371777a based on parent blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 -planning report for blueprint b82656b0-a9be-433d-83d0-e2bdf371777a: +generated blueprint 386a7ec3-7c2e-43cf-8f00-999e91e1d5e6 based on parent blueprint 778e3f3a-58b1-4a5e-acff-d23c5d7124c2 +planning report for blueprint 386a7ec3-7c2e-43cf-8f00-999e91e1d5e6: chicken switches: add zones with mupdate override: false @@ -4058,8 +4657,8 @@ chicken switches: > blueprint-diff latest -from: blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 -to: blueprint b82656b0-a9be-433d-83d0-e2bdf371777a +from: blueprint 778e3f3a-58b1-4a5e-acff-d23c5d7124c2 +to: blueprint 386a7ec3-7c2e-43cf-8f00-999e91e1d5e6 MODIFIED SLEDS: @@ -4292,7 +4891,7 @@ set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 host phase 2 details: boot_disk -> set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 host phase 1 details: active -> B, B -> 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89 > inventory-generate -generated inventory collection 36ef425f-a672-4bf4-8d29-14815a84ccad from configured sleds +generated inventory collection 45c1c7bb-984a-43f7-bb3f-4a5437ed7b82 from configured sleds > # Do one more planning run. This should update one control plane zone. 
@@ -4308,8 +4907,8 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO ran out of boards for MGS-driven update -generated blueprint 31c84831-be52-4630-bc3f-128d72cd8f22 based on parent blueprint b82656b0-a9be-433d-83d0-e2bdf371777a -planning report for blueprint 31c84831-be52-4630-bc3f-128d72cd8f22: +generated blueprint e54a0836-53e1-4948-a3af-0b77165289b5 based on parent blueprint 386a7ec3-7c2e-43cf-8f00-999e91e1d5e6 +planning report for blueprint e54a0836-53e1-4948-a3af-0b77165289b5: chicken switches: add zones with mupdate override: false @@ -4319,8 +4918,8 @@ chicken switches: > blueprint-diff latest -from: blueprint b82656b0-a9be-433d-83d0-e2bdf371777a -to: blueprint 31c84831-be52-4630-bc3f-128d72cd8f22 +from: blueprint 386a7ec3-7c2e-43cf-8f00-999e91e1d5e6 +to: blueprint e54a0836-53e1-4948-a3af-0b77165289b5 MODIFIED SLEDS: diff --git a/nexus/reconfigurator/planning/src/system.rs b/nexus/reconfigurator/planning/src/system.rs index f09315dd6ac..d9ee2cf64e2 100644 --- a/nexus/reconfigurator/planning/src/system.rs +++ b/nexus/reconfigurator/planning/src/system.rs @@ -615,11 +615,10 @@ impl SystemDescription { pub fn sled_update_rot_versions( &mut self, sled_id: SledUuid, - slot_a_version: Option, - slot_b_version: Option, + overrides: RotStateOverrides, ) -> anyhow::Result<&mut Self> { let sled = self.get_sled_mut(sled_id)?; - sled.set_rot_versions(slot_a_version, slot_b_version); + sled.set_rot_versions(overrides); Ok(self) } @@ -1687,14 +1686,57 @@ impl Sled { /// Update the reported RoT versions /// - /// If either field is `None`, that field is _unchanged_. + /// If any of the overrides are `None`, that field is _unchanged_. // Note that this means there's no way to _unset_ the version. - fn set_rot_versions( - &mut self, - slot_a_version: Option, - slot_b_version: Option, - ) { - if let Some(slot_a_version) = slot_a_version { + fn set_rot_versions(&mut self, overrides: RotStateOverrides) { + let RotStateOverrides { + active_slot_override, + slot_a_version_override, + slot_b_version_override, + persistent_boot_preference_override, + pending_persistent_boot_preference_override, + transient_boot_preference_override, + } = overrides; + + if let Some((_slot, sp_state)) = self.inventory_sp.as_mut() { + match &mut sp_state.rot { + RotState::V3 { + active, + persistent_boot_preference, + pending_persistent_boot_preference, + transient_boot_preference, + .. 
+ } => { + if let Some(active_slot_override) = active_slot_override { + *active = active_slot_override; + } + if let Some(persistent_boot_preference_override) = + persistent_boot_preference_override + { + *persistent_boot_preference = + persistent_boot_preference_override; + } + + if let Some(pending_persistent_boot_preference_override) = + pending_persistent_boot_preference_override + { + *pending_persistent_boot_preference = + pending_persistent_boot_preference_override; + } + + if let Some(transient_boot_preference_override) = + transient_boot_preference_override + { + *transient_boot_preference = + transient_boot_preference_override; + } + } + // We will only support RotState::V3 + _ => unreachable!(), + }; + } + + if let Some(slot_a_version) = slot_a_version_override { match slot_a_version { ExpectedVersion::NoValidVersion => { self.rot_slot_a_caboose = None; @@ -1714,7 +1756,7 @@ impl Sled { } } - if let Some(slot_b_version) = slot_b_version { + if let Some(slot_b_version) = slot_b_version_override { match slot_b_version { ExpectedVersion::NoValidVersion => { self.rot_slot_b_caboose = None; @@ -1856,6 +1898,17 @@ impl Sled { } } +/// Settings that can be overriden in a simulated sled's RotState +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct RotStateOverrides { + pub active_slot_override: Option, + pub slot_a_version_override: Option, + pub slot_b_version_override: Option, + pub persistent_boot_preference_override: Option, + pub pending_persistent_boot_preference_override: Option>, + pub transient_boot_preference_override: Option>, +} + /// The visibility of a sled in the inventory. /// /// This enum can be used to simulate a sled temporarily dropping out and it not From 9577096abb2801be1280c46b323a501ba819b006 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Wed, 27 Aug 2025 16:54:07 -0700 Subject: [PATCH 06/38] fix omdb flake by adding redactor for why last activation was triggered (#8929) --- dev-tools/omdb/tests/successes.out | 190 ++++++++++++------------ dev-tools/omdb/tests/test_all_output.rs | 1 + 2 files changed, 96 insertions(+), 95 deletions(-) diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index 7e20b803e86..2dd0c267f7e 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -433,13 +433,13 @@ termination: Exited(0) stdout: task: "dns_config_internal" configured period: every m - last completed activation: , triggered by an explicit signal + last completed activation: , triggered by started at (s ago) and ran for ms last generation found: 1 task: "dns_servers_internal" configured period: every m - last completed activation: , triggered by an explicit signal + last completed activation: , triggered by started at (s ago) and ran for ms servers found: 1 @@ -448,7 +448,7 @@ task: "dns_servers_internal" task: "dns_propagation_internal" configured period: every m - last completed activation: , triggered by a dependent task completing + last completed activation: , triggered by started at (s ago) and ran for ms attempt to propagate generation: 1 @@ -458,13 +458,13 @@ task: "dns_propagation_internal" task: "dns_config_external" configured period: every m - last completed activation: , triggered by an explicit signal + last completed activation: , triggered by started at (s ago) and ran for ms last generation found: 2 task: "dns_servers_external" configured period: every m - last completed activation: , triggered by an explicit signal + last completed activation: , triggered by started at (s ago) and 
ran for ms servers found: 1 @@ -473,7 +473,7 @@ task: "dns_servers_external" task: "dns_propagation_external" configured period: every m - last completed activation: , triggered by a dependent task completing + last completed activation: , triggered by started at (s ago) and ran for ms attempt to propagate generation: 2 @@ -483,25 +483,25 @@ task: "dns_propagation_external" task: "nat_garbage_collector" configured period: every s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms last completion reported error: failed to resolve addresses for Dendrite services: proto error: no records found for Query { name: Name("_dendrite._tcp.control-plane.oxide.internal."), query_type: SRV, query_class: IN } task: "blueprint_loader" configured period: every m s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms last completion reported error: failed to read target blueprint: Internal Error: no target blueprint set task: "blueprint_executor" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms last completion reported error: no blueprint task: "abandoned_vmm_reaper" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms total abandoned VMMs found: 0 VMM records deleted: 0 @@ -510,50 +510,50 @@ task: "abandoned_vmm_reaper" task: "alert_dispatcher" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms alerts dispatched: 0 alerts with no receivers subscribed: 0 task: "bfd_manager" configured period: every s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms last completion reported error: failed to resolve addresses for Dendrite services: proto error: no records found for Query { name: Name("_dendrite._tcp.control-plane.oxide.internal."), query_type: SRV, query_class: IN } task: "blueprint_planner" configured period: every m - last completed activation: , triggered by a dependent task completing + last completed activation: , triggered by started at (s ago) and ran for ms blueprint planning explicitly disabled by config! 
task: "blueprint_rendezvous" configured period: every m - last completed activation: , triggered by a dependent task completing + last completed activation: , triggered by started at (s ago) and ran for ms last completion reported error: no blueprint task: "chicken_switches_watcher" configured period: every s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms warning: unknown background task: "chicken_switches_watcher" (don't know how to interpret details: Object {"chicken_switches_updated": Bool(false)}) task: "crdb_node_id_collector" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms last completion reported error: no blueprint task: "decommissioned_disk_cleaner" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms warning: unknown background task: "decommissioned_disk_cleaner" (don't know how to interpret details: Object {"deleted": Number(0), "error": Null, "error_count": Number(0), "found": Number(0), "not_ready_to_be_deleted": Number(0)}) task: "external_endpoints" configured period: every m - last completed activation: , triggered by an explicit signal + last completed activation: , triggered by started at (s ago) and ran for ms external API endpoints: 2 ('*' below marks default) @@ -569,7 +569,7 @@ task: "external_endpoints" task: "instance_reincarnation" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms instances eligible for reincarnation: 0 instance failed instances: 0 @@ -580,7 +580,7 @@ task: "instance_reincarnation" task: "instance_updater" configured period: every s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms task explicitly disabled by config! instances in need of updates: 0 @@ -593,7 +593,7 @@ task: "instance_updater" task: "instance_watcher" configured period: every s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms total instances checked: 0 checks completed: 0 @@ -605,7 +605,7 @@ task: "instance_watcher" task: "inventory_collection" configured period: every m - last completed activation: , triggered by an explicit signal + last completed activation: , triggered by started at (s ago) and ran for ms last collection id: ..................... 
last collection started: @@ -613,40 +613,40 @@ task: "inventory_collection" task: "lookup_region_port" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms total filled in ports: 0 errors: 0 task: "metrics_producer_gc" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms warning: unknown background task: "metrics_producer_gc" (don't know how to interpret details: Object {"expiration": String(""), "pruned": Array []}) task: "phantom_disks" configured period: every s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms number of phantom disks deleted: 0 number of phantom disk delete errors: 0 task: "physical_disk_adoption" configured period: every s - last completed activation: , triggered by a dependent task completing + last completed activation: , triggered by started at (s ago) and ran for ms last completion reported error: task disabled task: "read_only_region_replacement_start" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms total requests created ok: 0 errors: 0 task: "region_replacement" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms region replacement requests created ok: 0 region replacement start sagas started ok: 0 @@ -655,7 +655,7 @@ task: "region_replacement" task: "region_replacement_driver" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms region replacement drive sagas started ok: 0 region replacement finish sagas started ok: 0 @@ -663,21 +663,21 @@ task: "region_replacement_driver" task: "region_snapshot_replacement_finish" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms region snapshot replacement finish sagas started ok: 0 errors: 0 task: "region_snapshot_replacement_garbage_collection" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms total garbage collections requested: 0 errors: 0 task: "region_snapshot_replacement_start" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms total requests created ok: 0 total start saga invoked ok: 0 @@ -686,7 +686,7 @@ task: "region_snapshot_replacement_start" task: "region_snapshot_replacement_step" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms total step records created ok: 0 total step garbage collect saga invoked ok: 0 @@ -696,7 +696,7 @@ task: "region_snapshot_replacement_step" task: "saga_recovery" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s 
ago) and ran for ms since Nexus started: sagas recovered: 0 @@ -716,18 +716,18 @@ task: "saga_recovery" task: "service_firewall_rule_propagation" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms task: "service_zone_nat_tracker" configured period: every s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms last completion reported error: inventory collection is None task: "sp_ereport_ingester" configured period: every s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms /!\ errors: 1 - failed to resolve MGS addresses: proto error: no records found for Query { name: Name("_mgs._tcp.control-plane.oxide.internal."), query_type: SRV, query_class: IN } @@ -740,7 +740,7 @@ task: "sp_ereport_ingester" task: "support_bundle_collector" configured period: every days h m s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms Support Bundle Cleanup Report: Bundles deleted from sleds: 0 @@ -751,13 +751,13 @@ task: "support_bundle_collector" task: "switch_port_config_manager" configured period: every s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms warning: unknown background task: "switch_port_config_manager" (don't know how to interpret details: Object {}) task: "tuf_artifact_replication" configured period: every h - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms request ringbuf: @@ -783,19 +783,19 @@ task: "tuf_artifact_replication" task: "v2p_manager" configured period: every s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms warning: unknown background task: "v2p_manager" (don't know how to interpret details: Object {}) task: "vpc_route_manager" configured period: every s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms warning: unknown background task: "vpc_route_manager" (don't know how to interpret details: Object {}) task: "webhook_deliverator" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms receivers: 0 successful deliveries: 0 @@ -814,7 +814,7 @@ termination: Exited(0) stdout: task: "saga_recovery" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms since Nexus started: sagas recovered: 0 @@ -842,13 +842,13 @@ termination: Exited(0) stdout: task: "blueprint_loader" configured period: every m s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms last completion reported error: failed to read target blueprint: Internal Error: no target blueprint set task: "blueprint_executor" configured period: every m - last completed activation: , triggered by a periodic timer firing + last 
completed activation: , triggered by started at (s ago) and ran for ms last completion reported error: no blueprint @@ -862,13 +862,13 @@ termination: Exited(0) stdout: task: "dns_config_internal" configured period: every m - last completed activation: , triggered by an explicit signal + last completed activation: , triggered by started at (s ago) and ran for ms last generation found: 1 task: "dns_servers_internal" configured period: every m - last completed activation: , triggered by an explicit signal + last completed activation: , triggered by started at (s ago) and ran for ms servers found: 1 @@ -877,7 +877,7 @@ task: "dns_servers_internal" task: "dns_propagation_internal" configured period: every m - last completed activation: , triggered by a dependent task completing + last completed activation: , triggered by started at (s ago) and ran for ms attempt to propagate generation: 1 @@ -895,13 +895,13 @@ termination: Exited(0) stdout: task: "dns_config_external" configured period: every m - last completed activation: , triggered by an explicit signal + last completed activation: , triggered by started at (s ago) and ran for ms last generation found: 2 task: "dns_servers_external" configured period: every m - last completed activation: , triggered by an explicit signal + last completed activation: , triggered by started at (s ago) and ran for ms servers found: 1 @@ -910,7 +910,7 @@ task: "dns_servers_external" task: "dns_propagation_external" configured period: every m - last completed activation: , triggered by a dependent task completing + last completed activation: , triggered by started at (s ago) and ran for ms attempt to propagate generation: 2 @@ -928,13 +928,13 @@ termination: Exited(0) stdout: task: "dns_config_internal" configured period: every m - last completed activation: , triggered by an explicit signal + last completed activation: , triggered by started at (s ago) and ran for ms last generation found: 1 task: "dns_servers_internal" configured period: every m - last completed activation: , triggered by an explicit signal + last completed activation: , triggered by started at (s ago) and ran for ms servers found: 1 @@ -943,7 +943,7 @@ task: "dns_servers_internal" task: "dns_propagation_internal" configured period: every m - last completed activation: , triggered by a dependent task completing + last completed activation: , triggered by started at (s ago) and ran for ms attempt to propagate generation: 1 @@ -953,13 +953,13 @@ task: "dns_propagation_internal" task: "dns_config_external" configured period: every m - last completed activation: , triggered by an explicit signal + last completed activation: , triggered by started at (s ago) and ran for ms last generation found: 2 task: "dns_servers_external" configured period: every m - last completed activation: , triggered by an explicit signal + last completed activation: , triggered by started at (s ago) and ran for ms servers found: 1 @@ -968,7 +968,7 @@ task: "dns_servers_external" task: "dns_propagation_external" configured period: every m - last completed activation: , triggered by a dependent task completing + last completed activation: , triggered by started at (s ago) and ran for ms attempt to propagate generation: 2 @@ -978,25 +978,25 @@ task: "dns_propagation_external" task: "nat_garbage_collector" configured period: every s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms last completion reported error: failed to resolve 
addresses for Dendrite services: proto error: no records found for Query { name: Name("_dendrite._tcp.control-plane.oxide.internal."), query_type: SRV, query_class: IN } task: "blueprint_loader" configured period: every m s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms last completion reported error: failed to read target blueprint: Internal Error: no target blueprint set task: "blueprint_executor" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms last completion reported error: no blueprint task: "abandoned_vmm_reaper" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms total abandoned VMMs found: 0 VMM records deleted: 0 @@ -1005,50 +1005,50 @@ task: "abandoned_vmm_reaper" task: "alert_dispatcher" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms alerts dispatched: 0 alerts with no receivers subscribed: 0 task: "bfd_manager" configured period: every s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms last completion reported error: failed to resolve addresses for Dendrite services: proto error: no records found for Query { name: Name("_dendrite._tcp.control-plane.oxide.internal."), query_type: SRV, query_class: IN } task: "blueprint_planner" configured period: every m - last completed activation: , triggered by a dependent task completing + last completed activation: , triggered by started at (s ago) and ran for ms blueprint planning explicitly disabled by config! 
task: "blueprint_rendezvous" configured period: every m - last completed activation: , triggered by a dependent task completing + last completed activation: , triggered by started at (s ago) and ran for ms last completion reported error: no blueprint task: "chicken_switches_watcher" configured period: every s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms warning: unknown background task: "chicken_switches_watcher" (don't know how to interpret details: Object {"chicken_switches_updated": Bool(false)}) task: "crdb_node_id_collector" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms last completion reported error: no blueprint task: "decommissioned_disk_cleaner" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms warning: unknown background task: "decommissioned_disk_cleaner" (don't know how to interpret details: Object {"deleted": Number(0), "error": Null, "error_count": Number(0), "found": Number(0), "not_ready_to_be_deleted": Number(0)}) task: "external_endpoints" configured period: every m - last completed activation: , triggered by an explicit signal + last completed activation: , triggered by started at (s ago) and ran for ms external API endpoints: 2 ('*' below marks default) @@ -1064,7 +1064,7 @@ task: "external_endpoints" task: "instance_reincarnation" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms instances eligible for reincarnation: 0 instance failed instances: 0 @@ -1075,7 +1075,7 @@ task: "instance_reincarnation" task: "instance_updater" configured period: every s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms task explicitly disabled by config! instances in need of updates: 0 @@ -1088,7 +1088,7 @@ task: "instance_updater" task: "instance_watcher" configured period: every s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms total instances checked: 0 checks completed: 0 @@ -1100,7 +1100,7 @@ task: "instance_watcher" task: "inventory_collection" configured period: every m - last completed activation: , triggered by an explicit signal + last completed activation: , triggered by started at (s ago) and ran for ms last collection id: ..................... 
last collection started: @@ -1108,40 +1108,40 @@ task: "inventory_collection" task: "lookup_region_port" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms total filled in ports: 0 errors: 0 task: "metrics_producer_gc" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms warning: unknown background task: "metrics_producer_gc" (don't know how to interpret details: Object {"expiration": String(""), "pruned": Array []}) task: "phantom_disks" configured period: every s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms number of phantom disks deleted: 0 number of phantom disk delete errors: 0 task: "physical_disk_adoption" configured period: every s - last completed activation: , triggered by a dependent task completing + last completed activation: , triggered by started at (s ago) and ran for ms last completion reported error: task disabled task: "read_only_region_replacement_start" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms total requests created ok: 0 errors: 0 task: "region_replacement" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms region replacement requests created ok: 0 region replacement start sagas started ok: 0 @@ -1150,7 +1150,7 @@ task: "region_replacement" task: "region_replacement_driver" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms region replacement drive sagas started ok: 0 region replacement finish sagas started ok: 0 @@ -1158,21 +1158,21 @@ task: "region_replacement_driver" task: "region_snapshot_replacement_finish" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms region snapshot replacement finish sagas started ok: 0 errors: 0 task: "region_snapshot_replacement_garbage_collection" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms total garbage collections requested: 0 errors: 0 task: "region_snapshot_replacement_start" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms total requests created ok: 0 total start saga invoked ok: 0 @@ -1181,7 +1181,7 @@ task: "region_snapshot_replacement_start" task: "region_snapshot_replacement_step" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms total step records created ok: 0 total step garbage collect saga invoked ok: 0 @@ -1191,7 +1191,7 @@ task: "region_snapshot_replacement_step" task: "saga_recovery" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by 
started at (s ago) and ran for ms since Nexus started: sagas recovered: 0 @@ -1211,18 +1211,18 @@ task: "saga_recovery" task: "service_firewall_rule_propagation" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms task: "service_zone_nat_tracker" configured period: every s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms last completion reported error: inventory collection is None task: "sp_ereport_ingester" configured period: every s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms /!\ errors: 1 - failed to resolve MGS addresses: proto error: no records found for Query { name: Name("_mgs._tcp.control-plane.oxide.internal."), query_type: SRV, query_class: IN } @@ -1235,7 +1235,7 @@ task: "sp_ereport_ingester" task: "support_bundle_collector" configured period: every days h m s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms Support Bundle Cleanup Report: Bundles deleted from sleds: 0 @@ -1246,13 +1246,13 @@ task: "support_bundle_collector" task: "switch_port_config_manager" configured period: every s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms warning: unknown background task: "switch_port_config_manager" (don't know how to interpret details: Object {}) task: "tuf_artifact_replication" configured period: every h - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms request ringbuf: @@ -1278,19 +1278,19 @@ task: "tuf_artifact_replication" task: "v2p_manager" configured period: every s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms warning: unknown background task: "v2p_manager" (don't know how to interpret details: Object {}) task: "vpc_route_manager" configured period: every s - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms warning: unknown background task: "vpc_route_manager" (don't know how to interpret details: Object {}) task: "webhook_deliverator" configured period: every m - last completed activation: , triggered by a periodic timer firing + last completed activation: , triggered by started at (s ago) and ran for ms receivers: 0 successful deliveries: 0 diff --git a/dev-tools/omdb/tests/test_all_output.rs b/dev-tools/omdb/tests/test_all_output.rs index ddb9e2541d4..4b1413f29ef 100644 --- a/dev-tools/omdb/tests/test_all_output.rs +++ b/dev-tools/omdb/tests/test_all_output.rs @@ -298,6 +298,7 @@ async fn test_omdb_success_cases(cptestctx: &ControlPlaneTestContext) { redactor .field("put config ok:", r"\d+") .field("list ok:", r"\d+") + .field("triggered by", r"[\w ]+") .section(&["task: \"tuf_artifact_replication\"", "request ringbuf:"]); for args in invocations { From 29c5508ec3bbea556af3daa87e65282ec65866bb Mon Sep 17 00:00:00 2001 From: "Andrew J. 
Stone"
Date: Wed, 27 Aug 2025 22:07:02 -0400
Subject: [PATCH 07/38] TQ: Introduce tqdb (#8801)

This PR builds on #8753

This is a hefty PR, but it's not as bad as it looks. Over 4k lines of it are in the example log file in the second commit. There's also some moved and unmodified code that I'll point out.

This PR introduces a new test tool for the trust-quorum protocol: tqdb.

tqdb is a repl that takes event traces produced by the `cluster` proptest and uses them for deterministic replay of actions against test state. The test state includes a "universe" of real protocol nodes, a fake nexus, and fake networks. The proptest and debugging state is shared and contained in the `trust-quorum-test-utils` crate. The debugger supports a variety of functionality, including stepping through individual events, setting breakpoints, snapshotting and diffing states, and viewing the event log itself.

The purpose of tqdb is twofold:

1. Allow for debugging of failed proptests. This is non-trivial in some cases, even with shrunken tests, because the generated actions are high-level and are all generated up front. The actual operations such as reconfigurations are derived from these high-level random generations in conjunction with the current state of the system. Therefore, the set of failing generated actions doesn't really tell you much. You have to look at the logs and the assertion that fired, and reason about it with incomplete information. Now, for each concrete action taken, we record the event in a log. In the case of a failure, an event log can be loaded into tqdb with a breakpoint set right before the failure. A snapshot of the state can be taken, and then the failing event can be applied. The diff will tell you what changed and allow you to inspect the actual state of the system. Full visibility into your failure is now possible.

2. The trust quorum protocol is non-trivial. Tqdb allows developers to see in detail how the protocol behaves and understand what is happening in certain situations. Event logs can be created by hand (or script) for particularly interesting scenarios and then run through tqdb.

In order to get the diff functionality to work as I wanted, I had to implement `Eq` for types that implement `subtle::ConstantTimeEq` in both the `gfss` (our secret sharing library) and `trust-quorum` crates. However, whether the compiler preserves the constant-time guarantees in the presence of these implementations is unknown. Therefore, a feature flag was added so that only the `test-utils` and `tqdb` crates can use these implementations. They are not used in the production codebase. Feature unification is not at play here because neither `test-utils` nor `tqdb` is part of the product.
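To make the replay idea above concrete, here is a minimal, self-contained sketch of deterministic replay from a JSON event log. It is not the actual tqdb or `trust-quorum-test-utils` code: the `Event` variants, `TestState` fields, and JSON shape are hypothetical stand-ins (using serde, serde_json, and anyhow, which the workspace already depends on). The only point is that applying recorded events one at a time to in-memory state is what makes breakpoints, snapshots, and state diffing possible.

```rust
use serde::Deserialize;

/// Hypothetical stand-ins: the real event and state types live in
/// `trust-quorum-test-utils` and are far richer than this.
#[derive(Debug, Deserialize)]
enum Event {
    DeliverMsg { from: String, to: String },
    CommitConfiguration { epoch: u64 },
}

#[derive(Debug, Default)]
struct TestState {
    delivered: Vec<(String, String)>,
    committed_epochs: Vec<u64>,
}

impl TestState {
    /// Applying an event is a pure function of (state, event); that is what
    /// makes replaying a recorded log deterministic.
    fn apply(&mut self, event: &Event) {
        match event {
            Event::DeliverMsg { from, to } => {
                self.delivered.push((from.clone(), to.clone()));
            }
            Event::CommitConfiguration { epoch } => {
                self.committed_epochs.push(*epoch);
            }
        }
    }
}

/// Replay a JSON event log one event at a time. A debugger built on top of
/// this loop can pause at a breakpoint, snapshot `state`, apply the next
/// event, and diff the before/after snapshots.
fn replay(events_json: &str) -> anyhow::Result<TestState> {
    let events: Vec<Event> = serde_json::from_str(events_json)?;
    let mut state = TestState::default();
    for event in &events {
        state.apply(event);
    }
    Ok(state)
}

fn main() -> anyhow::Result<()> {
    let log = r#"[
        {"DeliverMsg": {"from": "node-1", "to": "node-2"}},
        {"CommitConfiguration": {"epoch": 2}}
    ]"#;
    println!("{:?}", replay(log)?);
    Ok(())
}
```

The real event log in this PR is richer (see `trust-quorum/test-utils/src/event.rs` and the example log added under `trust-quorum/tqdb/example-event-logs/`), but the replay loop follows the same shape.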
--- Cargo.lock | 42 + Cargo.toml | 6 + dev-tools/reconfigurator-cli/src/lib.rs | 2 +- dev-tools/repl-utils/src/lib.rs | 16 +- trust-quorum/Cargo.toml | 13 + trust-quorum/gfss/Cargo.toml | 11 + trust-quorum/gfss/src/gf256.rs | 11 +- trust-quorum/gfss/src/shamir.rs | 10 + trust-quorum/src/compute_key_share.rs | 61 +- trust-quorum/src/configuration.rs | 11 +- trust-quorum/src/coordinator_state.rs | 98 +- trust-quorum/src/crypto.rs | 21 +- trust-quorum/src/lib.rs | 81 +- trust-quorum/src/messages.rs | 2 + trust-quorum/src/node.rs | 59 +- trust-quorum/src/node_ctx.rs | 32 + trust-quorum/src/persistent_state.rs | 4 +- trust-quorum/src/validators.rs | 31 +- trust-quorum/test-utils/Cargo.toml | 21 + trust-quorum/test-utils/src/event.rs | 33 + trust-quorum/test-utils/src/event_log.rs | 40 + trust-quorum/test-utils/src/lib.rs | 23 + trust-quorum/test-utils/src/nexus.rs | 170 + trust-quorum/test-utils/src/state.rs | 888 ++++ trust-quorum/tests/cluster.rs | 814 +-- trust-quorum/tqdb/Cargo.toml | 30 + ...rust_quorum_protocol.14368.453-events.json | 4668 +++++++++++++++++ trust-quorum/tqdb/src/bin/tqdb/main.rs | 716 +++ 28 files changed, 7175 insertions(+), 739 deletions(-) create mode 100644 trust-quorum/test-utils/Cargo.toml create mode 100644 trust-quorum/test-utils/src/event.rs create mode 100644 trust-quorum/test-utils/src/event_log.rs create mode 100644 trust-quorum/test-utils/src/lib.rs create mode 100644 trust-quorum/test-utils/src/nexus.rs create mode 100644 trust-quorum/test-utils/src/state.rs create mode 100644 trust-quorum/tqdb/Cargo.toml create mode 100644 trust-quorum/tqdb/example-event-logs/cluster-49df2a4b903c778a-test_trust_quorum_protocol.14368.453-events.json create mode 100644 trust-quorum/tqdb/src/bin/tqdb/main.rs diff --git a/Cargo.lock b/Cargo.lock index a64e0070901..9c6883e8aa4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14001,6 +14001,27 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" +[[package]] +name = "tqdb" +version = "0.1.0" +dependencies = [ + "anyhow", + "camino", + "clap", + "colored", + "daft", + "iddqd", + "omicron-repl-utils", + "omicron-workspace-hack", + "reconfigurator-cli", + "reedline", + "serde_json", + "slog", + "tabled 0.15.0", + "trust-quorum", + "trust-quorum-test-utils", +] + [[package]] name = "tracing" version = "0.1.40" @@ -14165,6 +14186,7 @@ dependencies = [ name = "trust-quorum" version = "0.1.0" dependencies = [ + "anyhow", "assert_matches", "bcs", "bootstore", @@ -14172,6 +14194,7 @@ dependencies = [ "chacha20poly1305", "daft", "derive_more 0.99.20", + "dropshot", "gfss", "hex", "hkdf", @@ -14183,6 +14206,7 @@ dependencies = [ "rand 0.9.2", "secrecy 0.10.3", "serde", + "serde_json", "serde_with", "sha3", "slog", @@ -14192,10 +14216,28 @@ dependencies = [ "test-strategy", "thiserror 2.0.12", "tokio", + "trust-quorum-test-utils", "uuid", "zeroize", ] +[[package]] +name = "trust-quorum-test-utils" +version = "0.1.0" +dependencies = [ + "camino", + "daft", + "dropshot", + "gfss", + "iddqd", + "omicron-uuid-kinds", + "omicron-workspace-hack", + "serde", + "serde_json", + "slog", + "trust-quorum", +] + [[package]] name = "try-lock" version = "0.2.5" diff --git a/Cargo.toml b/Cargo.toml index e526f23b118..0af37939fb2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -141,6 +141,8 @@ members = [ "test-utils", "trust-quorum", "trust-quorum/gfss", + "trust-quorum/test-utils", + "trust-quorum/tqdb", "typed-rng", "update-common", "update-engine", @@ 
-298,6 +300,8 @@ default-members = [ "sp-sim", "trust-quorum", "trust-quorum/gfss", + "trust-quorum/test-utils", + "trust-quorum/tqdb", "test-utils", "typed-rng", "update-common", @@ -460,6 +464,8 @@ gateway-test-utils = { path = "gateway-test-utils" } gateway-types = { path = "gateway-types" } gethostname = "0.5.0" gfss = { path = "trust-quorum/gfss" } +trust-quorum = { path = "trust-quorum" } +trust-quorum-test-utils = { path = "trust-quorum/test-utils" } glob = "0.3.2" guppy = "0.17.20" headers = "0.4.1" diff --git a/dev-tools/reconfigurator-cli/src/lib.rs b/dev-tools/reconfigurator-cli/src/lib.rs index fc998013ca7..3b1ec1b20e6 100644 --- a/dev-tools/reconfigurator-cli/src/lib.rs +++ b/dev-tools/reconfigurator-cli/src/lib.rs @@ -15,7 +15,7 @@ use iddqd::IdOrdMap; use indent_write::fmt::IndentWriter; use internal_dns_types::diff::DnsDiff; use itertools::Itertools; -use log_capture::LogCapture; +pub use log_capture::LogCapture; use nexus_inventory::CollectionBuilder; use nexus_reconfigurator_blippy::Blippy; use nexus_reconfigurator_blippy::BlippyReportSortKey; diff --git a/dev-tools/repl-utils/src/lib.rs b/dev-tools/repl-utils/src/lib.rs index 3a4a0c5547e..f14f7a606e2 100644 --- a/dev-tools/repl-utils/src/lib.rs +++ b/dev-tools/repl-utils/src/lib.rs @@ -9,6 +9,7 @@ use anyhow::anyhow; use anyhow::bail; use camino::Utf8Path; use clap::Parser; +use reedline::Prompt; use reedline::Reedline; use reedline::Signal; use std::fs::File; @@ -110,13 +111,24 @@ pub fn run_repl_from_file( pub fn run_repl_on_stdin( run_one: &mut dyn FnMut(C) -> anyhow::Result>, ) -> anyhow::Result<()> { - let mut ed = Reedline::create(); + let ed = Reedline::create(); let prompt = reedline::DefaultPrompt::new( reedline::DefaultPromptSegment::Empty, reedline::DefaultPromptSegment::Empty, ); + run_repl_on_stdin_customized(ed, &prompt, run_one) +} + +/// Runs a REPL using stdin/stdout with a customized `Reedline` and `Prompt` +/// +/// See docs for [`run_repl_on_stdin`] +pub fn run_repl_on_stdin_customized( + mut ed: Reedline, + prompt: &dyn Prompt, + run_one: &mut dyn FnMut(C) -> anyhow::Result>, +) -> anyhow::Result<()> { loop { - match ed.read_line(&prompt) { + match ed.read_line(prompt) { Ok(Signal::Success(buffer)) => { // Strip everything after '#' as a comment. let entry = match buffer.split_once('#') { diff --git a/trust-quorum/Cargo.toml b/trust-quorum/Cargo.toml index eaf141ddf2d..0d6ac6863c0 100644 --- a/trust-quorum/Cargo.toml +++ b/trust-quorum/Cargo.toml @@ -8,6 +8,7 @@ license = "MPL-2.0" workspace = true [dependencies] +anyhow.workspace = true bcs.workspace = true bootstore.workspace = true camino.workspace = true @@ -36,6 +37,18 @@ omicron-workspace-hack.workspace = true [dev-dependencies] assert_matches.workspace = true +dropshot.workspace = true omicron-test-utils.workspace = true proptest.workspace = true +serde_json.workspace = true test-strategy.workspace = true +trust-quorum-test-utils.workspace = true + +[features] +# Impl `PartialEq` and `Eq` for types implementing `subtle::ConstantTimeEq` when +# this feature is enabled. +# +# This is of unknown risk. The rust compiler may obviate the security of using +# subtle when we do this. On the other hand its very useful for testing and +# debugging outside of production. 
+danger_partial_eq_ct_wrapper = ["gfss/danger_partial_eq_ct_wrapper"] diff --git a/trust-quorum/gfss/Cargo.toml b/trust-quorum/gfss/Cargo.toml index 5802654f80e..3b6ad9fdf1b 100644 --- a/trust-quorum/gfss/Cargo.toml +++ b/trust-quorum/gfss/Cargo.toml @@ -21,3 +21,14 @@ omicron-workspace-hack.workspace = true [dev-dependencies] proptest.workspace = true test-strategy.workspace = true + +[features] + + +# Impl `PartialEq` and `Eq` for types implementing `subtle::ConstantTimeEq` when +# this feature is enabled. +# +# This is of unknown risk. The rust compiler may obviate the security of using +# subtle when we do this. On the other hand its very useful for testing and +# debugging outside of production. +danger_partial_eq_ct_wrapper = [] diff --git a/trust-quorum/gfss/src/gf256.rs b/trust-quorum/gfss/src/gf256.rs index 235cf37265c..78fc2bc1f28 100644 --- a/trust-quorum/gfss/src/gf256.rs +++ b/trust-quorum/gfss/src/gf256.rs @@ -32,7 +32,7 @@ use zeroize::Zeroize; /// An element in a finite field of prime power 2^8 /// -/// We explicitly don't enable the equality operators to prevent ourselves from +/// We explicitly don't derive the equality operators to prevent ourselves from /// accidentally using those instead of the constant time ones. #[repr(transparent)] #[derive(Debug, Clone, Copy, Zeroize, Serialize, Deserialize)] @@ -120,6 +120,15 @@ impl ConstantTimeEq for Gf256 { } } +#[cfg(feature = "danger_partial_eq_ct_wrapper")] +impl PartialEq for Gf256 { + fn eq(&self, other: &Self) -> bool { + self.ct_eq(&other).into() + } +} +#[cfg(feature = "danger_partial_eq_ct_wrapper")] +impl Eq for Gf256 {} + impl Add for Gf256 { type Output = Self; diff --git a/trust-quorum/gfss/src/shamir.rs b/trust-quorum/gfss/src/shamir.rs index 2da11b83bad..49ea0a90a48 100644 --- a/trust-quorum/gfss/src/shamir.rs +++ b/trust-quorum/gfss/src/shamir.rs @@ -137,6 +137,16 @@ impl Share { } } +#[cfg(feature = "danger_partial_eq_ct_wrapper")] +impl PartialEq for Share { + fn eq(&self, other: &Self) -> bool { + self.x_coordinate == other.x_coordinate + && self.y_coordinates == other.y_coordinates + } +} +#[cfg(feature = "danger_partial_eq_ct_wrapper")] +impl Eq for Share {} + impl std::fmt::Debug for Share { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("KeyShareGf256").finish() diff --git a/trust-quorum/src/compute_key_share.rs b/trust-quorum/src/compute_key_share.rs index 8cc780f752e..648519733f5 100644 --- a/trust-quorum/src/compute_key_share.rs +++ b/trust-quorum/src/compute_key_share.rs @@ -8,17 +8,17 @@ //! share for that configuration it must collect a threshold of key shares from //! other nodes so that it can compute its own key share. -use crate::crypto::Sha3_256Digest; use crate::{ Alarm, Configuration, Epoch, NodeHandlerCtx, PeerMsgKind, PlatformId, }; use gfss::gf256::Gf256; use gfss::shamir::{self, Share}; -use slog::{Logger, error, o, warn}; +use slog::{Logger, error, o}; use std::collections::BTreeMap; /// In memory state that tracks retrieval of key shares in order to compute /// this node's key share for a given configuration. 
+#[derive(Debug, Clone)] pub struct KeyShareComputer { log: Logger, @@ -28,6 +28,17 @@ pub struct KeyShareComputer { collected_shares: BTreeMap, } +#[cfg(feature = "danger_partial_eq_ct_wrapper")] +impl PartialEq for KeyShareComputer { + fn eq(&self, other: &Self) -> bool { + self.config == other.config + && self.collected_shares == other.collected_shares + } +} + +#[cfg(feature = "danger_partial_eq_ct_wrapper")] +impl Eq for KeyShareComputer {} + impl KeyShareComputer { pub fn new( log: &Logger, @@ -54,7 +65,9 @@ impl KeyShareComputer { ctx: &mut impl NodeHandlerCtx, peer: PlatformId, ) { - if !self.collected_shares.contains_key(&peer) { + if self.config.members.contains_key(&peer) + && !self.collected_shares.contains_key(&peer) + { ctx.send(peer, PeerMsgKind::GetShare(self.config.epoch)); } } @@ -70,55 +83,29 @@ impl KeyShareComputer { epoch: Epoch, share: Share, ) -> bool { - // Are we trying to retrieve shares for `epoch`? - if epoch != self.config.epoch { - warn!( - self.log, - "Received Share from node with wrong epoch"; - "received_epoch" => %epoch, - "from" => %from - ); - return false; - } - - // Is the sender a member of the configuration `epoch`? - // Was the sender a member of the configuration at `old_epoch`? - let Some(expected_digest) = self.config.members.get(&from) else { - warn!( - self.log, - "Received Share from unexpected node"; - "epoch" => %epoch, - "from" => %from - ); + if !crate::validate_share(&self.log, &self.config, &from, epoch, &share) + { + // Logging done inside `validate_share` return false; }; - // Does the share hash match what we expect? - let mut digest = Sha3_256Digest::default(); - share.digest::(&mut digest.0); - if digest != *expected_digest { - error!( - self.log, - "Received share with invalid digest"; - "epoch" => %epoch, - "from" => %from - ); - return false; - } - // A valid share was received. Is it new? if self.collected_shares.insert(from, share).is_some() { return false; } - // Do we have enough shares to computer our rack share? + // Do we have enough shares to compute our rack share? if self.collected_shares.len() < self.config.threshold.0 as usize { return false; } + // Share indices are assigned according the configuration membership's + // key order, when the configuration is constructed. + // // What index are we in the configuration? This is our "x-coordinate" // for our key share calculation. We always start indexing from 1, since // 0 is the rack secret. 
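+        // (Illustrative example, not from the original source: in a
+        // configuration whose members sort as [a, b, c], node b is at
+        // position 1, so its x-coordinate is Gf256::new(2); x = 0 is
+        // reserved for the rack secret itself.)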
+ // let index = self.config.members.keys().position(|id| id == ctx.platform_id()); diff --git a/trust-quorum/src/configuration.rs b/trust-quorum/src/configuration.rs index a6057c62ed1..8b116e6f4a8 100644 --- a/trust-quorum/src/configuration.rs +++ b/trust-quorum/src/configuration.rs @@ -7,6 +7,7 @@ use crate::crypto::{EncryptedRackSecrets, RackSecret, Sha3_256Digest}; use crate::validators::ValidatedReconfigureMsg; use crate::{Epoch, PlatformId, Threshold}; +use daft::Diffable; use gfss::shamir::{Share, SplitError}; use iddqd::{IdOrdItem, id_upcast}; use omicron_uuid_kinds::RackUuid; @@ -31,7 +32,15 @@ pub enum ConfigurationError { /// /// Only valid for non-lrtq configurations #[derive( - Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, + Debug, + Clone, + PartialEq, + Eq, + PartialOrd, + Ord, + Serialize, + Deserialize, + Diffable, )] pub struct Configuration { /// Unique Id of the rack diff --git a/trust-quorum/src/coordinator_state.rs b/trust-quorum/src/coordinator_state.rs index 78e8c8b1254..50cba4d839a 100644 --- a/trust-quorum/src/coordinator_state.rs +++ b/trust-quorum/src/coordinator_state.rs @@ -4,12 +4,12 @@ //! State of a reconfiguration coordinator inside a [`crate::Node`] -use crate::NodeHandlerCtx; -use crate::crypto::{ - LrtqShare, PlaintextRackSecrets, Sha3_256Digest, ShareDigestLrtq, -}; +use crate::configuration::ConfigurationDiff; +use crate::crypto::{LrtqShare, PlaintextRackSecrets, ShareDigestLrtq}; use crate::validators::{ReconfigurationError, ValidatedReconfigureMsg}; use crate::{Configuration, Epoch, PeerMsgKind, PlatformId, RackSecret}; +use crate::{NodeHandlerCtx, ValidatedReconfigureMsgDiff}; +use daft::{Diffable, Leaf}; use gfss::shamir::Share; use slog::{Logger, error, info, o, warn}; use std::collections::{BTreeMap, BTreeSet}; @@ -27,7 +27,9 @@ use std::mem; /// allows progress to always be made with a full linearization of epochs. /// /// We allow some unused fields before we complete the coordination code +#[derive(Clone, Debug, Diffable)] pub struct CoordinatorState { + #[daft(ignore)] log: Logger, /// A copy of the message used to start this reconfiguration @@ -41,6 +43,34 @@ pub struct CoordinatorState { op: CoordinatorOperation, } +// For diffs we want to allow access to all fields, but not make them public in +// the `CoordinatorState` type itself. +impl<'daft> CoordinatorStateDiff<'daft> { + pub fn reconfigure_msg(&self) -> &ValidatedReconfigureMsgDiff<'daft> { + &self.reconfigure_msg + } + + pub fn configuration(&self) -> &ConfigurationDiff<'daft> { + &self.configuration + } + + pub fn op(&self) -> Leaf<&CoordinatorOperation> { + self.op + } +} + +#[cfg(feature = "danger_partial_eq_ct_wrapper")] +impl PartialEq for CoordinatorState { + fn eq(&self, other: &Self) -> bool { + self.reconfigure_msg == other.reconfigure_msg + && self.configuration == other.configuration + && self.op == other.op + } +} + +#[cfg(feature = "danger_partial_eq_ct_wrapper")] +impl Eq for CoordinatorState {} + impl CoordinatorState { /// Start coordinating a reconfiguration for a brand new trust quorum /// @@ -179,13 +209,14 @@ impl CoordinatorState { #[expect(unused)] CoordinatorOperation::CollectLrtqShares { members, shares } => {} CoordinatorOperation::Prepare { prepares, .. 
} => { - for (platform_id, (config, share)) in - prepares.clone().into_iter() - { + for (platform_id, (config, share)) in prepares.iter() { if ctx.connected().contains(&platform_id) { ctx.send( - platform_id, - PeerMsgKind::Prepare { config, share }, + platform_id.clone(), + PeerMsgKind::Prepare { + config: config.clone(), + share: share.clone(), + }, ); } } @@ -209,7 +240,6 @@ impl CoordinatorState { } => {} CoordinatorOperation::CollectLrtqShares { members, shares } => {} CoordinatorOperation::Prepare { prepares, prepare_acks } => { - let rack_id = self.reconfigure_msg.rack_id(); if let Some((config, share)) = prepares.get(&to) { ctx.send( to, @@ -285,39 +315,15 @@ impl CoordinatorState { "new_epoch" => new_epoch.to_string() )); - // Are we trying to retrieve shares for `epoch`? - if *old_epoch != epoch { - warn!( - log, - "Received Share from node with wrong epoch"; - "received_epoch" => %epoch, - "from" => %from - ); - return; - } - - // Was the sender a member of the configuration at `old_epoch`? - let Some(expected_digest) = old_config.members.get(&from) - else { - warn!( - log, - "Received Share from unexpected node"; - "received_epoch" => %epoch, - "from" => %from - ); + if !crate::validate_share( + &self.log, + &old_config, + &from, + epoch, + &share, + ) { + // Logging done inside `validate_share` return; - }; - - // Does the share hash match what we expect? - let mut digest = Sha3_256Digest::default(); - share.digest::(&mut digest.0); - if digest != *expected_digest { - error!( - log, - "Received share with invalid digest"; - "received_epoch" => %epoch, - "from" => %from - ); } // A valid share was received. Is it new? @@ -411,6 +417,12 @@ impl CoordinatorState { }; // Save the encrypted rack secrets in the current configuration + // + // A new configuration is always created with a `None` value + // for `encrypted_rack_secrets`, as it gets filled in here. + // + // If we change that it's a programmer error that will be caught + // immediately by our tests. assert!(self.configuration.encrypted_rack_secrets.is_none()); self.configuration.encrypted_rack_secrets = Some(new_encrypted_rack_secrets); @@ -467,6 +479,8 @@ impl CoordinatorState { } /// What should the coordinator be doing? 
+#[derive(Clone, Debug, Diffable)] +#[cfg_attr(feature = "danger_partial_eq_ct_wrapper", derive(PartialEq, Eq))] pub enum CoordinatorOperation { CollectShares { old_epoch: Epoch, diff --git a/trust-quorum/src/crypto.rs b/trust-quorum/src/crypto.rs index 69d33c6cd66..cdb99677339 100644 --- a/trust-quorum/src/crypto.rs +++ b/trust-quorum/src/crypto.rs @@ -45,6 +45,7 @@ const CHACHA20POLY1305_NONCE_LEN: usize = 12; // The key share format used for LRTQ #[derive(Clone, Serialize, Deserialize, Zeroize, ZeroizeOnDrop, From)] +#[cfg_attr(feature = "danger_partial_eq_ct_wrapper", derive(PartialEq, Eq))] pub struct LrtqShare(Vec); // We don't want to risk debug-logging the actual share contents, so implement @@ -74,18 +75,20 @@ impl LrtqShare { pub struct ShareDigestLrtq(Sha3_256Digest); #[derive( - Default, - Debug, - Clone, - PartialEq, - Eq, - PartialOrd, - Ord, - Serialize, - Deserialize, + Default, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, )] pub struct Sha3_256Digest(pub [u8; 32]); +impl std::fmt::Debug for Sha3_256Digest { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "sha3 digest: ")?; + for v in self.0.as_slice() { + write!(f, "{:x?}", v)?; + } + Ok(()) + } +} + /// A boxed array containing rack secret data /// /// We explicitly choose to box the data so that it is not littered around diff --git a/trust-quorum/src/lib.rs b/trust-quorum/src/lib.rs index 8bb8d8de5d3..aed8a518b9e 100644 --- a/trust-quorum/src/lib.rs +++ b/trust-quorum/src/lib.rs @@ -9,8 +9,12 @@ //! All persistent state and all networking is managed outside of this //! implementation. +use crypto::Sha3_256Digest; +use daft::Diffable; use derive_more::Display; +use gfss::shamir::Share; use serde::{Deserialize, Serialize}; +use slog::{Logger, error, warn}; mod compute_key_share; mod configuration; @@ -22,16 +26,19 @@ mod node_ctx; mod persistent_state; mod validators; pub use configuration::Configuration; -pub use coordinator_state::{CoordinatorOperation, CoordinatorState}; +pub use coordinator_state::{ + CoordinatorOperation, CoordinatorState, CoordinatorStateDiff, +}; +pub use validators::ValidatedReconfigureMsgDiff; mod alarm; pub use alarm::Alarm; pub use crypto::RackSecret; pub use messages::*; -pub use node::Node; +pub use node::{Node, NodeDiff}; // public only for docs. pub use node_ctx::NodeHandlerCtx; -pub use node_ctx::{NodeCallerCtx, NodeCommonCtx, NodeCtx}; +pub use node_ctx::{NodeCallerCtx, NodeCommonCtx, NodeCtx, NodeCtxDiff}; pub use persistent_state::{PersistentState, PersistentStateSummary}; #[derive( @@ -46,7 +53,9 @@ pub use persistent_state::{PersistentState, PersistentStateSummary}; Serialize, Deserialize, Display, + Diffable, )] +#[daft(leaf)] pub struct Epoch(pub u64); impl Epoch { @@ -69,7 +78,9 @@ impl Epoch { Serialize, Deserialize, Display, + Diffable, )] +#[daft(leaf)] pub struct Threshold(pub u8); /// A unique identifier for a given trust quorum member. @@ -80,8 +91,17 @@ pub struct Threshold(pub u8); /// /// See RFDs 303 and 308 for more details. 
#[derive( - Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, + Debug, + Clone, + PartialEq, + Eq, + PartialOrd, + Ord, + Serialize, + Deserialize, + Diffable, )] +#[daft(leaf)] pub struct PlatformId { part_number: String, serial_number: String, @@ -108,9 +128,60 @@ impl PlatformId { } /// A container to make messages between trust quorum nodes routable -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Diffable)] +#[cfg_attr(feature = "danger_partial_eq_ct_wrapper", derive(PartialEq, Eq))] +#[daft(leaf)] pub struct Envelope { pub to: PlatformId, pub from: PlatformId, pub msg: PeerMsg, } + +/// Check if a received share is valid for a given configuration +/// +/// Return true if valid, false otherwise. +pub fn validate_share( + log: &Logger, + config: &Configuration, + from: &PlatformId, + epoch: Epoch, + share: &Share, +) -> bool { + // Are we trying to retrieve shares for `epoch`? + if epoch != config.epoch { + warn!( + log, + "Received Share from node with wrong epoch"; + "received_epoch" => %epoch, + "from" => %from + ); + return false; + } + + // Is the sender a member of the configuration `epoch`? + // Was the sender a member of the configuration at `old_epoch`? + let Some(expected_digest) = config.members.get(&from) else { + warn!( + log, + "Received Share from unexpected node"; + "epoch" => %epoch, + "from" => %from + ); + return false; + }; + + // Does the share hash match what we expect? + let mut digest = Sha3_256Digest::default(); + share.digest::(&mut digest.0); + if digest != *expected_digest { + error!( + log, + "Received share with invalid digest"; + "epoch" => %epoch, + "from" => %from + ); + return false; + } + + true +} diff --git a/trust-quorum/src/messages.rs b/trust-quorum/src/messages.rs index 052a8d04a40..3167cba5002 100644 --- a/trust-quorum/src/messages.rs +++ b/trust-quorum/src/messages.rs @@ -24,12 +24,14 @@ pub struct ReconfigureMsg { /// Messages sent between trust quorum members over a sprockets channel #[derive(Debug, Clone, Serialize, Deserialize)] +#[cfg_attr(feature = "danger_partial_eq_ct_wrapper", derive(PartialEq, Eq))] pub struct PeerMsg { pub rack_id: RackUuid, pub kind: PeerMsgKind, } #[derive(Debug, Clone, Serialize, Deserialize)] +#[cfg_attr(feature = "danger_partial_eq_ct_wrapper", derive(PartialEq, Eq))] pub enum PeerMsgKind { /// Sent from a coordinator node to inform a peer about a new configuration Prepare { diff --git a/trust-quorum/src/node.rs b/trust-quorum/src/node.rs index a6613f9062f..16503dbef88 100644 --- a/trust-quorum/src/node.rs +++ b/trust-quorum/src/node.rs @@ -23,6 +23,7 @@ use crate::{ Alarm, Configuration, CoordinatorState, Epoch, NodeHandlerCtx, PlatformId, messages::*, }; +use daft::{Diffable, Leaf}; use gfss::shamir::Share; use omicron_uuid_kinds::RackUuid; use slog::{Logger, error, info, o, warn}; @@ -32,7 +33,9 @@ use slog::{Logger, error, info, o, warn}; /// This is a `sans-io` implementation that is deterministic (except for /// `RackSecretGeneration`, which currently hardcodes use of an OsRng). This /// style is primarily for testing purposes. +#[derive(Debug, Clone, Diffable)] pub struct Node { + #[daft(ignore)] log: Logger, /// In memory state for when this node is coordinating a reconfiguration @@ -43,6 +46,29 @@ pub struct Node { key_share_computer: Option, } +// For diffs we want to allow access to all fields, but not make them public in +// the `Node` type itself. 
+impl NodeDiff<'_> { + pub fn coordinator_state(&self) -> Leaf> { + self.coordinator_state + } + + pub fn key_share_computer(&self) -> Leaf> { + self.key_share_computer + } +} + +#[cfg(feature = "danger_partial_eq_ct_wrapper")] +impl PartialEq for Node { + fn eq(&self, other: &Self) -> bool { + self.coordinator_state == other.coordinator_state + && self.key_share_computer == other.key_share_computer + } +} + +#[cfg(feature = "danger_partial_eq_ct_wrapper")] +impl Eq for Node {} + impl Node { pub fn new(log: &Logger, ctx: &mut impl NodeHandlerCtx) -> Node { let id_str = format!("{:?}", ctx.platform_id()); @@ -288,6 +314,19 @@ impl Node { from: PlatformId, config: Configuration, ) { + // The sender sent us a configuration even though we are not part of the + // configuration. This is a bug on the sender's part, but doesn't rise + // to the level of an alarm. Log an error. + if !config.members.contains_key(ctx.platform_id()) { + error!( + self.log, + "Received CommitAdvance, but not a member of configuration"; + "from" => %from, + "epoch" => %config.epoch + ); + return; + } + // We may have already advanced by the time we receive this message. // Let's check. if ctx.persistent_state().commits.contains(&config.epoch) { @@ -328,6 +367,7 @@ impl Node { config2: config.clone(), from: from.clone(), }); + return; } } else { ctx.update_persistent_state(|ps| { @@ -404,7 +444,7 @@ impl Node { } } - // We either were collectiong shares for an old epoch or haven't started + // We either were collecting shares for an old epoch or haven't started // yet. self.key_share_computer = Some(KeyShareComputer::new(&self.log, ctx, config)); @@ -435,7 +475,7 @@ impl Node { info!( self.log, concat!( - "Received 'GetShare'` from stale node. ", + "Received 'GetShare' from stale node. ", "Responded with 'CommitAdvance'." ); "from" => %from, @@ -542,6 +582,16 @@ impl Node { return; } + if !config.members.contains_key(ctx.platform_id()) { + error!( + self.log, + "Received Prepare when not a member of configuration"; + "from" => %from, + "prepare_epoch" => %config.epoch + ); + return; + } + // We always save the config and share if we haven't committed a later // configuration. If we have seen a newer `Prepare`, it's possible // that that configuration will not commit, and the latest committed @@ -568,7 +618,10 @@ impl Node { ); } // If we are coordinating for an older epoch, then we should stop - // coordinating. This epoch will never commit. + // coordinating. The configuration at this epoch will either never + // commit, or has already committed without us learning about it from + // Nexus. In either case the rest of the system has moved on and we + // should stop coordinating. if let Some(cs) = &self.coordinator_state { if msg_epoch > cs.reconfigure_msg().epoch() { // This prepare is for a newer configuration than the one we are diff --git a/trust-quorum/src/node_ctx.rs b/trust-quorum/src/node_ctx.rs index e3a4f7fed32..e7d36da7bd7 100644 --- a/trust-quorum/src/node_ctx.rs +++ b/trust-quorum/src/node_ctx.rs @@ -6,7 +6,9 @@ use crate::{ Alarm, Envelope, PeerMsg, PeerMsgKind, PersistentState, PlatformId, + persistent_state::PersistentStateDiff, }; +use daft::{BTreeSetDiff, Diffable, Leaf}; use std::collections::BTreeSet; /// An API shared by [`NodeCallerCtx`] and [`NodeHandlerCtx`] @@ -67,6 +69,8 @@ pub trait NodeHandlerCtx: NodeCommonCtx { /// We separate access to this context via different APIs; namely [`NodeCallerCtx`] /// and [`NodeHandlerCtx`]. 
This statically prevents both the caller and /// [`crate::Node`] internals from performing improper mutations. +#[derive(Debug, Clone, Diffable)] +#[cfg_attr(feature = "danger_partial_eq_ct_wrapper", derive(PartialEq, Eq))] pub struct NodeCtx { /// The unique hardware ID of a sled platform_id: PlatformId, @@ -90,6 +94,34 @@ pub struct NodeCtx { alarms: BTreeSet, } +// For diffs we want to allow access to all fields, but not make them public in +// the `NodeCtx` type itself. +impl<'daft> NodeCtxDiff<'daft> { + pub fn platform_id(&self) -> Leaf<&PlatformId> { + self.platform_id + } + + pub fn persistent_state(&self) -> &PersistentStateDiff<'daft> { + &self.persistent_state + } + + pub fn persistent_state_changed(&self) -> Leaf<&bool> { + self.persistent_state_changed + } + + pub fn outgoing(&self) -> Leaf<&[Envelope]> { + self.outgoing + } + + pub fn connected(&self) -> &BTreeSetDiff<'daft, PlatformId> { + &self.connected + } + + pub fn alarms(&self) -> &BTreeSetDiff<'daft, Alarm> { + &self.alarms + } +} + impl NodeCtx { pub fn new(platform_id: PlatformId) -> NodeCtx { NodeCtx { diff --git a/trust-quorum/src/persistent_state.rs b/trust-quorum/src/persistent_state.rs index ba6d1306272..d2a9a090396 100644 --- a/trust-quorum/src/persistent_state.rs +++ b/trust-quorum/src/persistent_state.rs @@ -9,6 +9,7 @@ use crate::crypto::LrtqShare; use crate::{Configuration, Epoch, PlatformId}; use bootstore::schemes::v0::SharePkgCommon as LrtqShareData; +use daft::Diffable; use gfss::shamir::Share; use iddqd::IdOrdMap; use omicron_uuid_kinds::{GenericUuid, RackUuid}; @@ -16,7 +17,8 @@ use serde::{Deserialize, Serialize}; use std::collections::{BTreeMap, BTreeSet}; /// All the persistent state for this protocol -#[derive(Debug, Clone, Serialize, Deserialize, Default)] +#[derive(Debug, Clone, Serialize, Deserialize, Default, Diffable)] +#[cfg_attr(feature = "danger_partial_eq_ct_wrapper", derive(PartialEq, Eq))] pub struct PersistentState { // If this node was an LRTQ node, sled-agent will start it with the ledger // data it read from disk. This allows us to upgrade from LRTQ. diff --git a/trust-quorum/src/validators.rs b/trust-quorum/src/validators.rs index aaf045d3aa6..ffa361dc1f2 100644 --- a/trust-quorum/src/validators.rs +++ b/trust-quorum/src/validators.rs @@ -7,6 +7,7 @@ use crate::configuration::ConfigurationError; use crate::messages::ReconfigureMsg; use crate::{Epoch, PersistentStateSummary, PlatformId, Threshold}; +use daft::{BTreeSetDiff, Diffable, Leaf}; use omicron_uuid_kinds::RackUuid; use slog::{Logger, error, info, warn}; use std::collections::BTreeSet; @@ -124,7 +125,7 @@ pub enum ReconfigurationError { /// A `ReconfigureMsg` that has been determined to be valid for the remainder /// of code paths. We encode this check into a type in a "parse, don't validate" /// manner. -#[derive(Debug)] +#[derive(Debug, Clone, PartialEq, Eq, Diffable)] pub struct ValidatedReconfigureMsg { rack_id: RackUuid, epoch: Epoch, @@ -137,6 +138,34 @@ pub struct ValidatedReconfigureMsg { coordinator_id: PlatformId, } +// For diffs we want to allow access to all fields, but not make them public in +// the `ValidatedReconfigureMsg` type itself. 
+impl<'daft> ValidatedReconfigureMsgDiff<'daft> { + pub fn rack_id(&self) -> Leaf<&RackUuid> { + self.rack_id + } + + pub fn epoch(&self) -> Leaf<&Epoch> { + self.epoch + } + + pub fn last_committed_epoch(&self) -> Leaf> { + self.last_committed_epoch + } + + pub fn members(&self) -> &BTreeSetDiff<'daft, PlatformId> { + &self.members + } + + pub fn threshold(&self) -> Leaf<&Threshold> { + self.threshold + } + + pub fn coordinator_id(&self) -> Leaf<&PlatformId> { + self.coordinator_id + } +} + impl PartialEq for ReconfigureMsg { fn eq(&self, other: &ValidatedReconfigureMsg) -> bool { let ReconfigureMsg { diff --git a/trust-quorum/test-utils/Cargo.toml b/trust-quorum/test-utils/Cargo.toml new file mode 100644 index 00000000000..f2701c471a2 --- /dev/null +++ b/trust-quorum/test-utils/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "trust-quorum-test-utils" +version = "0.1.0" +edition = "2024" + +[lints] +workspace = true + +[dependencies] +camino.workspace = true +daft.workspace = true +dropshot.workspace = true +gfss = { workspace = true, features = ["danger_partial_eq_ct_wrapper"] } +iddqd.workspace = true +omicron-uuid-kinds.workspace = true +serde.workspace = true +serde_json.workspace = true +slog.workspace = true +trust-quorum = { workspace = true, features = ["danger_partial_eq_ct_wrapper"] } + +omicron-workspace-hack.workspace = true diff --git a/trust-quorum/test-utils/src/event.rs b/trust-quorum/test-utils/src/event.rs new file mode 100644 index 00000000000..6a573d95852 --- /dev/null +++ b/trust-quorum/test-utils/src/event.rs @@ -0,0 +1,33 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Events passed to our SUT/Nexus sim in both proptests and tqdb + +use crate::nexus::{NexusConfig, NexusReply}; +use serde::{Deserialize, Serialize}; +use std::collections::BTreeSet; +use trust_quorum::{Epoch, PlatformId}; + +/// An event that can be fed into our system under test (SUT) +/// +/// Proptest generated `Action`s get translated into events at test execution +/// time and recorded for replay. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum Event { + InitialSetup { + member_universe_size: usize, + config: NexusConfig, + crashed_nodes: BTreeSet, + }, + AbortConfiguration(Epoch), + SendNexusReplyOnUnderlay(NexusReply), + /// Pull an envelope off the bootstrap network and call `Node::handle` + DeliverEnvelope { + destination: PlatformId, + }, + /// Pull a `NexusReply` off the underlay network and update the `NexusState` + DeliverNexusReply, + CommitConfiguration(PlatformId), + Reconfigure(NexusConfig), +} diff --git a/trust-quorum/test-utils/src/event_log.rs b/trust-quorum/test-utils/src/event_log.rs new file mode 100644 index 00000000000..4319513a1ea --- /dev/null +++ b/trust-quorum/test-utils/src/event_log.rs @@ -0,0 +1,40 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! A mechanism for recording [`crate::Event`]s + +use super::Event; +use camino::Utf8Path; +use std::fs::File; +use std::io::{Seek, Write}; + +pub struct EventLog { + file: File, +} + +impl EventLog { + pub fn new(path: &Utf8Path) -> EventLog { + let mut file = File::create(path).unwrap(); + // We want to incremntally write an array of `Event`s. 
+ // Start the array + file.write_all(b"[\n").expect("opening brace written"); + EventLog { file } + } + + pub fn record(&mut self, event: &Event) { + serde_json::to_writer_pretty(&mut self.file, event) + .expect("writing event succeeded"); + self.file.write_all(b",\n").expect("write succeeded"); + } +} + +impl Drop for EventLog { + fn drop(&mut self) { + // Backup over the trailing comma and newline + let _ = self.file.seek_relative(-2); + // Finish writing the array of events + let _ = self.file.write_all(b"\n]\n"); + let _ = self.file.sync_data(); + } +} diff --git a/trust-quorum/test-utils/src/lib.rs b/trust-quorum/test-utils/src/lib.rs new file mode 100644 index 00000000000..7eccc64f5a9 --- /dev/null +++ b/trust-quorum/test-utils/src/lib.rs @@ -0,0 +1,23 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Helpers for use by our proptests and tqdb + +mod event; +mod event_log; +pub mod nexus; +mod state; + +pub use event::Event; +pub use event_log::EventLog; +pub use state::TqState; + +use trust_quorum::PlatformId; + +/// All possible members used in a test +pub fn member_universe(size: usize) -> Vec { + (0..size) + .map(|serial| PlatformId::new("test".into(), serial.to_string())) + .collect() +} diff --git a/trust-quorum/test-utils/src/nexus.rs b/trust-quorum/test-utils/src/nexus.rs new file mode 100644 index 00000000000..a64acb39d09 --- /dev/null +++ b/trust-quorum/test-utils/src/nexus.rs @@ -0,0 +1,170 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Nexus related types for trust-quorum testing + +use daft::Diffable; +use iddqd::id_ord_map::RefMut; +use iddqd::{IdOrdItem, IdOrdMap, id_upcast}; +use omicron_uuid_kinds::RackUuid; +use serde::{Deserialize, Serialize}; +use std::collections::BTreeSet; +use trust_quorum::{Epoch, PlatformId, ReconfigureMsg, Threshold}; + +// The operational state of nexus for a given configuration +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Diffable)] +pub enum NexusOp { + Committed, + Aborted, + Preparing, +} + +/// A single nexus configuration +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Diffable)] +pub struct NexusConfig { + pub op: NexusOp, + pub epoch: Epoch, + pub last_committed_epoch: Option, + pub coordinator: PlatformId, + pub members: BTreeSet, + // This is our `K` parameter + pub threshold: Threshold, + + // This is our `Z` parameter. + // + // Nexus can commit when it has seen K+Z prepare acknowledgements + // + // Only nexus needs to know this value since it alone determines when a + // commit may occur. + pub commit_crash_tolerance: u8, + + pub prepared_members: BTreeSet, + pub committed_members: BTreeSet, +} + +impl NexusConfig { + pub fn new( + epoch: Epoch, + last_committed_epoch: Option, + coordinator: PlatformId, + members: BTreeSet, + threshold: Threshold, + ) -> NexusConfig { + // We want a few extra nodes beyond `threshold` to ack before we commit. + // This is the number of nodes that can go offline while still allowing + // an unlock to occur. 
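+        // (Illustrative example, not from the original source: with 5
+        // members and threshold K = 3, members.len() - K = 2, so Z = 1
+        // and Nexus can commit once 4 members have acked their prepares.)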
+ let commit_crash_tolerance = match members.len() - threshold.0 as usize + { + 0..=1 => 0, + 2..=4 => 1, + 5..=7 => 2, + _ => 3, + }; + NexusConfig { + op: NexusOp::Preparing, + epoch, + last_committed_epoch, + coordinator, + members, + threshold, + commit_crash_tolerance, + prepared_members: BTreeSet::new(), + committed_members: BTreeSet::new(), + } + } + + pub fn to_reconfigure_msg(&self, rack_id: RackUuid) -> ReconfigureMsg { + ReconfigureMsg { + rack_id, + epoch: self.epoch, + last_committed_epoch: self.last_committed_epoch, + members: self.members.clone(), + threshold: self.threshold, + } + } + + // Are there enough prepared members to commit? + pub fn can_commit(&self) -> bool { + self.prepared_members.len() + >= (self.threshold.0 + self.commit_crash_tolerance) as usize + } +} + +impl IdOrdItem for NexusConfig { + type Key<'a> = Epoch; + + fn key(&self) -> Self::Key<'_> { + self.epoch + } + + id_upcast!(); +} + +/// A model of Nexus's view of the world during the test +#[derive(Debug, Clone, Diffable)] +pub struct NexusState { + // No reason to change the rack_id + pub rack_id: RackUuid, + + pub configs: IdOrdMap, +} + +impl NexusState { + #[allow(clippy::new_without_default)] + pub fn new() -> NexusState { + NexusState { rack_id: RackUuid::new_v4(), configs: IdOrdMap::new() } + } + + // Create a `ReconfigureMsg` for the latest nexus config + pub fn reconfigure_msg_for_latest_config( + &self, + ) -> (&PlatformId, ReconfigureMsg) { + let config = self.configs.iter().last().expect("at least one config"); + (&config.coordinator, config.to_reconfigure_msg(self.rack_id)) + } + + /// Abort the latest reconfiguration attempt + pub fn abort_reconfiguration(&mut self) { + let config = self.configs.iter().last().expect("at least one config"); + // Can only abort while preparing + assert_eq!(config.op, NexusOp::Preparing); + } + + pub fn latest_config(&self) -> &NexusConfig { + self.configs.iter().last().expect("at least one config") + } + + pub fn latest_config_mut(&mut self) -> RefMut<'_, NexusConfig> { + self.configs.iter_mut().last().expect("at least one config") + } + + pub fn last_committed_config(&self) -> Option<&NexusConfig> { + // IdOrdMap doesn't allow reverse iteration. + // We therefore iterate through all configs to find the latest committed one. + // We could track this out of band but that leaves more room for error. + let mut found: Option<&NexusConfig> = None; + for c in &self.configs { + if c.op == NexusOp::Committed { + found = Some(c) + } + } + found + } +} + +#[derive( + Debug, + Clone, + PartialOrd, + Ord, + PartialEq, + Eq, + Serialize, + Deserialize, + Diffable, +)] +pub enum NexusReply { + AckedPreparesFromCoordinator { epoch: Epoch, acks: BTreeSet }, + CommitAck { from: PlatformId, epoch: Epoch }, +} diff --git a/trust-quorum/test-utils/src/state.rs b/trust-quorum/test-utils/src/state.rs new file mode 100644 index 00000000000..35ae9f13e84 --- /dev/null +++ b/trust-quorum/test-utils/src/state.rs @@ -0,0 +1,888 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! 
The entire state of our test system + +use crate::nexus::{ + NexusConfig, NexusOp, NexusReply, NexusState, NexusStateDiff, +}; +use crate::{Event, member_universe}; +use daft::{BTreeMapDiff, BTreeSetDiff, Diffable, Leaf}; +use iddqd::IdOrdMap; +use slog::Logger; +use std::collections::{BTreeMap, BTreeSet}; +use std::fmt::Display; +use trust_quorum::{ + Configuration, CoordinatorOperation, CoordinatorStateDiff, Envelope, Epoch, + Node, NodeCallerCtx, NodeCommonCtx, NodeCtx, NodeCtxDiff, NodeDiff, + PeerMsgKind, PlatformId, ValidatedReconfigureMsgDiff, +}; + +// The state of our entire system including the system under test and +// test specific infrastructure. +#[derive(Debug, Clone, Diffable)] +pub struct TqState { + /// A logger for our test + #[daft(ignore)] + pub log: Logger, + + /// Our system under test + pub sut: Sut, + + /// All in flight messages between nodes + pub bootstrap_network: BTreeMap>, + + /// All in flight responses to nexus. We don't model the requests, as those + /// are `Node` public method calls. But we don't want to synchronously + /// update nexus state as a result of those calls, because that ruins any + /// possible interleaving with other actions. + /// + /// This is a way to allow interleaving of nexus replies without changing + /// the Node API to accept a separate set of Nexus messages and return + /// messages. We may decide that we want to do that, but for now we'll stick + /// with a concrete `Node` method based API that is "triggered" by nexus + /// messages. + pub underlay_network: Vec, + + /// A model of Nexus's view of the world during the test + pub nexus: NexusState, + + /// A cache of our member universe, so we only have to generate it once + pub member_universe: Vec, + + /// All possible system faults in our test + pub faults: Faults, + + /// All configurations ever generated by a coordinator. + /// + /// If an epoch got skipped due to a crashed coordinator then there will not + /// be a configuration for that epoch. + pub all_coordinated_configs: IdOrdMap, + + /// Expunged nodes cannot be added to a cluster. We never reuse nodes in + /// this test. We include nodes here that may not know yet that they have + /// been expunged in the `Sut`. + pub expunged: BTreeSet, +} + +impl TqState { + pub fn new(log: Logger) -> TqState { + // We'll fill this in when applying the initial_config + let sut = Sut::empty(); + let member_universe = vec![]; + TqState { + log, + sut, + bootstrap_network: BTreeMap::new(), + underlay_network: Vec::new(), + nexus: NexusState::new(), + member_universe, + faults: Faults::default(), + all_coordinated_configs: IdOrdMap::new(), + expunged: BTreeSet::new(), + } + } + + /// Send the latest `ReconfigureMsg` from `Nexus` to the coordinator node + /// + /// If the node is not available, then abort the configuration at nexus + pub fn send_reconfigure_msg(&mut self) { + let (coordinator, msg) = self.nexus.reconfigure_msg_for_latest_config(); + let epoch_to_config = msg.epoch; + if self.faults.crashed_nodes.contains(coordinator) { + // We must abort the configuration. This mimics a timeout. + self.nexus.abort_reconfiguration(); + } else { + let (node, ctx) = self + .sut + .nodes + .get_mut(coordinator) + .expect("coordinator exists"); + + node.coordinate_reconfiguration(ctx, msg) + .expect("valid configuration"); + + // Do we have a `Configuration` for this epoch yet? 
+ // + // For most reconfigurations, shares for the last committed + // configuration must be retrieved before the configuration is + // generated and saved in the persistent state. + let latest_persisted_config = + ctx.persistent_state().latest_config().expect("config exists"); + if latest_persisted_config.epoch == epoch_to_config { + // Save the configuration for later + self.all_coordinated_configs + .insert_unique(latest_persisted_config.clone()) + .expect("unique"); + } + } + } + + /// Check postcondition assertions after initial configuration + pub fn postcondition_initial_configuration(&mut self) { + let (coordinator, msg) = self.nexus.reconfigure_msg_for_latest_config(); + + // The coordinator should have received the `ReconfigureMsg` from Nexus + if !self.faults.crashed_nodes.contains(coordinator) { + let (node, ctx) = self + .sut + .nodes + .get_mut(coordinator) + .expect("coordinator exists"); + let mut connected_members = 0; + // The coordinator should start preparing by sending a `PrepareMsg` to all + // connected nodes in the membership set. + for member in + msg.members.iter().filter(|&id| id != coordinator).cloned() + { + if self.faults.is_connected(coordinator.clone(), member.clone()) + { + connected_members += 1; + let msg_found = ctx.envelopes().any(|envelope| { + envelope.to == member + && envelope.from == *coordinator + && matches!( + envelope.msg.kind, + PeerMsgKind::Prepare { .. } + ) + }); + assert!(msg_found); + } + } + assert_eq!(connected_members, ctx.envelopes().count()); + + // The coordinator should be in the prepare phase + let cs = node.get_coordinator_state().expect("is coordinating"); + assert!(matches!(cs.op(), CoordinatorOperation::Prepare { .. })); + + // The persistent state should have changed + assert!(ctx.persistent_state_change_check_and_reset()); + assert!(ctx.persistent_state().has_prepared(msg.epoch)); + assert!(ctx.persistent_state().latest_committed_epoch().is_none()); + } + } + + /// Put any outgoing coordinator messages from the latest configuration on the wire + pub fn send_envelopes_from_coordinator(&mut self) { + let coordinator = { + let (coordinator, _) = + self.nexus.reconfigure_msg_for_latest_config(); + coordinator.clone() + }; + self.send_envelopes_from(&coordinator); + } + + pub fn send_envelopes_from(&mut self, id: &PlatformId) { + let (_, ctx) = self.sut.nodes.get_mut(id).expect("node exists"); + for envelope in ctx.drain_envelopes() { + let msgs = + self.bootstrap_network.entry(envelope.to.clone()).or_default(); + msgs.push(envelope); + } + } + + pub fn apply_event(&mut self, event: Event) { + match event { + Event::InitialSetup { + member_universe_size, + config, + crashed_nodes, + } => { + self.apply_event_initial_config( + member_universe_size, + config, + crashed_nodes, + ); + } + Event::AbortConfiguration(epoch) => { + self.apply_event_abort_configuration(epoch) + } + Event::SendNexusReplyOnUnderlay(reply) => { + self.apply_event_send_nexus_reply_on_underlay(reply) + } + Event::DeliverEnvelope { destination } => { + self.apply_event_deliver_envelope(destination); + } + Event::DeliverNexusReply => { + self.apply_event_deliver_nexus_reply(); + } + Event::CommitConfiguration(dest) => { + self.apply_event_commit(dest); + } + Event::Reconfigure(nexus_config) => { + self.apply_event_reconfigure(nexus_config) + } + } + } + + fn apply_event_initial_config( + &mut self, + member_universe_size: usize, + config: NexusConfig, + crashed_nodes: BTreeSet, + ) { + // Generate the member universe + self.member_universe = 
member_universe(member_universe_size); + // Create the SUT nodes + self.sut = Sut::new(&self.log, self.member_universe.clone()); + + self.faults.crashed_nodes = crashed_nodes; + + // Inform nexus about the initial configuration + self.nexus.configs.insert_unique(config).expect("new config"); + + // Establish bootstrap network connections between live nodes + for (from, (node, ctx)) in self + .sut + .nodes + .iter_mut() + .filter(|(id, _)| !self.faults.crashed_nodes.contains(id)) + { + for to in self.member_universe.iter().filter(|id| { + !self.faults.crashed_nodes.contains(id) && from != *id + }) { + node.on_connect(ctx, to.clone()); + } + } + + self.send_reconfigure_msg(); + + // Check the results of the initial setup + self.postcondition_initial_configuration(); + + // Put the coordinator's outgoing messages on the wire if there are any + self.send_envelopes_from_coordinator(); + } + + fn apply_event_commit(&mut self, id: PlatformId) { + let rack_id = self.nexus.rack_id; + let latest_config = self.nexus.latest_config(); + let (node, ctx) = + self.sut.nodes.get_mut(&id).expect("destination exists"); + node.commit_configuration(ctx, rack_id, latest_config.epoch) + .expect("commit succeeded"); + + self.underlay_network.push(NexusReply::CommitAck { + from: id, + epoch: latest_config.epoch, + }); + } + + fn apply_event_send_nexus_reply_on_underlay(&mut self, reply: NexusReply) { + self.underlay_network.push(reply); + } + + fn apply_event_deliver_nexus_reply(&mut self) { + let mut latest_config = self.nexus.latest_config_mut(); + let reply = self.underlay_network.pop().expect("reply exists"); + match reply { + NexusReply::AckedPreparesFromCoordinator { epoch, acks } => { + if epoch == latest_config.epoch { + latest_config.prepared_members.extend(acks); + } + } + NexusReply::CommitAck { from, epoch } => { + if latest_config.epoch == epoch { + latest_config.committed_members.insert(from); + } + } + } + } + + fn apply_event_abort_configuration(&mut self, epoch: Epoch) { + let mut latest_config = self.nexus.latest_config_mut(); + assert_eq!(epoch, latest_config.epoch); + latest_config.op = NexusOp::Aborted; + } + + fn apply_event_deliver_envelope(&mut self, destination: PlatformId) { + let envelope = self + .bootstrap_network + .get_mut(&destination) + .unwrap() + .pop() + .expect("envelope in bootstrap network"); + let (node, ctx) = + self.sut.nodes.get_mut(&envelope.to).expect("destination exists"); + node.handle(ctx, envelope.from, envelope.msg); + + // If this is the first time we've seen a configuration, track it + // + // We have to do this here because for reconfigurations, shares + // for the last committed reconfiguration are gathered before + // the config is created. We don't know exactly when config + // generation occurs, but know that it happens after envelopes + // are delivered, except for configurations that don't have + // a last committed config. This is normally the initial + // configuration, but can be later ones if the initial config + // is aborted. + if ctx.persistent_state_change_check_and_reset() { + if let Some(latest_config) = ctx.persistent_state().latest_config() + { + if !self + .all_coordinated_configs + .contains_key(&latest_config.epoch) + { + // The coordinator must be the first node to create + // the configuration. 
+ assert_eq!(&latest_config.coordinator, ctx.platform_id()); + + self.all_coordinated_configs + .insert_unique(latest_config.clone()) + .expect("unique config"); + } + } + } + + // Send any messages as a result of handling this message + send_envelopes(ctx, &mut self.bootstrap_network); + + // Remove any destinations with zero messages in-flight + self.bootstrap_network.retain(|_, msgs| !msgs.is_empty()); + } + + fn apply_event_reconfigure(&mut self, nexus_config: NexusConfig) { + self.nexus.configs.insert_unique(nexus_config).expect("new config"); + self.send_reconfigure_msg(); + self.send_envelopes_from_coordinator(); + } +} + +/// Broken out of `TqState` to alleviate borrow checker woes +fn send_envelopes( + ctx: &mut NodeCtx, + bootstrap_network: &mut BTreeMap>, +) { + for envelope in ctx.drain_envelopes() { + let envelopes = + bootstrap_network.entry(envelope.to.clone()).or_default(); + envelopes.push(envelope); + } +} + +/// The system under test +/// +/// This is our real code. +#[derive(Debug, Clone, Diffable)] +pub struct Sut { + /// All nodes in the member universe + pub nodes: BTreeMap, +} + +impl Sut { + pub fn empty() -> Sut { + Sut { nodes: BTreeMap::new() } + } + + pub fn new(log: &Logger, universe: Vec) -> Sut { + let nodes = universe + .into_iter() + .map(|id| { + let mut ctx = NodeCtx::new(id.clone()); + let node = Node::new(log, &mut ctx); + (id, (node, ctx)) + }) + .collect(); + Sut { nodes } + } +} + +/// Faults in our system. It's useful to keep these self contained and not +/// in separate fields in `TestState` so that we can access them all at once +/// independently of other `TestState` fields. +#[derive(Default, Debug, Clone, Diffable)] +pub struct Faults { + // We allow nodes to crash and restart and therefore track crashed nodes here. + // + // A crashed node is implicitly disconnected from every other node. We don't + // bother storing the pairs in `disconnected_nodes`, but instead check both + // fields when necessary. + pub crashed_nodes: BTreeSet, + + /// The set of disconnected nodes + pub disconnected_nodes: DisconnectedNodes, +} + +impl Faults { + pub fn is_connected(&self, node1: PlatformId, node2: PlatformId) -> bool { + !self.crashed_nodes.contains(&node1) + && !self.crashed_nodes.contains(&node2) + && !self.disconnected_nodes.contains(node1, node2) + } +} + +/// For cardinality purposes, we assume all nodes are connected and explicitly +/// disconnect some of them. This allows us to track and compare much less data. 
+#[derive(Default, Debug, Clone, Diffable)] +pub struct DisconnectedNodes { + // We sort each pair on insert for quick lookups + pairs: BTreeSet<(PlatformId, PlatformId)>, +} + +impl DisconnectedNodes { + // Return true if the pair is newly inserted + pub fn insert(&mut self, node1: PlatformId, node2: PlatformId) -> bool { + assert_ne!(node1, node2); + + let pair = if node1 < node2 { (node1, node2) } else { (node2, node1) }; + self.pairs.insert(pair) + } + + // Return true if the pair of nodes is disconnected, false otherwise + pub fn contains(&self, node1: PlatformId, node2: PlatformId) -> bool { + assert_ne!(node1, node2); + let pair = if node1 < node2 { (node1, node2) } else { (node2, node1) }; + self.pairs.contains(&pair) + } +} + +/***************************************************************************** + * + * Diff related display code + * + *****************************************************************************/ + +/// Diff Display functionality for `TqState` +/// +/// All diff related code lives in `test-utils`, because we enable the +/// trust-quorum feature `danger_partial_eq_ct_wrapper` in this crate. We +/// don't enable it for all uses of the `trust_quorum` crate, especially in +/// production. +/// +/// Since we only use it for human readable output in test tools - at least for +/// now, we put it behind a feature flag and implement all display functionality +/// here. +impl Display for TqStateDiff<'_> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + // The set of SUT nodes never changes + for (&id, &leaf) in self.sut.nodes.common.iter() { + if leaf.is_modified() { + writeln!(f, "Node changed: {id}")?; + let (node_diff, ctx_diff) = leaf.diff_pair(); + display_node_diff(node_diff, f)?; + display_node_ctx_diff(ctx_diff, f)?; + + // Add a blank line between modified nodes + writeln!(f)?; + } + } + + display_bootstrap_network_diff(&self.bootstrap_network, f)?; + display_underlay_network_diff(&self.underlay_network, f)?; + display_nexus_state_diff(&self.nexus, f)?; + display_faults_diff(&self.faults, f)?; + display_expunged_diff(&self.expunged, f)?; + + Ok(()) + } +} + +fn display_expunged_diff( + diff: &BTreeSetDiff<'_, PlatformId>, + f: &mut std::fmt::Formatter<'_>, +) -> std::fmt::Result { + if !diff.added.is_empty() { + writeln!(f, "expunged nodes:")?; + for id in &diff.added { + writeln!(f, " {id}")?; + } + } + Ok(()) +} + +fn display_faults_diff( + diff: &FaultsDiff<'_>, + f: &mut std::fmt::Formatter<'_>, +) -> std::fmt::Result { + if !diff.crashed_nodes.added.is_empty() { + writeln!(f, " Nodes crashed:")?; + for id in &diff.crashed_nodes.added { + writeln!(f, " {id}")?; + } + } + if !diff.crashed_nodes.removed.is_empty() { + writeln!(f, " nodes started:")?; + for id in &diff.crashed_nodes.removed { + writeln!(f, " {id}")?; + } + } + + if !diff.disconnected_nodes.pairs.added.is_empty() { + writeln!(f, " nodes disconnected from each other:")?; + for pair in &diff.disconnected_nodes.pairs.added { + writeln!(f, " {}, {}", pair.0, pair.1)?; + } + } + if !diff.disconnected_nodes.pairs.removed.is_empty() { + writeln!(f, " nodes connected to each other:")?; + for pair in &diff.disconnected_nodes.pairs.removed { + writeln!(f, " {}, {}", pair.0, pair.1)?; + } + } + Ok(()) +} + +fn display_nexus_state_diff( + diff: &NexusStateDiff<'_>, + f: &mut std::fmt::Formatter<'_>, +) -> std::fmt::Result { + if diff.configs.modified().count() != 0 { + writeln!(f, " nexus state changed:")?; + } + + // Nexus configs can only be added or modified + for c in 
&diff.configs.added { + writeln!(f, " config added at epoch {}, op: {:?}", c.epoch, c.op)?; + } + for c in diff.configs.modified_diff() { + writeln!(f, " config modified at epoch {}", c.epoch.before)?; + if c.op.is_modified() { + let op = c.op.diff_pair(); + writeln!(f, " op changed: {:?} -> {:?}", op.before, op.after)?; + } + for id in c.prepared_members.added { + writeln!(f, " new prepare ack received: {id}")?; + } + for id in c.committed_members.added { + writeln!(f, " new commit ack received: {id}")?; + } + } + + Ok(()) +} + +fn display_underlay_network_diff( + diff: &Leaf<&[NexusReply]>, + f: &mut std::fmt::Formatter<'_>, +) -> std::fmt::Result { + if diff.is_unchanged() { + return Ok(()); + } + + let before: BTreeSet<_> = diff.before.iter().collect(); + let after: BTreeSet<_> = diff.after.iter().collect(); + + let added = after.difference(&before).count(); + let removed = before.difference(&after).count(); + + writeln!(f, " {added} new nexus replies in flight on underlay network")?; + writeln!( + f, + " {removed} nexus replies delivered to nexus from underlay network", + )?; + + Ok(()) +} + +fn display_bootstrap_network_diff( + diff: &BTreeMapDiff<'_, PlatformId, Vec>, + f: &mut std::fmt::Formatter<'_>, +) -> std::fmt::Result { + if !diff.added.is_empty() { + writeln!(f, " messages newly in flight on bootstrap network:")?; + for id in diff.added.keys() { + writeln!(f, " destination: {id}")?; + } + } + + if !diff.removed.is_empty() { + writeln!(f, " all messages delivered from bootstrap network:")?; + for id in diff.removed.keys() { + writeln!(f, " destination: {id}")?; + } + } + + if diff.unchanged_keys().count() != 0 { + writeln!(f, " messages remain in flight from bootstrap network:")?; + for id in diff.unchanged_keys() { + writeln!(f, " destination: {id}")?; + } + } + Ok(()) +} + +// Walk a `NodeCtxDiff` and format it for display +fn display_node_ctx_diff( + diff: NodeCtxDiff<'_>, + f: &mut std::fmt::Formatter<'_>, +) -> std::fmt::Result { + if !diff.persistent_state().configs.added.is_empty() { + writeln!(f, " config added to persistent state: ")?; + for c in &diff.persistent_state().configs.added { + writeln!(f, " epoch: {}", c.epoch)?; + } + } + if !diff.persistent_state().configs.removed.is_empty() { + writeln!(f, " config removed from persistent state: ")?; + for c in &diff.persistent_state().configs.removed { + writeln!(f, " epoch: {}", c.epoch)?; + } + } + + if !diff.persistent_state().shares.added.is_empty() { + writeln!(f, " our share added to persistent state: ")?; + for e in diff.persistent_state().shares.added.keys() { + writeln!(f, " epoch: {e}")?; + } + } + if !diff.persistent_state().shares.removed.is_empty() { + writeln!(f, " our share removed from persistent state: ")?; + for e in diff.persistent_state().shares.removed.keys() { + writeln!(f, " epoch: {e}")?; + } + } + + if !diff.persistent_state().commits.added.is_empty() { + writeln!(f, " commit added to persistent state: ")?; + for e in &diff.persistent_state().commits.added { + writeln!(f, " epoch: {e}")?; + } + } + if !diff.persistent_state().commits.removed.is_empty() { + writeln!(f, " commit removed from persistent state: ")?; + for e in &diff.persistent_state().commits.removed { + writeln!(f, " epoch: {e}")?; + } + } + + if diff.outgoing().is_modified() { + writeln!(f, " messages sent to or delivered from bootstrap network")?; + } + + if !diff.connected().added.is_empty() { + writeln!(f, " nodes connected:")?; + for id in &diff.connected().added { + writeln!(f, " {id}")?; + } + } + + if 
!diff.connected().removed.is_empty() { + writeln!(f, " nodes disconnected:")?; + for id in &diff.connected().removed { + writeln!(f, " {id}")?; + } + } + + if !diff.alarms().added.is_empty() { + writeln!(f, " alarms triggered:")?; + for alarm in &diff.alarms().added { + writeln!(f, " {alarm:?}")?; + } + } + + if !diff.alarms().removed.is_empty() { + writeln!(f, " alarms cleared:")?; + for alarm in &diff.alarms().removed { + writeln!(f, " {alarm:?}")?; + } + } + + Ok(()) +} + +// Walk a `NodeDiff` and format it for display +fn display_node_diff( + node_diff: NodeDiff<'_>, + f: &mut std::fmt::Formatter<'_>, +) -> std::fmt::Result { + // Show changes in `Node::coordinator_state` + if node_diff.coordinator_state().is_modified() { + writeln!(f, " coordinator state changed: ")?; + if node_diff.coordinator_state().before.is_none() { + writeln!( + f, + " started coordinating at epoch {}", + node_diff + .coordinator_state() + .after + .unwrap() + .reconfigure_msg() + .epoch() + )?; + } else if node_diff.coordinator_state().after.is_none() { + writeln!( + f, + " stopped coordinating at epoch {}", + node_diff + .coordinator_state() + .before + .unwrap() + .reconfigure_msg() + .epoch() + )?; + } else { + let before = node_diff.coordinator_state().before.unwrap(); + let after = node_diff.coordinator_state().after.unwrap(); + + // They are both `Some`, so figure out what changed + // by recursing + let diff = before.diff(after); + display_coordinator_state_diff(diff, f)?; + } + } + + // Show changes in `Node::key_share_computer` + if node_diff.key_share_computer().is_modified() { + writeln!(f, " key share computer changed: ")?; + if node_diff.key_share_computer().before.is_none() { + writeln!( + f, + " started computing key share at epoch {}", + node_diff.key_share_computer().after.unwrap().config().epoch + )?; + } else if node_diff.key_share_computer().after.is_none() { + writeln!( + f, + " stopped computing key share at epoch {}", + node_diff.key_share_computer().before.unwrap().config().epoch + )?; + } else { + writeln!( + f, + " computing key share at epochs: {} -> {}", + node_diff.key_share_computer().before.unwrap().config().epoch, + node_diff.key_share_computer().after.unwrap().config().epoch + )?; + } + } + + Ok(()) +} + +pub fn display_coordinator_state_diff( + diff: CoordinatorStateDiff<'_>, + f: &mut std::fmt::Formatter<'_>, +) -> std::fmt::Result { + display_validated_reconfigure_msg_diff(diff.reconfigure_msg(), f)?; + + // Configuration contains roughly the same information as a + // `ValidatedReconfigureMsg`. Let's report the only relevant change. + if diff.configuration().encrypted_rack_secrets.is_modified() { + writeln!(f, " encrypted rack secrets changed")?; + } + + display_coordinator_operation_diff(diff.op().diff_pair(), f)?; + + Ok(()) +} + +pub fn display_validated_reconfigure_msg_diff( + diff: &ValidatedReconfigureMsgDiff<'_>, + f: &mut std::fmt::Formatter<'_>, +) -> std::fmt::Result { + // diff.rack_id changes when tqdb `rewind` command is used, which makes it + // confusing. It never changes inside tests, so no need to diff it. 
+ if diff.epoch().is_modified() { + writeln!( + f, + " epoch: {} -> {}", + diff.epoch().before, + diff.epoch().after + )?; + } + if diff.last_committed_epoch().is_modified() { + writeln!( + f, + " last committed epoch: {:?} -> {:?}", + diff.last_committed_epoch().before, + diff.last_committed_epoch().after + )?; + } + if !diff.members().added.is_empty() { + writeln!(f, " added members:")?; + for member in &diff.members().added { + writeln!(f, " {member}")?; + } + } + if !diff.members().removed.is_empty() { + writeln!(f, " removed members:")?; + for member in &diff.members().removed { + writeln!(f, " {member}")?; + } + } + if diff.threshold().is_modified() { + writeln!( + f, + " threshold: {} -> {}", + diff.threshold().before, + diff.threshold().after + )?; + } + // Always write out the coordinator id. It's useful for digging. + writeln!( + f, + " coordinator: {} -> {}", + diff.coordinator_id().before, + diff.coordinator_id().after, + )?; + + Ok(()) +} + +pub fn display_coordinator_operation_diff( + diff: Leaf<&CoordinatorOperation>, + f: &mut std::fmt::Formatter<'_>, +) -> std::fmt::Result { + if diff.is_unchanged() { + return Ok(()); + } + + // If the same variant changed contents, compare them. Otherwise report the + // change in variants. + match (diff.before, diff.after) { + ( + CoordinatorOperation::CollectShares { + old_epoch, + old_collected_shares, + .. + }, + CoordinatorOperation::CollectShares { + old_epoch: after_old_epoch, + old_collected_shares: after_old_collected_shares, + .. + }, + ) => { + // If the collection epoch changed, then only report that + if old_epoch != after_old_epoch { + #[allow(clippy::uninlined_format_args)] + writeln!( + f, + " collecting shares: epoch changed: {} -> {}", + old_epoch, after_old_epoch + )?; + } else if old_collected_shares != after_old_collected_shares { + writeln!( + f, + " collected shares changed at epoch: {old_epoch}", + )?; + } + } + ( + CoordinatorOperation::CollectLrtqShares { shares: before, .. }, + CoordinatorOperation::CollectLrtqShares { shares: after, .. }, + ) => { + if before != after { + writeln!(f, " collected lrtq shares differ")?; + } + } + ( + CoordinatorOperation::Prepare { prepare_acks: before, .. }, + CoordinatorOperation::Prepare { prepare_acks: after, .. }, + ) => { + if before != after { + writeln!(f, " received prepare acks differ")?; + } + } + (before, after) => { + writeln!( + f, + " coordinator operation changed: {} -> {}", + before.name(), + after.name() + )?; + } + } + + Ok(()) +} diff --git a/trust-quorum/tests/cluster.rs b/trust-quorum/tests/cluster.rs index 9bc7da94c65..c4ddd620daa 100644 --- a/trust-quorum/tests/cluster.rs +++ b/trust-quorum/tests/cluster.rs @@ -5,10 +5,8 @@ //! 
Property based test driving multiple trust quorum nodes use daft::Diffable; -use iddqd::id_ord_map::RefMut; -use iddqd::{IdOrdItem, IdOrdMap, id_upcast}; +use dropshot::test_util::log_prefix_for_test; use omicron_test_utils::dev::test_setup_log; -use omicron_uuid_kinds::RackUuid; use prop::sample::Index; use proptest::collection::{btree_set, size_range}; use proptest::prelude::*; @@ -17,265 +15,18 @@ use slog::{Logger, info, o}; use std::collections::{BTreeMap, BTreeSet}; use test_strategy::{Arbitrary, proptest}; use trust_quorum::{ - Configuration, CoordinatorOperation, Envelope, Epoch, Node, NodeCallerCtx, - NodeCommonCtx, NodeCtx, PeerMsgKind, PlatformId, ReconfigureMsg, Threshold, + CoordinatorOperation, Epoch, NodeCommonCtx, PlatformId, Threshold, +}; +use trust_quorum_test_utils::TqState; +use trust_quorum_test_utils::{ + Event, EventLog, + nexus::{NexusConfig, NexusOp, NexusReply}, }; - -/// The system under test -/// -/// This is our real code. -pub struct Sut { - /// All nodes in the member universe - pub nodes: BTreeMap, -} - -impl Sut { - pub fn new(log: &Logger, universe: Vec) -> Sut { - let nodes = universe - .into_iter() - .map(|id| { - let mut ctx = NodeCtx::new(id.clone()); - let node = Node::new(log, &mut ctx); - (id, (node, ctx)) - }) - .collect(); - Sut { nodes } - } -} - -#[derive(Debug, PartialEq, Eq)] -pub enum NexusOp { - Committed, - Aborted, - Preparing, -} - -/// A single nexus configuration -#[derive(Debug)] -pub struct NexusConfig { - op: NexusOp, - epoch: Epoch, - last_committed_epoch: Option, - coordinator: PlatformId, - members: BTreeSet, - // This is our `K` parameter - threshold: Threshold, - - // This is our `Z` parameter. - // - // Nexus can commit when it has seen K+Z prepare acknowledgements - // - // Only nexus needs to know this value since it alone determines when a - // commit may occur. - commit_crash_tolerance: u8, - - prepared_members: BTreeSet, - committed_members: BTreeSet, -} - -impl NexusConfig { - pub fn new( - epoch: Epoch, - last_committed_epoch: Option, - coordinator: PlatformId, - members: BTreeSet, - threshold: Threshold, - ) -> NexusConfig { - // We want a few extra nodes beyond `threshold` to ack before we commit. - // This is the number of nodes that can go offline while still allowing - // an unlock to occur. - let commit_crash_tolerance = match members.len() - threshold.0 as usize - { - 0..=1 => 0, - 2..=4 => 1, - 5..=7 => 2, - _ => 3, - }; - NexusConfig { - op: NexusOp::Preparing, - epoch, - last_committed_epoch, - coordinator, - members, - threshold, - commit_crash_tolerance, - prepared_members: BTreeSet::new(), - committed_members: BTreeSet::new(), - } - } - - pub fn to_reconfigure_msg(&self, rack_id: RackUuid) -> ReconfigureMsg { - ReconfigureMsg { - rack_id, - epoch: self.epoch, - last_committed_epoch: self.last_committed_epoch, - members: self.members.clone(), - threshold: self.threshold, - } - } - - // Are there enough prepared members to commit? 
- pub fn can_commit(&self) -> bool { - self.prepared_members.len() - >= (self.threshold.0 + self.commit_crash_tolerance) as usize - } -} - -impl IdOrdItem for NexusConfig { - type Key<'a> = Epoch; - - fn key(&self) -> Self::Key<'_> { - self.epoch - } - - id_upcast!(); -} - -/// A model of Nexus's view of the world during the test -pub struct NexusState { - // No reason to change the rack_id - pub rack_id: RackUuid, - - pub configs: IdOrdMap, -} - -impl NexusState { - pub fn new() -> NexusState { - NexusState { rack_id: RackUuid::new_v4(), configs: IdOrdMap::new() } - } - - // Create a `ReconfigureMsg` for the latest nexus config - pub fn reconfigure_msg_for_latest_config( - &self, - ) -> (&PlatformId, ReconfigureMsg) { - let config = self.configs.iter().last().expect("at least one config"); - (&config.coordinator, config.to_reconfigure_msg(self.rack_id)) - } - - /// Abort the latest reconfiguration attempt - pub fn abort_reconfiguration(&mut self) { - let config = self.configs.iter().last().expect("at least one config"); - // Can only abort while preparing - assert_eq!(config.op, NexusOp::Preparing); - } - - pub fn latest_config(&self) -> &NexusConfig { - self.configs.iter().last().expect("at least one config") - } - - pub fn latest_config_mut(&mut self) -> RefMut<'_, NexusConfig> { - self.configs.iter_mut().last().expect("at least one config") - } - - pub fn last_committed_config(&self) -> Option<&NexusConfig> { - // IdOrdMap doesn't allow reverse iteration. - // We therefore iterate through all configs to find the latest committed one. - // We could track this out of band but that leaves more room for error. - let mut found: Option<&NexusConfig> = None; - for c in &self.configs { - if c.op == NexusOp::Committed { - found = Some(c) - } - } - found - } -} - -/// Faults in our system. It's useful to keep these self contained and not -/// in separate fields in `TestState` so that we can access them all at once -/// independently of other `TestState` fields. -#[derive(Default)] -pub struct Faults { - // We allow nodes to crash and restart and therefore track crashed nodes here. - // - // A crashed node is implicitly disconnected from every other node. We don't - // bother storing the pairs in `disconnected_nodes`, but instead check both - // fields when necessary. - pub crashed_nodes: BTreeSet, - - /// The set of disconnected nodes - pub disconnected_nodes: DisconnectedNodes, -} - -impl Faults { - pub fn is_connected(&self, node1: PlatformId, node2: PlatformId) -> bool { - !self.crashed_nodes.contains(&node1) - && !self.crashed_nodes.contains(&node2) - && !self.disconnected_nodes.contains(node1, node2) - } -} - -/// For cardinality purposes, we assume all nodes are connected and explicitly -/// disconnect some of them. This allows us to track and compare much less data. 
-#[derive(Default)] -pub struct DisconnectedNodes { - // We sort each pair on insert for quick lookups - pairs: BTreeSet<(PlatformId, PlatformId)>, -} - -impl DisconnectedNodes { - // Return true if the pair is newly inserted - pub fn insert(&mut self, node1: PlatformId, node2: PlatformId) -> bool { - assert_ne!(node1, node2); - - let pair = if node1 < node2 { (node1, node2) } else { (node2, node1) }; - self.pairs.insert(pair) - } - - // Return true if the pair of nodes is disconnected, false otherwise - pub fn contains(&self, node1: PlatformId, node2: PlatformId) -> bool { - assert_ne!(node1, node2); - let pair = if node1 < node2 { (node1, node2) } else { (node2, node1) }; - self.pairs.contains(&pair) - } -} - -pub enum NexusReply { - CommitAck { from: PlatformId, epoch: Epoch }, -} /// The state of our test +#[derive(Clone, Diffable)] struct TestState { - /// A logger for our test - pub log: Logger, - - /// Our system under test - pub sut: Sut, - - /// All in flight messages between nodes - pub bootstrap_network: BTreeMap>, - - /// All in flight responses to nexus. We don't model the requests, as those - /// are `Node` public method calls. But we don't want to synchronously - /// update nexus state as a result of those calls, because that ruins any - /// possible interleaving with other actions. - /// - /// This is a way to allow interleaving of nexus replies without changing - /// the Node API to accept a separate set of Nexus messages and return - /// messages. We may decide that we want to do that, but for now we'll stick - /// with a concrete `Node` method based API that is "triggered" by nexus - /// messages. - pub underlay_network: Vec, - - /// A model of Nexus's view of the world during the test - pub nexus: NexusState, - - /// A cache of our member universe, so we only have to generate it once - pub member_universe: Vec, - - /// All possible system faults in our test - pub faults: Faults, - - /// All configurations ever generated by a coordinator. - /// - /// If an epoch got skipped due to a crashed coordinator then there will not - /// be a configuration for that epoch. - pub all_coordinated_configs: IdOrdMap, - - /// Expunged nodes cannot be added to a cluster. We never reuse nodes in - /// this test. We include nodes here that may not know yet that they have - /// been expunged in the `Sut`. - pub expunged: BTreeSet, + pub tq_state: TqState, /// Keep track of the number of generated `Action`s that get skipped /// @@ -287,29 +38,23 @@ struct TestState { impl TestState { pub fn new(log: Logger) -> TestState { - let sut = Sut::new(&log, member_universe()); - TestState { - log: log.new(o!("component" => "tq-proptest")), - sut, - bootstrap_network: BTreeMap::new(), - underlay_network: Vec::new(), - nexus: NexusState::new(), - member_universe: member_universe(), - faults: Faults::default(), - all_coordinated_configs: IdOrdMap::new(), - expunged: BTreeSet::new(), - skipped_actions: 0, - } + TestState { tq_state: TqState::new(log), skipped_actions: 0 } } - pub fn create_nexus_initial_config( - &mut self, + fn initial_config_event( + &self, config: GeneratedConfiguration, - ) { + down_nodes: BTreeSet, + ) -> Event { + // `tq_state` doesn't create the member universe until the first event is + // applied. We duplicate it here so we can create that initial config + // event. 
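+        //
+        // As a sketch (assuming the test-utils helper behaves like the old
+        // `member_universe()` removed from this file and produces exactly
+        // `MEMBER_UNIVERSE_SIZE` ids, as the example event logs suggest):
+        //
+        //   let universe = trust_quorum_test_utils::member_universe(40);
+        //   assert_eq!(universe.len(), 40);
+        //   assert_eq!(universe[0], PlatformId::new("test".into(), "0".into()));
+        //
+        // Both `config.members` and `down_nodes` are indexes into this Vec
+        // rather than PlatformIds.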
+ let member_universe = + trust_quorum_test_utils::member_universe(MEMBER_UNIVERSE_SIZE); let members: BTreeSet = config .members .iter() - .map(|index| self.member_universe[*index].clone()) + .map(|index| member_universe[*index].clone()) .collect(); let threshold = Threshold(usize::max( 2, @@ -319,135 +64,21 @@ impl TestState { let coordinator = members.first().cloned().expect("at least one member"); let last_committed_epoch = None; - let nexus_config = NexusConfig::new( + let config = NexusConfig::new( epoch, last_committed_epoch, coordinator, members, threshold, ); - self.nexus.configs.insert_unique(nexus_config).expect("new config"); - } - - pub fn setup_initial_connections(&mut self, down_nodes: BTreeSet) { - self.faults.crashed_nodes = down_nodes + let crashed_nodes = down_nodes .into_iter() - .map(|index| self.member_universe[index].clone()) + .map(|index| member_universe[index].clone()) .collect(); - - for (from, (node, ctx)) in self - .sut - .nodes - .iter_mut() - .filter(|(id, _)| !self.faults.crashed_nodes.contains(id)) - { - for to in self.member_universe.iter().filter(|id| { - !self.faults.crashed_nodes.contains(id) && from != *id - }) { - node.on_connect(ctx, to.clone()); - } - } - } - - /// Send the latest `ReconfigureMsg` from `Nexus` to the coordinator node - /// - /// If the node is not available, then abort the configuration at nexus - pub fn send_reconfigure_msg(&mut self) { - let (coordinator, msg) = self.nexus.reconfigure_msg_for_latest_config(); - let epoch_to_config = msg.epoch; - if self.faults.crashed_nodes.contains(coordinator) { - // We must abort the configuration. This mimics a timeout. - self.nexus.abort_reconfiguration(); - } else { - let (node, ctx) = self - .sut - .nodes - .get_mut(coordinator) - .expect("coordinator exists"); - - node.coordinate_reconfiguration(ctx, msg) - .expect("valid configuration"); - - // Do we have a `Configuration` for this epoch yet? - // - // For most reconfigurations, shares for the last committed - // configuration must be retrieved before the configuration is - // generated and saved in the persistent state. - let latest_persisted_config = - ctx.persistent_state().latest_config().expect("config exists"); - if latest_persisted_config.epoch == epoch_to_config { - // Save the configuration for later - self.all_coordinated_configs - .insert_unique(latest_persisted_config.clone()) - .expect("unique"); - } - } - } - - /// Check postcondition assertions after initial configuration - pub fn postcondition_initial_configuration( - &mut self, - ) -> Result<(), TestCaseError> { - let (coordinator, msg) = self.nexus.reconfigure_msg_for_latest_config(); - - // The coordinator should have received the `ReconfigureMsg` from Nexus - if !self.faults.crashed_nodes.contains(coordinator) { - let (node, ctx) = self - .sut - .nodes - .get_mut(coordinator) - .expect("coordinator exists"); - let mut connected_members = 0; - // The coordinator should start preparing by sending a `PrepareMsg` to all - // connected nodes in the membership set. - for member in - msg.members.iter().filter(|&id| id != coordinator).cloned() - { - if self.faults.is_connected(coordinator.clone(), member.clone()) - { - connected_members += 1; - let msg_found = ctx.envelopes().any(|envelope| { - envelope.to == member - && envelope.from == *coordinator - && matches!( - envelope.msg.kind, - PeerMsgKind::Prepare { .. 
} - ) - }); - prop_assert!(msg_found); - } - } - assert_eq!(connected_members, ctx.envelopes().count()); - - // The coordinator should be in the prepare phase - let cs = node.get_coordinator_state().expect("is coordinating"); - assert!(matches!(cs.op(), CoordinatorOperation::Prepare { .. })); - - // The persistent state should have changed - assert!(ctx.persistent_state_change_check_and_reset()); - assert!(ctx.persistent_state().has_prepared(msg.epoch)); - assert!(ctx.persistent_state().latest_committed_epoch().is_none()); - } - - Ok(()) - } - - /// Put any outgoing coordinator messages from the latest configuration on the wire - pub fn send_envelopes_from_coordinator(&mut self) { - let coordinator = { - let (coordinator, _) = - self.nexus.reconfigure_msg_for_latest_config(); - coordinator.clone() - }; - self.send_envelopes_from(&coordinator); - } - - pub fn send_envelopes_from(&mut self, id: &PlatformId) { - let (_, ctx) = self.sut.nodes.get_mut(id).expect("node exists"); - for envelope in ctx.drain_envelopes() { - let msgs = - self.bootstrap_network.entry(envelope.to.clone()).or_default(); - msgs.push(envelope); + Event::InitialSetup { + member_universe_size: MEMBER_UNIVERSE_SIZE, + config, + crashed_nodes, } } @@ -455,174 +86,107 @@ impl TestState { pub fn run_actions( &mut self, actions: Vec, + event_log: &mut EventLog, ) -> Result<(), TestCaseError> { for action in actions { - let skipped = match action { - Action::DeliverEnvelopes(indices) => { - self.action_deliver_envelopes(indices) - } - Action::PollPrepareAcks => self.action_poll_prepare_acks(), - Action::Commit(indices) => self.action_commit(indices), - Action::DeliverNexusReplies(n) => { - self.action_deliver_nexus_replies(n) - } - Action::Reconfigure { - num_added_nodes, - removed_nodes, - threshold, - coordinator, - } => self.action_reconfigure( - num_added_nodes, - removed_nodes, - threshold, - coordinator, - ), - }; - - if skipped { - self.skipped_actions += 1; - } else { + let events = self.action_to_events(action); + for event in &events { + event_log.record(event); + } + let check_invariants = !events.is_empty(); + for event in events { + self.tq_state.apply_event(event); + } + if check_invariants { self.check_invariants()?; + } else { + self.skipped_actions += 1; } } Ok(()) } - // Deliver network messages to generated destinations - fn action_deliver_envelopes(&mut self, indices: Vec) -> bool { - let destinations: Vec<_> = - self.bootstrap_network.keys().cloned().collect(); - if destinations.is_empty() { - // nothing to do - return true; - } - for index in indices { - let id = index.get(&destinations); - if let Some(envelope) = - self.bootstrap_network.get_mut(id).unwrap().pop() - { - let (node, ctx) = - self.sut.nodes.get_mut(id).expect("destination exists"); - node.handle(ctx, envelope.from, envelope.msg); - - // If this is the first time we've seen a configuration, track it - // - // We have to do this here because for reconfigurations, shares - // for the last committed reconfiguration are gathered before - // the config is created. We don't know exactly when config - // generation occurs, but know that it happens after envelopes - // are delivered, except for configurations that don't have - // a last committed config. This is normally the initial - // configuration, but can be later ones if the initial config - // is aborted. 
- if ctx.persistent_state_change_check_and_reset() { - if let Some(latest_config) = - ctx.persistent_state().latest_config() - { - if !self - .all_coordinated_configs - .contains_key(&latest_config.epoch) - { - // The coordinator must be the first node to create - // the configuration. - assert_eq!( - &latest_config.coordinator, - ctx.platform_id() - ); - - self.all_coordinated_configs - .insert_unique(latest_config.clone()) - .expect("unique config"); - } - } - } - - // Send any messages as a result of handling this message - send_envelopes(ctx, &mut self.bootstrap_network); + fn action_to_events(&self, action: Action) -> Vec { + match action { + Action::DeliverEnvelopes(indices) => { + self.action_to_events_deliver_envelopes(indices) + } + Action::PollPrepareAcks => { + self.action_to_events_poll_prepare_acks() } + Action::Commit(indices) => self.action_to_events_commit(indices), + Action::DeliverNexusReplies(n) => { + self.action_to_events_deliver_nexus_replies(n) + } + Action::Reconfigure { + num_added_nodes, + removed_nodes, + threshold, + coordinator, + } => self.action_to_events_reconfigure( + num_added_nodes, + removed_nodes, + threshold, + coordinator, + ), } - - // Remove any destinations with zero messages in-flight - self.bootstrap_network.retain(|_, msgs| !msgs.is_empty()); - - false } - // Call `Node::commit_reconfiguration` for nodes that have prepared and have - // not yet acked their commit. - fn action_commit(&mut self, indices: Vec) -> bool { - let rack_id = self.nexus.rack_id; - let latest_config = self.nexus.latest_config(); - if latest_config.op != NexusOp::Committed { - return true; - } - let committable: Vec<_> = latest_config - .prepared_members - .difference(&latest_config.committed_members) - .collect(); - - if committable.is_empty() { - // All members have committed - self.skipped_actions += 1; - return true; + fn action_to_events_deliver_envelopes( + &self, + indices: Vec, + ) -> Vec { + let mut events = vec![]; + let destinations: Vec<_> = + self.tq_state.bootstrap_network.keys().cloned().collect(); + if destinations.is_empty() { + // nothing to do + return events; } - // We shouldn't be calling commit twice or sending multiple replies - // to nexus, but a random bunch of indices might result in that. We - // therefore track nodes that have committed already. 
- let mut committed: BTreeSet = BTreeSet::new(); - + // Add an event only if there is actually an envelope to send + let mut counts = BTreeMap::new(); for index in indices { - let id = *index.get(&committable); - if committed.contains(id) { - continue; + let id = index.get(&destinations); + let count = counts.entry(id).or_insert(0usize); + *count += 1; + let num_envelopes = self + .tq_state + .bootstrap_network + .get(id) + .expect("destination exists") + .len(); + if *count <= num_envelopes { + events.push(Event::DeliverEnvelope { destination: id.clone() }); } - let (node, ctx) = - self.sut.nodes.get_mut(id).expect("destination exists"); - node.commit_configuration(ctx, rack_id, latest_config.epoch) - .expect("commit succeeded"); - committed.insert(id.clone()); - } - - let epoch = latest_config.epoch; - for from in committed { - self.underlay_network.push(NexusReply::CommitAck { from, epoch }); } - false - } - fn action_deliver_nexus_replies(&mut self, n: usize) -> bool { - let mut config = self.nexus.latest_config_mut(); - let n = usize::min(n, self.underlay_network.len()); - for reply in self.underlay_network.drain(0..n) { - match reply { - NexusReply::CommitAck { from, epoch } => { - if config.epoch == epoch { - config.committed_members.insert(from); - } - } - } - } - false + events } - /// Poll the coordinator for acks if nexus is preparing, and commit - /// if enough acks have been received. - fn action_poll_prepare_acks(&mut self) -> bool { - let mut latest_config = self.nexus.latest_config_mut(); + fn action_to_events_poll_prepare_acks(&self) -> Vec { + let mut events = vec![]; + let latest_config = self.tq_state.nexus.latest_config(); if latest_config.op != NexusOp::Preparing { // No point in checking. Commit or abort has occurred. - return true; + return events; } // If the coordinator has crashed then Nexus should abort. // Crashing is not actually implemented yet, but it will be. - if self.faults.crashed_nodes.contains(&latest_config.coordinator) { - latest_config.op = NexusOp::Aborted; + if self + .tq_state + .faults + .crashed_nodes + .contains(&latest_config.coordinator) + { + events.push(Event::AbortConfiguration(latest_config.epoch)); + return events; } // Lookup the coordinator node let (coordinator, ctx) = self + .tq_state .sut .nodes .get(&latest_config.coordinator) @@ -635,7 +199,7 @@ impl TestState { .latest_config() .map_or(Epoch(0), |c| c.epoch); if coordinator_epoch != latest_config.epoch { - return true; + return events; } // Poll the coordinator for acks. @@ -644,68 +208,66 @@ impl TestState { // crashed and nexus is still preparing. // // In a real system this request would go over the network, but would - // end up at the same place. It's not apparent that its worth the - // complexity here to delay poll replies to Nexus, but we can do that - // if necessary and then deliver them when the `DeliverNexusReplies` - // action fires. + // end up at the same place. 
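+        //
+        // Rather than mutating the nexus model here, the acks are wrapped in a
+        // `SendNexusReplyOnUnderlay` event below and only take effect when a
+        // later `DeliverNexusReply` event is applied, preserving the ability
+        // to interleave nexus replies with other actions.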
let cs = coordinator .get_coordinator_state() .expect("coordinator is coordinating"); - latest_config.prepared_members.extend(cs.op().acked_prepares()); - - // Commit if possible - if latest_config.can_commit() { - info!(self.log, "nexus committed"; - "epoch" => %latest_config.epoch, - "coordinator" => %latest_config.coordinator - ); - - latest_config.op = NexusOp::Committed; - - let new_members = latest_config.members.clone(); - let new_epoch = latest_config.epoch; + // Put the reply on the network + events.push(Event::SendNexusReplyOnUnderlay( + NexusReply::AckedPreparesFromCoordinator { + epoch: coordinator_epoch, + acks: cs.op().acked_prepares(), + }, + )); + events + } - // Expunge any removed nodes from the last committed configuration - if let Some(last_committed_epoch) = - latest_config.last_committed_epoch - { - // Release our mutable borrow - drop(latest_config); + fn action_to_events_commit(&self, indices: Vec) -> Vec { + let mut events = vec![]; + let latest_config = self.tq_state.nexus.latest_config(); + if latest_config.op != NexusOp::Committed { + return events; + } + let committable: Vec<_> = latest_config + .prepared_members + .difference(&latest_config.committed_members) + .collect(); - let last_committed_config = self - .nexus - .configs - .get(&last_committed_epoch) - .expect("config exists"); + if committable.is_empty() { + return events; + } - let expunged = last_committed_config - .members - .difference(&new_members) - .cloned(); + // De-duplicate the Index->PlatformId mapping + let mut nodes: BTreeSet = BTreeSet::new(); + for index in indices { + let id = *index.get(&committable); + nodes.insert(id.clone()); + } + for node in nodes { + events.push(Event::CommitConfiguration(node)); + } + events + } - for e in expunged { - info!( - self.log, - "expunged node"; - "epoch" => %new_epoch, - "platform_id" => %e); - self.expunged.insert(e); - } - } + fn action_to_events_deliver_nexus_replies(&self, n: usize) -> Vec { + let mut events = vec![]; + let n = usize::min(n, self.tq_state.underlay_network.len()); + for _ in 0..n { + events.push(Event::DeliverNexusReply); } - false + events } - fn action_reconfigure( - &mut self, + fn action_to_events_reconfigure( + &self, num_added_nodes: usize, removed_nodes: Vec, threshold: Index, coordinator: Selector, - ) -> bool { - let latest_epoch = self.nexus.latest_config().epoch; - let last_committed_config = self.nexus.last_committed_config(); + ) -> Vec { + let latest_epoch = self.tq_state.nexus.latest_config().epoch; + let last_committed_config = self.tq_state.nexus.last_committed_config(); // We must leave at least one node available to coordinate between the // new and old configurations. let (new_members, coordinator) = match last_committed_config { @@ -720,7 +282,7 @@ impl TestState { let num_nodes_to_add = usize::min( MEMBER_UNIVERSE_SIZE - c.members.len() - - self.expunged.len(), + - self.tq_state.expunged.len(), possible_num_nodes_to_add, ); @@ -737,7 +299,7 @@ impl TestState { // We can only start a reconfiguration if Nexus has an // acknowledgement that at least one node has seen the commit. if c.committed_members.is_empty() { - return true; + return vec![]; } let coordinator = coordinator.select(c.committed_members.iter()); @@ -762,11 +324,13 @@ impl TestState { // Just pick the first set of nodes in `member_universe` // that are not in the current membership and not expunged. 
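+            //
+            // Skipping `self.tq_state.expunged` here is what keeps expunged
+            // nodes from ever being reused in a later configuration.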
let mut nodes_to_add = BTreeSet::new(); - for id in self.member_universe.iter() { + for id in self.tq_state.member_universe.iter() { if nodes_to_add.len() == num_nodes_to_add { break; } - if !self.expunged.contains(id) && !c.members.contains(id) { + if !self.tq_state.expunged.contains(id) + && !c.members.contains(id) + { nodes_to_add.insert(id.clone()); } } @@ -785,11 +349,12 @@ impl TestState { // We are generating a new config if num_added_nodes < MIN_CLUSTER_SIZE { // Nothing to do here. - return true; + return vec![]; } // Pick the first `num_added_nodes` from member_universe // It's as good a choice as any and deterministic let new_members: BTreeSet<_> = self + .tq_state .member_universe .iter() .take(num_added_nodes) @@ -819,9 +384,7 @@ impl TestState { new_members, threshold, ); - self.nexus.configs.insert_unique(nexus_config).expect("new config"); - self.send_reconfigure_msg(); - false + vec![Event::Reconfigure(nexus_config)] } /// At every point during the running of the test, invariants over the system @@ -845,8 +408,9 @@ impl TestState { fn invariant_all_nodes_have_same_configuration_per_epoch( &self, ) -> Result<(), TestCaseError> { - for (id, (_, ctx)) in &self.sut.nodes { + for (id, (_, ctx)) in &self.tq_state.sut.nodes { let diff = self + .tq_state .all_coordinated_configs .diff(&ctx.persistent_state().configs); // No new configs exist @@ -872,8 +436,9 @@ impl TestState { &self, ) -> Result<(), TestCaseError> { let (acked, epoch) = { - let latest_config = self.nexus.latest_config(); + let latest_config = self.tq_state.nexus.latest_config(); let (node, _) = self + .tq_state .sut .nodes .get(&latest_config.coordinator) @@ -900,7 +465,8 @@ impl TestState { // Make sure the coordinator actually is coordinating for this epoch for id in acked { - let (_, ctx) = self.sut.nodes.get(&id).expect("node exists"); + let (_, ctx) = + self.tq_state.sut.nodes.get(&id).expect("node exists"); prop_assert!(ctx.persistent_state().has_prepared(epoch)); } @@ -916,13 +482,14 @@ impl TestState { fn invariant_nodes_have_committed_if_nexus_has_acks( &self, ) -> Result<(), TestCaseError> { - let latest_config = self.nexus.latest_config(); + let latest_config = self.tq_state.nexus.latest_config(); if latest_config.op != NexusOp::Committed { return Ok(()); } for id in &latest_config.committed_members { - let (_, ctx) = self.sut.nodes.get(&id).expect("node exists"); + let (_, ctx) = + self.tq_state.sut.nodes.get(&id).expect("node exists"); let ps = ctx.persistent_state(); prop_assert!(ps.commits.contains(&latest_config.epoch)); prop_assert!(ps.has_prepared(latest_config.epoch)); @@ -943,7 +510,7 @@ impl TestState { fn invariant_nodes_not_coordinating_and_computing_key_share_simultaneously( &self, ) -> Result<(), TestCaseError> { - for (id, (node, _)) in &self.sut.nodes { + for (id, (node, _)) in &self.tq_state.sut.nodes { prop_assert!( !(node.get_coordinator_state().is_some() && node.is_computing_key_share()), @@ -957,7 +524,7 @@ impl TestState { // Ensure there has been no alarm at any node fn invariant_no_alarms(&self) -> Result<(), TestCaseError> { - for (id, (_, ctx)) in &self.sut.nodes { + for (id, (_, ctx)) in &self.tq_state.sut.nodes { let alarms = ctx.alarms(); prop_assert!( alarms.is_empty(), @@ -970,18 +537,6 @@ impl TestState { } } -/// Broken out of `TestState` to alleviate borrow checker woes -fn send_envelopes( - ctx: &mut NodeCtx, - bootstrap_network: &mut BTreeMap>, -) { - for envelope in ctx.drain_envelopes() { - let envelopes = - 
bootstrap_network.entry(envelope.to.clone()).or_default(); - envelopes.push(envelope); - } -} - // A high-level set of generated actions to drive the test forward. #[derive(Debug, Arbitrary)] #[allow(clippy::large_enum_variant)] @@ -1056,13 +611,7 @@ pub struct GeneratedConfiguration { /// still be duplicated due to the shift implementation used. Therefore we /// instead just choose from a constrained set of usize values that we can /// use directly as indexes into our fixed size structure for all tests. - /// - /// Note that we intentionally set the max set size to MAX_CLUSTER_SIZE-1. - /// This is because we always want to include the coordinator in the - /// configuration, but its value may not be chosen randomly. In this case, - /// we have to add it to the actual membership set we generate from this - /// configuration with [`TestState::generated_config_to_reconfigure_msg`]. - #[strategy(btree_set(0..=MEMBER_UNIVERSE_SIZE, MIN_CLUSTER_SIZE..MAX_CLUSTER_SIZE))] + #[strategy(btree_set(0..MEMBER_UNIVERSE_SIZE, MIN_CLUSTER_SIZE..MAX_CLUSTER_SIZE))] pub members: BTreeSet, /// An index is roughly equivalent to a threshold, since a threshold cannot @@ -1073,20 +622,13 @@ pub struct GeneratedConfiguration { pub threshold: Index, } -/// All possible members used in a test -fn member_universe() -> Vec { - (0..=MEMBER_UNIVERSE_SIZE) - .map(|serial| PlatformId::new("test".into(), serial.to_string())) - .collect() -} - #[derive(Debug, Arbitrary)] pub struct TestInput { initial_config: GeneratedConfiguration, // We choose a set of nodes to be crashed, resulting in them being // disconnected from every other node. - #[strategy(btree_set(0..=MEMBER_UNIVERSE_SIZE, 0..MAX_INITIAL_DOWN_NODES))] + #[strategy(btree_set(0..MEMBER_UNIVERSE_SIZE, 0..MAX_INITIAL_DOWN_NODES))] initial_down_nodes: BTreeSet, #[any(size_range(MIN_ACTIONS..MAX_ACTIONS).lift())] actions: Vec, @@ -1095,28 +637,28 @@ pub struct TestInput { #[proptest] fn test_trust_quorum_protocol(input: TestInput) { let logctx = test_setup_log("test_trust_quorum_protocol"); + let (parent_dir, prefix) = log_prefix_for_test(logctx.test_name()); + let event_log_path = parent_dir.join(format!("{prefix}-events.json")); + let mut event_log = EventLog::new(&event_log_path); - let mut state = TestState::new(logctx.log.clone()); + let log = logctx.log.new(o!("component" => "tq-proptest")); + let mut state = TestState::new(log.clone()); // Perform the initial setup - state.create_nexus_initial_config(input.initial_config); - state.setup_initial_connections(input.initial_down_nodes); - state.send_reconfigure_msg(); - - // Check the results of the initial setup - state.postcondition_initial_configuration()?; - - // Put the coordinator's outgoing messages on the wire if there are any - state.send_envelopes_from_coordinator(); + let event = state + .initial_config_event(input.initial_config, input.initial_down_nodes); + event_log.record(&event); + state.tq_state.apply_event(event); // Start executing the actions - state.run_actions(input.actions)?; + state.run_actions(input.actions, &mut event_log)?; info!( - state.log, + log, "Test complete"; "skipped_actions" => state.skipped_actions ); + let _ = std::fs::remove_file(event_log_path); logctx.cleanup_successful(); } diff --git a/trust-quorum/tqdb/Cargo.toml b/trust-quorum/tqdb/Cargo.toml new file mode 100644 index 00000000000..4436cc99fbc --- /dev/null +++ b/trust-quorum/tqdb/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "tqdb" +version = "0.1.0" +edition = "2021" +license = "MPL-2.0" + +[lints] +workspace = 
true + +[dependencies] +anyhow.workspace = true +camino.workspace = true +clap.workspace = true +colored.workspace = true +daft.workspace = true +iddqd.workspace = true +omicron-repl-utils.workspace = true +reedline.workspace = true +reconfigurator-cli.workspace = true +serde_json.workspace = true +slog.workspace = true +tabled.workspace = true +trust-quorum = { workspace = true, features = ["danger_partial_eq_ct_wrapper"] } +trust-quorum-test-utils.workspace = true + +omicron-workspace-hack.workspace = true + +[[bin]] +name = "tqdb" +path = "src/bin/tqdb/main.rs" diff --git a/trust-quorum/tqdb/example-event-logs/cluster-49df2a4b903c778a-test_trust_quorum_protocol.14368.453-events.json b/trust-quorum/tqdb/example-event-logs/cluster-49df2a4b903c778a-test_trust_quorum_protocol.14368.453-events.json new file mode 100644 index 00000000000..f518a84590c --- /dev/null +++ b/trust-quorum/tqdb/example-event-logs/cluster-49df2a4b903c778a-test_trust_quorum_protocol.14368.453-events.json @@ -0,0 +1,4668 @@ +[ +{ + "InitialSetup": { + "member_universe_size": 40, + "config": { + "op": "Preparing", + "epoch": 1, + "last_committed_epoch": null, + "coordinator": { + "part_number": "test", + "serial_number": "1" + }, + "members": [ + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "15" + }, + { + "part_number": "test", + "serial_number": "25" + }, + { + "part_number": "test", + "serial_number": "27" + }, + { + "part_number": "test", + "serial_number": "3" + }, + { + "part_number": "test", + "serial_number": "32" + }, + { + "part_number": "test", + "serial_number": "34" + }, + { + "part_number": "test", + "serial_number": "37" + }, + { + "part_number": "test", + "serial_number": "39" + }, + { + "part_number": "test", + "serial_number": "4" + }, + { + "part_number": "test", + "serial_number": "5" + }, + { + "part_number": "test", + "serial_number": "7" + }, + { + "part_number": "test", + "serial_number": "9" + } + ], + "threshold": 2, + "commit_crash_tolerance": 3, + "prepared_members": [], + "committed_members": [] + }, + "crashed_nodes": [ + { + "part_number": "test", + "serial_number": "11" + }, + { + "part_number": "test", + "serial_number": "16" + }, + { + "part_number": "test", + "serial_number": "3" + }, + { + "part_number": "test", + "serial_number": "7" + } + ] + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 1, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "37" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "25" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "9" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "32" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "34" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "5" + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 1, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 1, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + } + ] + } + } +}, +"DeliverNexusReply", +{ + 
"SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 1, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + } + ] + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "39" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "27" + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 1, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 1, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "4" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "15" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 1, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "15" + }, + { + "part_number": "test", + "serial_number": "27" + }, + { + "part_number": "test", + "serial_number": "34" + }, + { + "part_number": "test", + "serial_number": "39" + }, + { + "part_number": "test", + "serial_number": "4" + }, + { + "part_number": "test", + "serial_number": "5" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 1, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "15" + }, + { + "part_number": "test", + "serial_number": "25" + }, + { + "part_number": "test", + "serial_number": "27" + }, + { + "part_number": "test", + "serial_number": "32" + }, + { + "part_number": "test", + "serial_number": "34" + }, + { + "part_number": "test", + "serial_number": "37" + }, + { + "part_number": "test", + "serial_number": "39" + }, + { + "part_number": "test", + "serial_number": "4" + }, + { + "part_number": "test", + "serial_number": "5" + }, + { + "part_number": "test", + "serial_number": "9" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 1, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "15" + 
}, + { + "part_number": "test", + "serial_number": "25" + }, + { + "part_number": "test", + "serial_number": "27" + }, + { + "part_number": "test", + "serial_number": "32" + }, + { + "part_number": "test", + "serial_number": "34" + }, + { + "part_number": "test", + "serial_number": "37" + }, + { + "part_number": "test", + "serial_number": "39" + }, + { + "part_number": "test", + "serial_number": "4" + }, + { + "part_number": "test", + "serial_number": "5" + }, + { + "part_number": "test", + "serial_number": "9" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 1, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "15" + }, + { + "part_number": "test", + "serial_number": "25" + }, + { + "part_number": "test", + "serial_number": "27" + }, + { + "part_number": "test", + "serial_number": "32" + }, + { + "part_number": "test", + "serial_number": "34" + }, + { + "part_number": "test", + "serial_number": "37" + }, + { + "part_number": "test", + "serial_number": "39" + }, + { + "part_number": "test", + "serial_number": "4" + }, + { + "part_number": "test", + "serial_number": "5" + }, + { + "part_number": "test", + "serial_number": "9" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 1, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "15" + }, + { + "part_number": "test", + "serial_number": "25" + }, + { + "part_number": "test", + "serial_number": "27" + }, + { + "part_number": "test", + "serial_number": "32" + }, + { + "part_number": "test", + "serial_number": "34" + }, + { + "part_number": "test", + "serial_number": "37" + }, + { + "part_number": "test", + "serial_number": "39" + }, + { + "part_number": "test", + "serial_number": "4" + }, + { + "part_number": "test", + "serial_number": "5" + }, + { + "part_number": "test", + "serial_number": "9" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 1, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "15" + }, + { + "part_number": "test", + "serial_number": "25" + }, + { + "part_number": "test", + "serial_number": "27" + }, + { + "part_number": "test", + "serial_number": "32" + }, + { + "part_number": "test", + "serial_number": "34" + }, + { + "part_number": "test", + "serial_number": "37" + }, + { + "part_number": "test", + "serial_number": "39" + }, + { + "part_number": "test", + "serial_number": "4" + }, + { + "part_number": "test", + "serial_number": "5" + }, + { + "part_number": "test", + "serial_number": "9" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 1, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "15" + }, + { + "part_number": "test", + "serial_number": "25" + }, + { + "part_number": "test", + "serial_number": "27" + }, + { + "part_number": "test", + "serial_number": "32" + }, + { + "part_number": "test", + "serial_number": "34" + }, + { + "part_number": "test", + "serial_number": "37" + }, + { + "part_number": "test", + "serial_number": "39" + }, + { + "part_number": "test", + "serial_number": "4" + }, + { + "part_number": "test", + "serial_number": "5" + }, + { + 
"part_number": "test", + "serial_number": "9" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 1, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "15" + }, + { + "part_number": "test", + "serial_number": "25" + }, + { + "part_number": "test", + "serial_number": "27" + }, + { + "part_number": "test", + "serial_number": "32" + }, + { + "part_number": "test", + "serial_number": "34" + }, + { + "part_number": "test", + "serial_number": "37" + }, + { + "part_number": "test", + "serial_number": "39" + }, + { + "part_number": "test", + "serial_number": "4" + }, + { + "part_number": "test", + "serial_number": "5" + }, + { + "part_number": "test", + "serial_number": "9" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 1, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "15" + }, + { + "part_number": "test", + "serial_number": "25" + }, + { + "part_number": "test", + "serial_number": "27" + }, + { + "part_number": "test", + "serial_number": "32" + }, + { + "part_number": "test", + "serial_number": "34" + }, + { + "part_number": "test", + "serial_number": "37" + }, + { + "part_number": "test", + "serial_number": "39" + }, + { + "part_number": "test", + "serial_number": "4" + }, + { + "part_number": "test", + "serial_number": "5" + }, + { + "part_number": "test", + "serial_number": "9" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 1, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "15" + }, + { + "part_number": "test", + "serial_number": "25" + }, + { + "part_number": "test", + "serial_number": "27" + }, + { + "part_number": "test", + "serial_number": "32" + }, + { + "part_number": "test", + "serial_number": "34" + }, + { + "part_number": "test", + "serial_number": "37" + }, + { + "part_number": "test", + "serial_number": "39" + }, + { + "part_number": "test", + "serial_number": "4" + }, + { + "part_number": "test", + "serial_number": "5" + }, + { + "part_number": "test", + "serial_number": "9" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 1, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "15" + }, + { + "part_number": "test", + "serial_number": "25" + }, + { + "part_number": "test", + "serial_number": "27" + }, + { + "part_number": "test", + "serial_number": "32" + }, + { + "part_number": "test", + "serial_number": "34" + }, + { + "part_number": "test", + "serial_number": "37" + }, + { + "part_number": "test", + "serial_number": "39" + }, + { + "part_number": "test", + "serial_number": "4" + }, + { + "part_number": "test", + "serial_number": "5" + }, + { + "part_number": "test", + "serial_number": "9" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "Reconfigure": { + "op": "Preparing", + "epoch": 2, + "last_committed_epoch": null, + "coordinator": { + "part_number": "test", + "serial_number": "1" + }, + "members": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + }, + { + "part_number": 
"test", + "serial_number": "3" + } + ], + "threshold": 2, + "commit_crash_tolerance": 1, + "prepared_members": [], + "committed_members": [] + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 2, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 2, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 2, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + } + ] + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "0" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "2" + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 2, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 2, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 2, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 2, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 2, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 2, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 2, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 2, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 2, + 
"acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 2, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 2, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 2, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 2, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 2, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 2, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 2, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 2, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 2, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "Reconfigure": { + "op": "Preparing", + "epoch": 3, + "last_committed_epoch": null, + "coordinator": { + "part_number": "test", + "serial_number": "3" + }, + "members": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + }, + { + "part_number": "test", + "serial_number": "3" + } + ], + "threshold": 2, + "commit_crash_tolerance": 1, + "prepared_members": [], + "committed_members": [] + } +}, +{ + "AbortConfiguration": 3 +}, +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +{ + "Reconfigure": { + "op": 
"Preparing", + "epoch": 4, + "last_committed_epoch": null, + "coordinator": { + "part_number": "test", + "serial_number": "3" + }, + "members": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + }, + { + "part_number": "test", + "serial_number": "3" + } + ], + "threshold": 2, + "commit_crash_tolerance": 1, + "prepared_members": [], + "committed_members": [] + } +}, +{ + "AbortConfiguration": 4 +}, +{ + "Reconfigure": { + "op": "Preparing", + "epoch": 5, + "last_committed_epoch": null, + "coordinator": { + "part_number": "test", + "serial_number": "2" + }, + "members": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + }, + { + "part_number": "test", + "serial_number": "3" + } + ], + "threshold": 3, + "commit_crash_tolerance": 0, + "prepared_members": [], + "committed_members": [] + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "0" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "2" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "2" + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 5, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 5, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 5, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 5, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "Reconfigure": { + "op": "Preparing", + "epoch": 6, + "last_committed_epoch": null, + "coordinator": { + "part_number": "test", + "serial_number": "2" + }, + "members": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ], + "threshold": 2, + "commit_crash_tolerance": 0, + "prepared_members": [], + "committed_members": [] + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 6, + "acks": [ + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "0" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + 
"SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 6, + "acks": [ + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 6, + "acks": [ + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 6, + "acks": [ + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 6, + "acks": [ + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "2" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "2" + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 6, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 6, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 6, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 6, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 6, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 6, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "Reconfigure": { + "op": "Preparing", + "epoch": 7, + "last_committed_epoch": null, + "coordinator": { + "part_number": "test", + "serial_number": "2" + }, + "members": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ], + "threshold": 2, + "commit_crash_tolerance": 0, + "prepared_members": [], + "committed_members": [] + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "0" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "Reconfigure": { + "op": "Preparing", + "epoch": 8, + "last_committed_epoch": 
null, + "coordinator": { + "part_number": "test", + "serial_number": "0" + }, + "members": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + }, + { + "part_number": "test", + "serial_number": "3" + } + ], + "threshold": 2, + "commit_crash_tolerance": 1, + "prepared_members": [], + "committed_members": [] + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "2" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "2" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "2" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "0" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "0" + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 8, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 8, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 8, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 8, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 8, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 8, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 8, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 8, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "Reconfigure": { + "op": "Preparing", + "epoch": 9, + "last_committed_epoch": null, + "coordinator": { + "part_number": "test", + "serial_number": "2" + }, + "members": [ + { + "part_number": "test", + "serial_number": 
"0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + }, + { + "part_number": "test", + "serial_number": "3" + } + ], + "threshold": 2, + "commit_crash_tolerance": 1, + "prepared_members": [], + "committed_members": [] + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 9, + "acks": [ + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 9, + "acks": [ + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 9, + "acks": [ + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "0" + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "2" + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 9, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "Reconfigure": { + "op": "Preparing", + "epoch": 10, + "last_committed_epoch": null, + "coordinator": { + "part_number": "test", + "serial_number": "1" + }, + "members": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + }, + { + "part_number": "test", + "serial_number": "3" + } + ], + "threshold": 2, + "commit_crash_tolerance": 1, + "prepared_members": [], + "committed_members": [] + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "0" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "2" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "2" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + 
"AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": 
"test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + 
"epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 10, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "Reconfigure": { + "op": "Preparing", + "epoch": 11, + "last_committed_epoch": null, + "coordinator": { + "part_number": "test", + "serial_number": "1" + }, + "members": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + }, + { + "part_number": "test", + "serial_number": "3" + } + ], + "threshold": 2, + "commit_crash_tolerance": 1, + "prepared_members": [], + "committed_members": [] + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 11, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + } + ] + } + } +}, +{ + "Reconfigure": { + "op": "Preparing", + "epoch": 12, + "last_committed_epoch": null, + "coordinator": { + "part_number": "test", + "serial_number": "1" + }, + "members": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + }, + { + "part_number": "test", + "serial_number": "3" + } + ], + "threshold": 2, + "commit_crash_tolerance": 1, + "prepared_members": [], + "committed_members": [] + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 12, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + } + ] + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "2" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "2" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "0" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "0" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 12, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 12, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 12, + "acks": [ + 
{ + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 12, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 12, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 12, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "Reconfigure": { + "op": "Preparing", + "epoch": 13, + "last_committed_epoch": null, + "coordinator": { + "part_number": "test", + "serial_number": "0" + }, + "members": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + }, + { + "part_number": "test", + "serial_number": "3" + } + ], + "threshold": 2, + "commit_crash_tolerance": 1, + "prepared_members": [], + "committed_members": [] + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "2" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "0" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "0" + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + 
"AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { 
+ "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 13, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "Reconfigure": { + "op": "Preparing", + "epoch": 14, + "last_committed_epoch": null, + "coordinator": { + "part_number": "test", + "serial_number": "2" + }, + "members": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + }, + { + "part_number": "test", + "serial_number": "3" + } + ], + "threshold": 3, + "commit_crash_tolerance": 0, + "prepared_members": [], + "committed_members": [] + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "0" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "Reconfigure": { + "op": "Preparing", + "epoch": 15, + "last_committed_epoch": null, + "coordinator": { + "part_number": "test", + "serial_number": "3" + }, + "members": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + 
"serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + }, + { + "part_number": "test", + "serial_number": "3" + } + ], + "threshold": 2, + "commit_crash_tolerance": 1, + "prepared_members": [], + "committed_members": [] + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "2" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "2" + } + } +}, +{ + "AbortConfiguration": 15 +}, +{ + "Reconfigure": { + "op": "Preparing", + "epoch": 16, + "last_committed_epoch": null, + "coordinator": { + "part_number": "test", + "serial_number": "2" + }, + "members": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ], + "threshold": 2, + "commit_crash_tolerance": 0, + "prepared_members": [], + "committed_members": [] + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "0" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "2" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "2" + } + } +}, +{ + "Reconfigure": { + "op": "Preparing", + "epoch": 17, + "last_committed_epoch": null, + "coordinator": { + "part_number": "test", + "serial_number": "1" + }, + "members": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ], + "threshold": 2, + "commit_crash_tolerance": 0, + "prepared_members": [], + "committed_members": [] + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 17, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 17, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + } + ] + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "2" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "0" + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 17, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 17, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 17, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 17, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 17, + "acks": [ + { + "part_number": "test", + "serial_number": "1" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 17, + "acks": 
[ + { + "part_number": "test", + "serial_number": "1" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 17, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 17, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 17, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 17, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "Reconfigure": { + "op": "Preparing", + "epoch": 18, + "last_committed_epoch": null, + "coordinator": { + "part_number": "test", + "serial_number": "2" + }, + "members": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + }, + { + "part_number": "test", + "serial_number": "3" + } + ], + "threshold": 2, + "commit_crash_tolerance": 1, + "prepared_members": [], + "committed_members": [] + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 18, + "acks": [ + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "0" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "1" + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 18, + "acks": [ + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "2" + } + } +}, +{ + "DeliverEnvelope": { + "destination": { + "part_number": "test", + "serial_number": "2" + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 18, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 18, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 18, + "acks": [ + { + "part_number": "test", + 
"serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 18, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 18, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 18, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 18, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 18, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 18, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply", +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 18, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +{ + "SendNexusReplyOnUnderlay": { + "AckedPreparesFromCoordinator": { + "epoch": 18, + "acks": [ + { + "part_number": "test", + "serial_number": "0" + }, + { + "part_number": "test", + "serial_number": "1" + }, + { + "part_number": "test", + "serial_number": "2" + } + ] + } + } +}, +"DeliverNexusReply", +"DeliverNexusReply" +] diff --git a/trust-quorum/tqdb/src/bin/tqdb/main.rs b/trust-quorum/tqdb/src/bin/tqdb/main.rs new file mode 100644 index 00000000000..b7e44e590fe --- /dev/null +++ b/trust-quorum/tqdb/src/bin/tqdb/main.rs @@ -0,0 +1,716 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! The Trust Quorum Debugger +//! +//! Capable of executing and stepping through event streams generated by +//! trust quorum proptests. 
+
+use anyhow::{Context, bail};
+use camino::Utf8PathBuf;
+use clap::{Args, Parser, Subcommand};
+use daft::Diffable;
+use omicron_repl_utils::run_repl_on_stdin_customized;
+use reconfigurator_cli::LogCapture;
+use reedline::{
+    ColumnarMenu, DefaultCompleter, DefaultPrompt, DefaultPromptSegment, Emacs,
+    FileBackedHistory, KeyCode, KeyModifiers, MenuBuilder, Reedline,
+    ReedlineEvent, default_emacs_keybindings,
+};
+use slog::{Logger, o};
+use std::collections::{BTreeMap, BTreeSet};
+use std::fmt::Write;
+use std::fs;
+use std::io::IsTerminal;
+use tabled::Tabled;
+use trust_quorum::PlatformId;
+use trust_quorum_test_utils::{Event, TqState};
+
+fn main() -> Result<(), anyhow::Error> {
+    let repl = TqdbRepl {};
+    repl.exec()
+}
+
+/// Internal debugger state
+pub struct Tqdb {
+    event_log_path: Option<Utf8PathBuf>,
+
+    events: Vec<Event>,
+
+    // Current state of the trust-quorum
+    current_state: TqState,
+
+    // Index of the next event to be applied
+    next_event: usize,
+
+    // All set breakpoints at log event index
+    breakpoints: BTreeSet<usize>,
+
+    // All snapshots ever taken.
+    //
+    // Snapshots are taken automatically when a breakpoint is hit.
+    snapshots: BTreeMap<usize, TqState>,
+
+    // Snapshot requests for events that haven't yet been applied
+    pending_snapshots: BTreeSet<usize>,
+}
+
+impl Tqdb {
+    pub fn new(log: &Logger) -> Self {
+        let log = log.new(o!("component" => "tqdb"));
+        Tqdb {
+            event_log_path: None,
+            events: vec![],
+            current_state: TqState::new(log),
+            next_event: 0,
+            breakpoints: BTreeSet::new(),
+            snapshots: BTreeMap::new(),
+            pending_snapshots: BTreeSet::new(),
+        }
+    }
+
+    pub fn reset_state(&mut self) {
+        let Tqdb {
+            event_log_path,
+            events,
+            current_state,
+            next_event,
+            breakpoints,
+            snapshots,
+            pending_snapshots,
+        } = self;
+        let log = current_state.log.clone();
+
+        *event_log_path = None;
+        *events = vec![];
+        *current_state = TqState::new(log);
+        *next_event = 0;
+        *breakpoints = BTreeSet::new();
+        *snapshots = BTreeMap::new();
+        *pending_snapshots = BTreeSet::new();
+    }
+
+    pub fn toggle_breakpoint(&mut self, index: usize) -> anyhow::Result<bool> {
+        if index >= self.events.len() {
+            bail!(
+                "Invalid event index: {index}. Only {} total events.",
+                self.events.len()
+            );
+        }
+        if !self.breakpoints.remove(&index) {
+            let _ = self.breakpoints.insert(index);
+            Ok(true)
+        } else {
+            Ok(false)
+        }
+    }
+
+    // Reset the state to the beginning of time
+    //
+    // Don't remove any breakpoints, snapshots, or pending snapshots
+    pub fn rewind(&mut self) {
+        let Tqdb {
+            event_log_path: _,
+            events: _,
+            current_state,
+            next_event,
+            breakpoints: _,
+            snapshots: _,
+            pending_snapshots: _,
+        } = self;
+
+        *current_state = TqState::new(current_state.log.clone());
+        *next_event = 0;
+    }
+
+    pub fn breakpoints(&self) -> &BTreeSet<usize> {
+        &self.breakpoints
+    }
+
+    pub fn maybe_snapshot(&mut self) {
+        if self.next_event == 0 {
+            return;
+        }
+        let curr_event = self.next_event - 1;
+        if self.pending_snapshots.remove(&curr_event) {
+            self.snapshots.insert(curr_event, self.current_state.clone());
+        }
+    }
+}
+
+/// Interactive REPL for our trust quorum debugger
+pub struct TqdbRepl {}
+
+impl TqdbRepl {
+    /// Execute the command.
+    pub fn exec(self) -> anyhow::Result<()> {
+        let (log_capture, log) =
+            LogCapture::new(std::io::stdout().is_terminal());
+
+        let mut tqdb = Tqdb::new(&log);
+
+        let mut completer = Box::new(DefaultCompleter::with_inclusions(&['-']));
+        completer.insert(Self::commands());
+        let completion_menu =
+            Box::new(ColumnarMenu::default().with_name("commands"));
+        let mut keybindings = default_emacs_keybindings();
+        keybindings.add_binding(
+            KeyModifiers::NONE,
+            KeyCode::Tab,
+            ReedlineEvent::UntilFound(vec![
+                ReedlineEvent::Menu("commands".to_string()),
+                ReedlineEvent::MenuNext,
+            ]),
+        );
+        let edit_mode = Box::new(Emacs::new(keybindings));
+
+        let history = Box::new(
+            FileBackedHistory::with_file(
+                10000,
+                "/tmp/.tqdb-history.txt".into(),
+            )
+            .expect("Error configuring history with file"),
+        );
+
+        let ed = Reedline::create()
+            .with_history(history)
+            .with_completer(completer)
+            .with_menu(reedline::ReedlineMenu::EngineCompleter(completion_menu))
+            .with_edit_mode(edit_mode);
+
+        let prompt = DefaultPrompt::new(
+            DefaultPromptSegment::Basic("tqdb".into()),
+            DefaultPromptSegment::Empty,
+        );
+
+        run_repl_on_stdin_customized(ed, &prompt, &mut |cmd: TopLevelArgs| {
+            process_command(&mut tqdb, cmd, &log_capture)
+        })
+    }
+
+    // Update this with each new subcommand
+    fn commands() -> Vec<String> {
+        // This assignment and match exist solely to ensure we update our list
+        // when we add or remove a command.
+        let c = Commands::Run;
+        match c {
+            Commands::Open { .. }
+            | Commands::Run
+            | Commands::Step { .. }
+            | Commands::BreakpointToggle { .. }
+            | Commands::BreakpointList
+            | Commands::Snapshot { .. }
+            | Commands::SnapshotList
+            | Commands::SnapshotListPending
+            | Commands::SnapshotShow { .. }
+            | Commands::Diff { .. }
+            | Commands::NodeShow { .. }
+            | Commands::Rewind
+            | Commands::Events(_)
+            | Commands::Show
+            | Commands::Summary => {}
+        }
+
+        [
+            "open",
+            "run",
+            "step",
+            "breakpoint-toggle",
+            "breakpoint-list",
+            "snapshot",
+            "snapshot-list",
+            "snapshot-list-pending",
+            "snapshot-show",
+            "diff",
+            "node-show",
+            "rewind",
+            "events",
+            "show",
+            "summary",
+        ]
+        .into_iter()
+        .map(Into::into)
+        .collect()
+    }
+}
+
+/// Arguments for our debugger REPL
+#[derive(Parser, Debug)]
+struct TopLevelArgs {
+    #[command(subcommand)]
+    command: Commands,
+}
+
+#[derive(Debug, Subcommand)]
+enum Commands {
+    // open event log file
+    #[clap(alias = "o")]
+    Open {
+        /// path to the event log file
+        path: Utf8PathBuf,
+    },
+    // apply all events until completion or a breakpoint
+    #[clap(alias = "r")]
+    Run,
+    /// step over n events by applying them
+    ///
+    /// This command steps over breakpoints. Use `run` if you want to stop at
+    /// breakpoints.
+ #[clap(alias = "s")] + Step { + /// number of events to apply, 1 if not given + num_events: Option, + }, + + /// toggle a breakpoint at a given event + #[clap(alias = "b")] + BreakpointToggle { + /// index of the event in the log to apply the breakpoint to + index: usize, + }, + /// display all existing breakpoints + BreakpointList, + + /// take a snapshot of the current state, or at the given event when reached + Snapshot { + /// index of the event to take snapshot + index: Option, + }, + /// display all existing snapshots + SnapshotList, + /// show a snapshot for the given event + SnapshotShow { + /// index of the event where the snapshot was taken + index: usize, + }, + /// list all pending snapshots + SnapshotListPending, + + /// show the difference between the current state and the snapshot + /// or two snapshots if two indexes are given + #[clap(alias = "d")] + Diff { + /// The event log index of where the snapshot was taken + snapshot1: usize, + + /// An optional second snapshot index + snapshot2: Option, + }, + + /// display the current state of a SUT node and its context + NodeShow { + /// The serial number of the node to print. + /// Print all state if not present. + serial: usize, + }, + + /// Reset the state to the beginning of the trace + /// + /// This does not remove breakpoints or pending snapshots + Rewind, + + /// show the full state of the system + Show, + + /// print an overview of the current state of the system + Summary, + + /// display log entries - next entry by default + Events(EventsArgs), +} + +#[derive(Debug, Args)] +struct EventsArgs { + #[clap(subcommand)] + command: Option, +} + +#[derive(Debug, Subcommand)] +pub enum EventsCommand { + All, + Next { num_events: usize }, + Range { start: usize, end: usize }, +} + +/// Processes one "line" of user input. 
+fn process_command( + tqdb: &mut Tqdb, + cmd: TopLevelArgs, + logs: &LogCapture, +) -> anyhow::Result> { + let TopLevelArgs { command } = cmd; + let cmd_result = match command { + Commands::Open { path } => cmd_open(tqdb, path), + Commands::Run {} => cmd_run(tqdb), + Commands::Step { num_events } => cmd_step(tqdb, num_events), + Commands::BreakpointToggle { index } => { + cmd_toggle_breakpoint(tqdb, index) + } + Commands::BreakpointList {} => cmd_breakpoint_list(tqdb), + Commands::Diff { snapshot1, snapshot2 } => { + cmd_diff(tqdb, snapshot1, snapshot2) + } + Commands::Snapshot { index } => cmd_snapshot(tqdb, index), + Commands::SnapshotList {} => cmd_snapshot_list(tqdb), + Commands::SnapshotListPending => cmd_snapshot_list_pending(tqdb), + Commands::SnapshotShow { index } => cmd_snapshot_show(tqdb, index), + Commands::NodeShow { serial } => cmd_node_show(tqdb, serial), + Commands::Rewind => cmd_rewind(tqdb), + Commands::Show => cmd_show(tqdb), + Commands::Events(args) => cmd_log_show(tqdb, args), + Commands::Summary {} => cmd_summary(tqdb), + }; + + for line in logs.take_log_lines() { + println!("{line}"); + } + + cmd_result +} + +/// Open an event log file for debugging +fn cmd_open( + tqdb: &mut Tqdb, + path: Utf8PathBuf, +) -> anyhow::Result> { + tqdb.reset_state(); + let json = fs::read_to_string(&path).context(path.clone())?; + let events: Vec = serde_json::from_str(&json) + .context("failed to deserialize event log")?; + tqdb.event_log_path = Some(path.clone()); + tqdb.events = events; + Ok(Some(format!("loaded event log: {path}\n{} events.", tqdb.events.len()))) +} + +/// Apply all events until completion or a breakpoint +fn cmd_run(tqdb: &mut Tqdb) -> anyhow::Result> { + if tqdb.event_log_path.is_none() { + bail!("please open an event log file"); + } + + let mut num_events = 0; + if tqdb.next_event < tqdb.events.len() { + let end = tqdb + .breakpoints + .iter() + .cloned() + .find(|&i| i > tqdb.next_event) + .unwrap_or(tqdb.events.len()); + let events: Vec<_> = tqdb.events[tqdb.next_event..end].to_vec(); + for event in events { + tqdb.current_state.apply_event(event); + num_events += 1; + tqdb.next_event += 1; + tqdb.maybe_snapshot(); + } + } + + let output = if tqdb.next_event == tqdb.events.len() { + format!("done: applied {} events", num_events) + } else { + format!( + "stopped at breakpoint {} after applying {} events", + tqdb.next_event, num_events + ) + }; + Ok(Some(output)) +} + +// Step through each event by applying them +fn cmd_step( + tqdb: &mut Tqdb, + num_events: Option, +) -> anyhow::Result> { + if tqdb.event_log_path.is_none() { + bail!("please open an event log file"); + } + + let num_events = num_events.unwrap_or(1); + + let end = tqdb.next_event + num_events; + if end > tqdb.events.len() { + bail!("Number of events to step exceeds remaining events"); + } + + let mut s = String::new(); + let mut applied_events = 0; + let events: Vec<_> = tqdb.events[tqdb.next_event..end].to_vec(); + for event in events { + writeln!(&mut s, "{} {event:#?}", tqdb.next_event)?; + tqdb.current_state.apply_event(event.clone()); + applied_events += 1; + tqdb.next_event += 1; + tqdb.maybe_snapshot(); + } + writeln!(&mut s, "done: applied {} events", applied_events)?; + Ok(Some(s)) +} + +fn cmd_toggle_breakpoint( + tqdb: &mut Tqdb, + index: usize, +) -> anyhow::Result> { + let output = if tqdb.toggle_breakpoint(index)? 
{ + format!("breakpoint set at event {index}") + } else { + format!("breakpoint removed at event {index}") + }; + Ok(Some(output)) +} + +fn cmd_breakpoint_list(tqdb: &mut Tqdb) -> anyhow::Result> { + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct Breakpoint { + index: usize, + event: String, + } + + let rows = tqdb.breakpoints.iter().map(|i| Breakpoint { + index: *i, + event: format!("{:#?}", tqdb.events[*i]), + }); + + let table = tabled::Table::new(rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + Ok(Some(table)) +} + +fn cmd_snapshot_list(tqdb: &mut Tqdb) -> anyhow::Result> { + let output = if tqdb.snapshots.is_empty() { + "no snapshots present".to_string() + } else { + let mut s = String::new(); + writeln!(&mut s, "Snapshot indexes: ")?; + for i in tqdb.snapshots.keys() { + writeln!(&mut s, "{i} ")?; + } + s + }; + Ok(Some(output)) +} + +fn cmd_snapshot_list_pending( + tqdb: &mut Tqdb, +) -> anyhow::Result> { + let output = if tqdb.pending_snapshots.is_empty() { + "no snapshots pending".to_string() + } else { + let mut s = String::new(); + writeln!(&mut s, "pending snapshot indexes: ")?; + for i in &tqdb.pending_snapshots { + writeln!(&mut s, "{i} ")?; + } + s + }; + Ok(Some(output)) +} + +fn cmd_snapshot( + tqdb: &mut Tqdb, + index: Option, +) -> anyhow::Result> { + if tqdb.event_log_path.is_none() { + bail!("please open an event log file"); + } + + if tqdb.next_event == 0 && index.is_none() { + bail!("please apply an event to generate a useful state"); + } + + let output = if let Some(index) = index { + if index < tqdb.next_event { + tqdb.pending_snapshots.insert(index); + "Setting pending snapshot.\n + Already applied event however. + Use 'rewind' to start over." + .to_string() + } else if index > tqdb.events.len() { + bail!( + "index out of bounds. 
Only {} total events.", + tqdb.events.len() + ); + } else { + tqdb.pending_snapshots.insert(index); + "Setting pending snapshot".to_string() + } + } else { + tqdb.snapshots.insert( + tqdb.next_event.checked_sub(1).unwrap(), + tqdb.current_state.clone(), + ); + "Taking snapshot at current state".to_string() + }; + + Ok(Some(output)) +} + +fn cmd_snapshot_show( + tqdb: &mut Tqdb, + index: usize, +) -> anyhow::Result> { + match tqdb.snapshots.get(&index) { + Some(s) => Ok(Some(format!("{s:#?}"))), + None => bail!("no such snapshot"), + } +} + +fn cmd_diff( + tqdb: &mut Tqdb, + snapshot1: usize, + snapshot2: Option, +) -> anyhow::Result> { + if tqdb.event_log_path.is_none() { + bail!("please open an event log file"); + } + + if snapshot2.is_none() && tqdb.next_event == 0 { + bail!("please apply an event to get a useful state to diff with"); + } + + let Some(s1) = tqdb.snapshots.get(&snapshot1) else { + bail!("snapshot at {snapshot1} doesn't exist"); + }; + let diff = match snapshot2 { + Some(snapshot2) => { + let Some(s2) = tqdb.snapshots.get(&snapshot2) else { + bail!("snapshot at {snapshot2} doesn't exist"); + }; + if snapshot1 < snapshot2 { s1.diff(s2) } else { s2.diff(s1) } + } + None => { + if snapshot1 < tqdb.next_event { + s1.diff(&tqdb.current_state) + } else { + tqdb.current_state.diff(&s1) + } + } + }; + Ok(Some(format!("{diff}"))) +} + +fn cmd_show(tqdb: &Tqdb) -> anyhow::Result> { + if tqdb.event_log_path.is_none() { + bail!("please open an event log file"); + } + Ok(Some(format!("{:#?}", tqdb.current_state))) +} + +fn cmd_node_show( + tqdb: &mut Tqdb, + serial: usize, +) -> anyhow::Result> { + let id = PlatformId::new("test".into(), serial.to_string()); + let Some((node, ctx)) = tqdb.current_state.sut.nodes.get(&id) else { + bail!("failed to load node: {id}"); + }; + + Ok(Some(format!("{node:#?}\n{ctx:#?}"))) +} + +fn cmd_rewind(tqdb: &mut Tqdb) -> anyhow::Result> { + tqdb.rewind(); + + let mut s = String::new(); + writeln!(&mut s, "Re-initialized state and set next-event to 0")?; + writeln!(&mut s, "Breakpoints, Snapshots, and pending snapshots remain")?; + + Ok(Some(s)) +} + +fn cmd_log_show( + tqdb: &Tqdb, + args: EventsArgs, +) -> anyhow::Result> { + if tqdb.events.is_empty() { + bail!("no events loaded. 
Please call 'open' on a valid file"); + } + + // Find the possible start and end range of events + let (start, end) = match args.command { + Some(EventsCommand::All) => (0, tqdb.events.len()), + Some(EventsCommand::Next { num_events }) => { + (tqdb.next_event, tqdb.next_event + num_events) + } + Some(EventsCommand::Range { start, end }) => (start, end), + None => (tqdb.next_event, tqdb.next_event + 1), + }; + + let mut s = String::new(); + if start == tqdb.events.len() { + writeln!(&mut s, "finished applying events")?; + } else { + let end = usize::min(end, tqdb.events.len()); + for i in start..end { + writeln!(&mut s, "{i} {:#?}", tqdb.events[i])?; + } + } + + Ok(Some(s)) +} + +fn cmd_summary(tqdb: &mut Tqdb) -> anyhow::Result> { + let mut s = String::new(); + if let Some(path) = &tqdb.event_log_path { + writeln!(&mut s, "event log path: {:?}", path)?; + writeln!(&mut s, "total events in log: {}", tqdb.events.len())?; + } else { + bail!("no event log loaded: Please call 'open'"); + } + if tqdb.next_event != tqdb.events.len() { + writeln!(&mut s, "next event to apply: {}", tqdb.next_event)?; + writeln!(&mut s, " {:#?}", tqdb.events[tqdb.next_event])?; + } else { + writeln!(&mut s, "finished applying events")?; + } + + writeln!( + &mut s, + "total nodes under test: {}", + tqdb.current_state.sut.nodes.len() + )?; + let total_bootstrap_msgs = tqdb + .current_state + .bootstrap_network + .iter() + .fold(0, |acc, (_, e)| acc + e.len()); + writeln!( + &mut s, + "bootstrap network messages in flight: {}", + total_bootstrap_msgs + )?; + + if tqdb.next_event > 0 { + let latest_config = tqdb.current_state.nexus.latest_config(); + writeln!(&mut s, "nexus config: ")?; + writeln!(&mut s, " epoch: {}", latest_config.epoch)?; + writeln!(&mut s, " op: {:?}", latest_config.op)?; + writeln!( + &mut s, + " coordinator: {}", + latest_config.coordinator.serial_number() + )?; + writeln!(&mut s, " total members: {}", latest_config.members.len())?; + writeln!( + &mut s, + " prepared members: {}", + latest_config.prepared_members.len() + )?; + writeln!( + &mut s, + " committed members: {}", + latest_config.committed_members.len() + )?; + writeln!(&mut s, " threshold: {}", latest_config.threshold.0)?; + writeln!( + &mut s, + " commit crash tolerance: {}", + latest_config.commit_crash_tolerance + )?; + } + + Ok(Some(s)) +} From b7c2392aa29cb9da346d5f4fbbf97b4606fed8a8 Mon Sep 17 00:00:00 2001 From: Benjamin Naecker Date: Wed, 27 Aug 2025 19:42:54 -0700 Subject: [PATCH 08/38] Update kstat sampler futures when a target is updated (#8915) - When a target is added to the `KstatSampler`, we also add a future that resolves when we need to generate a sample from that target. Prior to this, when a target was updated or removed, that future was never touched! That means we could be polling for samples for removed targets or that we might have multiple futures for the same target. This ensures that we always update or remove the futures, along with the targets themselves. - Fixes #8889 --- oximeter/instruments/src/kstat/link.rs | 72 +++++++++++++++ oximeter/instruments/src/kstat/sampler.rs | 104 +++++++++++++++++++--- 2 files changed, 162 insertions(+), 14 deletions(-) diff --git a/oximeter/instruments/src/kstat/link.rs b/oximeter/instruments/src/kstat/link.rs index ecaa2dfd97b..be4cf6a0c0c 100644 --- a/oximeter/instruments/src/kstat/link.rs +++ b/oximeter/instruments/src/kstat/link.rs @@ -740,4 +740,76 @@ mod tests { // kept the final samples. 
assert!(count.value() > 4096); } + + // Regression for https://github.com/oxidecomputer/omicron/issues/8889 + #[tokio::test] + async fn updating_target_changes_existing_sampling_interval() { + let log = test_logger(); + let sampler = KstatSampler::new(&log).unwrap(); + let link = TestEtherstub::new(); + info!(log, "created test etherstub"; "name" => &link.name); + let target = SledDataLinkTarget { + rack_id: RACK_ID, + sled_id: SLED_ID, + sled_serial: SLED_SERIAL.into(), + link_name: link.name.clone().into(), + kind: KIND.into(), + sled_model: SLED_MODEL.into(), + sled_revision: SLED_REVISION, + zone_name: ZONE_NAME.into(), + }; + let dl = SledDataLink::new(target.clone(), true); + let collection_interval = Duration::from_millis(10); + let details = CollectionDetails::never(collection_interval); + let id = sampler.add_target(dl.clone(), details).await.unwrap(); + + // Update the target. + let new_duration = Duration::from_millis(15); + sampler + .update_target(dl, CollectionDetails::never(new_duration)) + .await + .unwrap(); + + // Get the futures that the sampler knows about and ensure the value has + // been updated. + let futs = sampler.future_details().await; + assert_eq!(futs.len(), 1, "should have updated the only target"); + assert_eq!( + futs[0], + (id, new_duration), + "failed to correctly update target" + ); + } + + #[tokio::test] + async fn no_futures_to_await_after_removing_target() { + let log = test_logger(); + let sampler = KstatSampler::new(&log).unwrap(); + let link = TestEtherstub::new(); + info!(log, "created test etherstub"; "name" => &link.name); + let target = SledDataLinkTarget { + rack_id: RACK_ID, + sled_id: SLED_ID, + sled_serial: SLED_SERIAL.into(), + link_name: link.name.clone().into(), + kind: KIND.into(), + sled_model: SLED_MODEL.into(), + sled_revision: SLED_REVISION, + zone_name: ZONE_NAME.into(), + }; + let dl = SledDataLink::new(target.clone(), true); + let collection_interval = Duration::from_millis(100); + let details = CollectionDetails::never(collection_interval); + let id = sampler.add_target(dl.clone(), details).await.unwrap(); + + // And remove right away. + sampler.remove_target(id).await.unwrap(); + + // And ensure there are zero actual futures + let futs = sampler.future_details().await; + assert!( + futs.is_empty(), + "should have zero futures to poll after removing target" + ); + } } diff --git a/oximeter/instruments/src/kstat/sampler.rs b/oximeter/instruments/src/kstat/sampler.rs index b2ffa95e199..bce4dd7a514 100644 --- a/oximeter/instruments/src/kstat/sampler.rs +++ b/oximeter/instruments/src/kstat/sampler.rs @@ -187,6 +187,10 @@ enum Request { CreationTimes { reply_tx: oneshot::Sender>>, }, + /// Return the list of IDs and intervals in the set of futures the sampler + /// is tracking. + #[cfg(all(test, target_os = "illumos"))] + FutureDetails { reply_tx: oneshot::Sender> }, } /// Data about a single kstat target. @@ -267,6 +271,17 @@ impl core::future::Future for YieldIdAfter { } } +// The operation we want to take on a future in our set, after handling an inbox +// message. +enum Operation { + // We want to add a new future. + Add(YieldIdAfter), + // Remove a future with the existing ID. + Remove(TargetId), + // We want to update an existing future. + Update((TargetId, Duration)), +} + /// An owned type used to keep track of the creation time for each kstat in /// which interest has been signaled. #[derive(Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] @@ -345,6 +360,9 @@ struct KstatSamplerWorker { /// at construction time. 
In that case, we'll try again the next time we /// need it. self_stats: Option, + + /// The futures that resolve when it's time to sample the next target. + sample_timeouts: FuturesUnordered, } fn hostname() -> Option { @@ -358,7 +376,7 @@ fn hostname() -> Option { /// Stores the number of samples taken, used for testing. #[cfg(all(test, target_os = "illumos"))] -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, Default)] pub(crate) struct SampleCounts { pub total: usize, pub overflow: usize, @@ -393,6 +411,7 @@ impl KstatSamplerWorker { sample_limit, self_stat_queue, self_stats, + sample_timeouts: FuturesUnordered::new(), }) } @@ -405,7 +424,6 @@ impl KstatSamplerWorker { #[cfg(all(test, target_os = "illumos"))] sample_count_tx: mpsc::UnboundedSender, ) { - let mut sample_timeouts = FuturesUnordered::new(); let mut creation_prune_interval = interval(CREATION_TIME_PRUNE_INTERVAL); creation_prune_interval.tick().await; // Completes immediately. @@ -420,7 +438,7 @@ impl KstatSamplerWorker { ); } } - maybe_id = sample_timeouts.next(), if !sample_timeouts.is_empty() => { + maybe_id = self.sample_timeouts.next(), if !self.sample_timeouts.is_empty() => { let Some((id, interval)) = maybe_id else { unreachable!(); }; @@ -430,7 +448,7 @@ impl KstatSamplerWorker { #[cfg(all(test, target_os = "illumos"))] &sample_count_tx, ) { - sample_timeouts.push(next_timeout); + self.sample_timeouts.push(next_timeout); } } maybe_request = self.inbox.recv() => { @@ -443,8 +461,45 @@ impl KstatSamplerWorker { "received request on inbox"; "request" => ?request, ); - if let Some(next_timeout) = self.handle_inbox_request(request) { - sample_timeouts.push(next_timeout); + if let Some(next_op) = self.handle_inbox_request(request) { + self.update_sample_timeouts(next_op); + } + } + } + } + } + + fn update_sample_timeouts(&mut self, next_op: Operation) { + match next_op { + Operation::Add(fut) => self.sample_timeouts.push(fut), + Operation::Remove(id) => { + // Swap out all futures, and then filter out the one we're now + // removing. + let old = std::mem::take(&mut self.sample_timeouts); + self.sample_timeouts + .extend(old.into_iter().filter(|fut| fut.id != id)); + } + Operation::Update((new_id, new_interval)) => { + // Update just the one future, if it exists, or insert one. + // + // NOTE: we update the _interval_, not the sleep object itself, + // which means this won't take effect until the next tick. + match self + .sample_timeouts + .iter_mut() + .find(|fut| fut.id == new_id) + { + Some(old) => old.interval = new_interval, + None => { + warn!( + &self.log, + "attempting to update the samping future \ + for a target, but no active future found \ + in the set, it will be added directly"; + "id" => %&new_id, + ); + self.sample_timeouts + .push(YieldIdAfter::new(new_id, new_interval)); } } } @@ -452,10 +507,7 @@ impl KstatSamplerWorker { } // Handle a message on the worker's inbox. 
-    fn handle_inbox_request(
-        &mut self,
-        request: Request,
-    ) -> Option<YieldIdAfter> {
+    fn handle_inbox_request(&mut self, request: Request) -> Option<Operation> {
         match request {
             Request::AddTarget { target, details, reply_tx } => {
                 match self.add_target(target, details) {
@@ -475,7 +527,10 @@ impl KstatSamplerWorker {
                             "error" => ?e,
                         ),
                     }
-                    Some(YieldIdAfter::new(id, details.interval))
+                    Some(Operation::Add(YieldIdAfter::new(
+                        id,
+                        details.interval,
+                    )))
                 }
                 Err(e) => {
                     error!(
@@ -513,7 +568,7 @@ impl KstatSamplerWorker {
                             "error" => ?e,
                         ),
                     }
-                    Some(YieldIdAfter::new(id, details.interval))
+                    Some(Operation::Update((id, details.interval)))
                 }
                 Err(e) => {
                     error!(
@@ -534,7 +589,7 @@ impl KstatSamplerWorker {
                 }
             }
             Request::RemoveTarget { id, reply_tx } => {
-                self.targets.remove(&id);
+                let do_remove = self.targets.remove(&id).is_some();
                 if let Some(remaining_samples) =
                     self.samples.lock().unwrap().remove(&id)
                 {
@@ -555,7 +610,7 @@ impl KstatSamplerWorker {
                            "error" => ?e,
                        ),
                    }
-                None
+                if do_remove { Some(Operation::Remove(id)) } else { None }
            }
            Request::TargetStatus { id, reply_tx } => {
                trace!(
@@ -594,6 +649,18 @@ impl KstatSamplerWorker {
                debug!(self.log, "sent reply for creation times");
                None
            }
+            #[cfg(all(test, target_os = "illumos"))]
+            Request::FutureDetails { reply_tx } => {
+                debug!(self.log, "request for future details");
+                let details = self
+                    .sample_timeouts
+                    .iter()
+                    .map(|fut| (fut.id, fut.interval))
+                    .collect();
+                reply_tx.send(details).unwrap();
+                debug!(self.log, "sent reply for future details");
+                None
+            }
        }
    }
@@ -1296,6 +1363,15 @@ impl KstatSampler {
         self.outbox.send(request).await.map_err(|_| Error::SendError).unwrap();
         reply_rx.await.map_err(|_| Error::RecvError).unwrap()
     }
+
+    /// Return the IDs and sampling intervals for all futures in the sampler.
+    #[cfg(all(test, target_os = "illumos"))]
+    pub(crate) async fn future_details(&self) -> Vec<(TargetId, Duration)> {
+        let (reply_tx, reply_rx) = oneshot::channel();
+        let request = Request::FutureDetails { reply_tx };
+        self.outbox.send(request).await.map_err(|_| Error::SendError).unwrap();
+        reply_rx.await.map_err(|_| Error::RecvError).unwrap()
+    }
 }

 impl oximeter::Producer for KstatSampler {

From 93c7540957bca98fe44c14ce46edba1b651d35cb Mon Sep 17 00:00:00 2001
From: "Andrew J. Stone" 
Date: Thu, 28 Aug 2025 02:31:32 -0400
Subject: [PATCH 09/38] TQ: Add support for "Expunge" messages (#8874)

When a node requests a share for an old configuration, it will receive
an `Expunged` message if the node receiving the share request has a
later committed configuration in which the requesting node is no longer
a member. When the `Expunged` message is received, the expunged node
persists this fact and stops replying to peer messages.

We also fix a bug in the test where nexus wasn't actually committing
configurations at all.

So far invariants really slow the test down. On my machine, checking
the invariants after every applied event makes the test take ~120s.
Without invariant checking, it takes about 14s. I definitely want to
add more invariants for correctness, but maybe not check them when they
aren't applicable. Some may become postconditions on certain events
instead.
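One way to keep the runtime down as more invariants get added is to scope
the checks to the event that was just applied, rather than re-running every
invariant after every event. The following is only a sketch of that idea;
the names (`SketchEvent`, `SketchHarness`, `check_after`) are hypothetical
and are not part of this change or of the real test harness:

    // Hypothetical sketch: run only the invariant checks that the
    // just-applied event could plausibly violate. All names are illustrative.
    enum SketchEvent {
        DeliverEnvelope { destination_node: usize },
        CommitAtNexus,
        Tick,
    }

    struct SketchHarness;

    impl SketchHarness {
        fn check_node_invariants(&self, _node: usize) -> Result<(), String> {
            Ok(())
        }

        fn check_commit_invariants(&self) -> Result<(), String> {
            Ok(())
        }

        // Cheap events (e.g. clock ticks) pay for no checks at all.
        fn check_after(&self, event: &SketchEvent) -> Result<(), String> {
            match event {
                SketchEvent::DeliverEnvelope { destination_node } => {
                    self.check_node_invariants(*destination_node)
                }
                SketchEvent::CommitAtNexus => self.check_commit_invariants(),
                SketchEvent::Tick => Ok(()),
            }
        }
    }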
--- trust-quorum/src/lib.rs | 4 +- trust-quorum/src/messages.rs | 2 +- trust-quorum/src/node.rs | 144 ++++++++++++++++++++++++--- trust-quorum/src/persistent_state.rs | 11 +- trust-quorum/test-utils/src/state.rs | 50 +++++++++- trust-quorum/tests/cluster.rs | 34 ++++++- 6 files changed, 226 insertions(+), 19 deletions(-) diff --git a/trust-quorum/src/lib.rs b/trust-quorum/src/lib.rs index aed8a518b9e..bd0536f10d2 100644 --- a/trust-quorum/src/lib.rs +++ b/trust-quorum/src/lib.rs @@ -39,7 +39,9 @@ pub use node::{Node, NodeDiff}; // public only for docs. pub use node_ctx::NodeHandlerCtx; pub use node_ctx::{NodeCallerCtx, NodeCommonCtx, NodeCtx, NodeCtxDiff}; -pub use persistent_state::{PersistentState, PersistentStateSummary}; +pub use persistent_state::{ + ExpungedMetadata, PersistentState, PersistentStateSummary, +}; #[derive( Debug, diff --git a/trust-quorum/src/messages.rs b/trust-quorum/src/messages.rs index 3167cba5002..c373a4350d8 100644 --- a/trust-quorum/src/messages.rs +++ b/trust-quorum/src/messages.rs @@ -65,7 +65,7 @@ pub enum PeerMsgKind { LrtqShare(LrtqShare), /// Inform a node that it is no longer part of the trust quorum as of the - /// given epoch + /// given epoch, which the responder knows is commmitted. Expunged(Epoch), /// Inform a node that it is utilizing an old committed onfiguration and diff --git a/trust-quorum/src/node.rs b/trust-quorum/src/node.rs index 16503dbef88..7684222f9dc 100644 --- a/trust-quorum/src/node.rs +++ b/trust-quorum/src/node.rs @@ -20,8 +20,8 @@ use crate::validators::{ MismatchedRackIdError, ReconfigurationError, ValidatedReconfigureMsg, }; use crate::{ - Alarm, Configuration, CoordinatorState, Epoch, NodeHandlerCtx, PlatformId, - messages::*, + Alarm, Configuration, CoordinatorState, Epoch, ExpungedMetadata, + NodeHandlerCtx, PlatformId, messages::*, }; use daft::{Diffable, Leaf}; use gfss::shamir::Share; @@ -101,9 +101,10 @@ impl Node { }; if let Some(kcs) = &self.key_share_computer { - // We know from our `ValidatedReconfigureMsg` that we haven't seen a newer - // configuration and we have the correct last committed configuration. Therefore if we are computing a key share, - // we must be doing it for a stale commit and should cancel it. + // We know from our `ValidatedReconfigureMsg` that we haven't seen + // a newer configuration and we have the correct last committed + // configuration. Therefore if we are computing a key share, we must + // be doing it for a stale commit and should cancel it. // // I don't think it's actually possible to hit this condition, but // we check anyway. @@ -139,6 +140,19 @@ impl Node { { let ps = ctx.persistent_state(); + if let Some(expunged) = &ps.expunged { + error!( + self.log, + "Commit attempted on expunged node"; + "expunged_epoch" => %expunged.epoch, + "expunging_node" => %expunged.from + ); + return Err(CommitError::Expunged { + epoch: expunged.epoch, + from: expunged.from.clone(), + }); + } + // If we have a configuration the rack id must match the one from // Nexus if let Some(ps_rack_id) = ps.rack_id() { @@ -243,13 +257,26 @@ impl Node { from: PlatformId, msg: PeerMsg, ) { + if ctx.persistent_state().is_expunged() { + warn!( + self.log, + "Received message while expunged. 
Dropping."; + "from" => %from, + "msg" => msg.kind.name() + ); + return; + } + if let Some(rack_id) = ctx.persistent_state().rack_id() { if rack_id != msg.rack_id { - error!(self.log, "Mismatched rack id"; - "from" => %from, - "msg" => msg.kind.name(), - "expected" => %rack_id, - "got" => %msg.rack_id); + error!( + self.log, + "Mismatched rack id"; + "from" => %from, + "msg" => msg.kind.name(), + "expected" => %rack_id, + "got" => %msg.rack_id + ); return; } } @@ -269,6 +296,9 @@ impl Node { PeerMsgKind::CommitAdvance(config) => { self.handle_commit_advance(ctx, from, config) } + PeerMsgKind::Expunged(epoch) => { + self.handle_expunged(ctx, from, epoch); + } _ => todo!( "cannot handle message variant yet - not implemented: {msg:?}" ), @@ -308,6 +338,85 @@ impl Node { } } + fn handle_expunged( + &mut self, + ctx: &mut impl NodeHandlerCtx, + from: PlatformId, + epoch: Epoch, + ) { + if let Some(config) = ctx.persistent_state().latest_config() { + if epoch < config.epoch { + // It's possible, but unlikely, that we were expunged at `epoch` + // and later re-added to the trust-quorum, but the reply to + // an old message is still floating in the network. This is + // especially unlikely since, we should really have restarted + // sprockets connections in this case. In any event, the race + // condition exists at the protocol level, and so we handle it. + if config.members.contains_key(ctx.platform_id()) { + let m = concat!( + "Received Expunged message for old epoch. ", + "We must have been re-added as a trust-quorum member." + ); + warn!( + self.log, + "{m}"; + "from" => %from, + "received_epoch" => %epoch, + "epoch" => %config.epoch + ); + } + return; + } else if epoch > config.epoch { + let m = concat!( + "Received Expunged message for newer epoch. ", + "Recording expungement in persistent state." + ); + warn!( + self.log, + "{m}"; + "from" => %from, + "received_epoch" => %epoch, + "epoch" => %config.epoch + ); + // Intentionally fall through + } else { + let m = concat!( + "Received Expunged message for latest known epoch. ", + "Recording expungement in persistent state." + ); + warn!( + self.log, + "{m}"; + "from" => %from, + "received_epoch" => %epoch, + "epoch" => %config.epoch + ); + // Intentionally fall through + } + + // Perform the actual expunge + ctx.update_persistent_state(|ps| { + ps.expunged = Some(ExpungedMetadata { epoch, from }); + true + }); + + // Stop coordinating and computing a key share + self.coordinator_state = None; + self.key_share_computer = None; + } else { + let m = concat!( + "Received Expunge message, but we have no configurations. ", + "We must have been factory reset already." + ); + error!( + self.log, + "{m}"; + "from" => %from, + "received_epoch" => %epoch + ); + } + } + fn handle_commit_advance( &mut self, ctx: &mut impl NodeHandlerCtx, @@ -469,7 +578,10 @@ impl Node { %latest_committed_config.epoch, "requested_epoch" => %epoch ); - // TODO: Send an expunged message + ctx.send( + from, + PeerMsgKind::Expunged(latest_committed_config.epoch), + ); return; } info!( @@ -499,7 +611,13 @@ impl Node { "from" => %from, "epoch" => %epoch ); - // TODO: Send an expunged message + // Technically, this node does not yet know that the + // configuration at `epoch` has been committed. However, + // requesting nodes only ask for key shares when they know that + // the configuration has been committed. Therefore, rather than + // introduce a new message such as `NotAMember`, we inform the + // requesting node that they have been expunged. 
+ ctx.send(from, PeerMsgKind::Expunged(epoch)); return; } } @@ -720,6 +838,8 @@ pub enum CommitError { ), #[error("cannot commit: not prepared for epoch {0}")] NotPrepared(Epoch), + #[error("cannot commit: expunged at epoch {epoch} by {from}")] + Expunged { epoch: Epoch, from: PlatformId }, } #[cfg(test)] diff --git a/trust-quorum/src/persistent_state.rs b/trust-quorum/src/persistent_state.rs index d2a9a090396..28435de15db 100644 --- a/trust-quorum/src/persistent_state.rs +++ b/trust-quorum/src/persistent_state.rs @@ -31,7 +31,7 @@ pub struct PersistentState { // Has the node been informed that it is no longer part of the trust quorum? // - // If at any time this gets set, than the it remains true for the lifetime + // If at any time this gets set, then the it remains true for the lifetime // of the node. The sled corresponding to the node must be factory reset by // wiping its storage. pub expunged: Option, @@ -62,11 +62,13 @@ impl PersistentState { self.lrtq.is_some() && self.latest_committed_epoch().is_none() } - // Are there any committed configurations or lrtq data? + /// Are there any committed configurations or lrtq data? pub fn is_uninitialized(&self) -> bool { self.lrtq.is_none() && self.latest_committed_epoch().is_none() } + /// The latest configuration that we know about, regardless of whether it + /// has been committed. pub fn latest_config(&self) -> Option<&Configuration> { self.configs.iter().last() } @@ -108,6 +110,11 @@ impl PersistentState { pub fn has_prepared(&self, epoch: Epoch) -> bool { self.configs.contains_key(&epoch) && self.shares.contains_key(&epoch) } + + /// Has this node been expunged? + pub fn is_expunged(&self) -> bool { + self.expunged.is_some() + } } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] diff --git a/trust-quorum/test-utils/src/state.rs b/trust-quorum/test-utils/src/state.rs index 35ae9f13e84..007408e7aed 100644 --- a/trust-quorum/test-utils/src/state.rs +++ b/trust-quorum/test-utils/src/state.rs @@ -10,7 +10,7 @@ use crate::nexus::{ use crate::{Event, member_universe}; use daft::{BTreeMapDiff, BTreeSetDiff, Diffable, Leaf}; use iddqd::IdOrdMap; -use slog::Logger; +use slog::{Logger, info}; use std::collections::{BTreeMap, BTreeSet}; use std::fmt::Display; use trust_quorum::{ @@ -280,6 +280,11 @@ impl TqState { NexusReply::AckedPreparesFromCoordinator { epoch, acks } => { if epoch == latest_config.epoch { latest_config.prepared_members.extend(acks); + + if latest_config.can_commit() { + drop(latest_config); + self.nexus_commit(); + } } } NexusReply::CommitAck { from, epoch } => { @@ -347,6 +352,47 @@ impl TqState { self.send_reconfigure_msg(); self.send_envelopes_from_coordinator(); } + + // Commit at nexus when preparing + fn nexus_commit(&mut self) { + let mut latest_config = self.nexus.latest_config_mut(); + info!( + self.log, + "nexus committed"; + "epoch" => %latest_config.epoch, + "coordinator" => %latest_config.coordinator + ); + + latest_config.op = NexusOp::Committed; + + let new_members = latest_config.members.clone(); + let new_epoch = latest_config.epoch; + + // Expunge any removed nodes from the last committed configuration + if let Some(last_committed_epoch) = latest_config.last_committed_epoch { + // Release our mutable borrow + drop(latest_config); + + let last_committed_config = self + .nexus + .configs + .get(&last_committed_epoch) + .expect("config exists"); + + let expunged = + last_committed_config.members.difference(&new_members).cloned(); + + for e in expunged { + info!( + self.log, + "expunged node"; + 
"epoch" => %new_epoch, + "platform_id" => %e + ); + self.expunged.insert(e); + } + } + } } /// Broken out of `TqState` to alleviate borrow checker woes @@ -528,7 +574,7 @@ fn display_nexus_state_diff( f: &mut std::fmt::Formatter<'_>, ) -> std::fmt::Result { if diff.configs.modified().count() != 0 { - writeln!(f, " nexus state changed:")?; + writeln!(f, "nexus state changed:")?; } // Nexus configs can only be added or modified diff --git a/trust-quorum/tests/cluster.rs b/trust-quorum/tests/cluster.rs index c4ddd620daa..c514e861f28 100644 --- a/trust-quorum/tests/cluster.rs +++ b/trust-quorum/tests/cluster.rs @@ -309,7 +309,7 @@ impl TestState { for s in removed_nodes { // The same selection can be chosen more than once. so we // must add the extra check rather than shrinking the length - // of the `removed_nodes` iterator with `take`.; + // of the `removed_nodes` iterator with `take`. if nodes_to_remove.len() == max_nodes_to_remove { break; } @@ -398,6 +398,38 @@ impl TestState { self.invariant_nodes_have_committed_if_nexus_has_acks()?; self.invariant_nodes_not_coordinating_and_computing_key_share_simultaneously()?; self.invariant_no_alarms()?; + self.invariant_expunged_nodes_have_actually_been_expunged()?; + Ok(()) + } + + /// For all expunged nodes ensure that either: + /// * they know they are expunged + /// * have a latest committed configuration where they are still a member + /// * have no committed configurations + fn invariant_expunged_nodes_have_actually_been_expunged( + &self, + ) -> Result<(), TestCaseError> { + for id in &self.tq_state.expunged { + let (_, ctx) = + self.tq_state.sut.nodes.get(id).expect("node exists"); + let ps = ctx.persistent_state(); + if ps.is_expunged() { + continue; + } + if let Some(config) = ps.latest_committed_configuration() { + let nexus_config = self + .tq_state + .nexus + .configs + .get(&config.epoch) + .expect("config exists"); + prop_assert!(config.members.contains_key(ctx.platform_id())); + prop_assert!(nexus_config.members.contains(ctx.platform_id())); + } else { + continue; + } + } + Ok(()) } From 1ad3d57498e6bc347026b54205f273a14fd988e4 Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Thu, 28 Aug 2025 12:10:17 -0400 Subject: [PATCH 10/38] [omdb] Add RoT bootloader, RoT, and host OS information to `omdb nexus update-status` (#8901) I'll give this a spin on a racklette and report back before merging, but I think this is straightforward enough to go ahead and review without that. Closes #8883. --- .../omdb/src/bin/omdb/nexus/update_status.rs | 203 ++++++++- nexus/types/src/internal_api/views.rs | 421 ++++++++++++++---- nexus/types/src/inventory.rs | 7 + openapi/nexus-internal.json | 270 ++++++++++- 4 files changed, 789 insertions(+), 112 deletions(-) diff --git a/dev-tools/omdb/src/bin/omdb/nexus/update_status.rs b/dev-tools/omdb/src/bin/omdb/nexus/update_status.rs index d5acced5d7a..8cc6a8252a3 100644 --- a/dev-tools/omdb/src/bin/omdb/nexus/update_status.rs +++ b/dev-tools/omdb/src/bin/omdb/nexus/update_status.rs @@ -5,7 +5,12 @@ //! omdb commands related to update status use anyhow::Context; -use nexus_types::internal_api::views::{SpStatus, ZoneStatus}; +use gateway_types::rot::RotSlot; +use nexus_types::internal_api::views::{ + HostPhase1Status, HostPhase2Status, RotBootloaderStatus, RotStatus, + SpStatus, ZoneStatus, +}; +use omicron_common::disk::M2Slot; use omicron_uuid_kinds::SledUuid; use tabled::Tabled; @@ -19,8 +24,44 @@ pub async fn cmd_nexus_update_status( .context("retrieving update status")? 
.into_inner(); - print_zones(status.zones.into_iter()); - print_sps(status.sps.into_iter()); + print_rot_bootloaders( + status + .mgs_driven + .iter() + .map(|s| (s.baseboard_description.clone(), &s.rot_bootloader)), + ); + println!(); + print_rots( + status + .mgs_driven + .iter() + .map(|s| (s.baseboard_description.clone(), &s.rot)), + ); + println!(); + print_sps( + status + .mgs_driven + .iter() + .map(|s| (s.baseboard_description.clone(), &s.sp)), + ); + println!(); + print_host_phase_1s( + status + .mgs_driven + .iter() + .map(|s| (s.baseboard_description.clone(), &s.host_os_phase_1)), + ); + println!(); + print_host_phase_2s( + status.sleds.iter().map(|s| (s.sled_id, &s.host_phase_2)), + ); + println!(); + print_zones( + status + .sleds + .iter() + .map(|s| (s.sled_id, s.zones.iter().cloned().collect())), + ); Ok(()) } @@ -59,22 +100,85 @@ fn print_zones(zones: impl Iterator)>) { println!("{}", table); } -fn print_sps(sps: impl Iterator) { +fn print_rot_bootloaders<'a>( + bootloaders: impl Iterator, +) { + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct BootloaderRow { + baseboard_id: String, + stage0_version: String, + stage0_next_version: String, + } + + let mut rows = Vec::new(); + for (baseboard_id, status) in bootloaders { + let RotBootloaderStatus { stage0_version, stage0_next_version } = + status; + rows.push(BootloaderRow { + baseboard_id, + stage0_version: stage0_version.to_string(), + stage0_next_version: stage0_next_version.to_string(), + }); + } + + let table = tabled::Table::new(rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + + println!("Installed RoT Bootloader Software"); + println!("{}", table); +} + +fn print_rots<'a>(rots: impl Iterator) { + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct RotRow { + baseboard_id: String, + slot_a_version: String, + slot_b_version: String, + } + + let mut rows = Vec::new(); + for (baseboard_id, status) in rots { + let RotStatus { active_slot, slot_a_version, slot_b_version } = status; + let (slot_a_suffix, slot_b_suffix) = match active_slot { + Some(RotSlot::A) => (" (active)", ""), + Some(RotSlot::B) => ("", " (active)"), + // This is not expected! Be louder. 
+ None => ("", " (ACTIVE SLOT UNKNOWN)"), + }; + rows.push(RotRow { + baseboard_id, + slot_a_version: format!("{slot_a_version}{slot_a_suffix}"), + slot_b_version: format!("{slot_b_version}{slot_b_suffix}"), + }); + } + + let table = tabled::Table::new(rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + + println!("Installed RoT Software"); + println!("{}", table); +} + +fn print_sps<'a>(sps: impl Iterator) { #[derive(Tabled)] #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] struct SpRow { baseboard_id: String, - sled_id: String, slot0_version: String, slot1_version: String, } let mut rows = Vec::new(); for (baseboard_id, status) in sps { - let SpStatus { sled_id, slot0_version, slot1_version } = status; + let SpStatus { slot0_version, slot1_version } = status; rows.push(SpRow { baseboard_id, - sled_id: sled_id.map_or("".to_string(), |id| id.to_string()), slot0_version: slot0_version.to_string(), slot1_version: slot1_version.to_string(), }); @@ -88,3 +192,88 @@ fn print_sps(sps: impl Iterator) { println!("Installed SP Software"); println!("{}", table); } + +fn print_host_phase_1s<'a>( + phase_1s: impl Iterator, +) { + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct HostPhase1Row { + baseboard_id: String, + sled_id: String, + slot_a_version: String, + slot_b_version: String, + } + + let mut rows = Vec::new(); + for (baseboard_id, status) in phase_1s { + match status { + HostPhase1Status::NotASled => continue, + HostPhase1Status::Sled { + sled_id, + active_slot, + slot_a_version, + slot_b_version, + } => { + let (slot_a_suffix, slot_b_suffix) = match active_slot { + Some(M2Slot::A) => (" (active)", ""), + Some(M2Slot::B) => ("", " (active)"), + // This is not expected! Be louder. + None => ("", " (ACTIVE SLOT UNKNOWN)"), + }; + rows.push(HostPhase1Row { + baseboard_id, + sled_id: sled_id + .map_or("".to_string(), |id| id.to_string()), + slot_a_version: format!("{slot_a_version}{slot_a_suffix}"), + slot_b_version: format!("{slot_b_version}{slot_b_suffix}"), + }); + } + } + } + + let table = tabled::Table::new(rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + + println!("Installed Host Phase 1 Software"); + println!("{}", table); +} + +fn print_host_phase_2s<'a>( + sleds: impl Iterator, +) { + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct HostPhase2Row { + sled_id: String, + slot_a_version: String, + slot_b_version: String, + } + + let mut rows = Vec::new(); + for (sled_id, status) in sleds { + let HostPhase2Status { boot_disk, slot_a_version, slot_b_version } = + status; + let (slot_a_suffix, slot_b_suffix) = match boot_disk { + Ok(M2Slot::A) => (" (boot disk)", "".to_string()), + Ok(M2Slot::B) => ("", " (boot disk)".to_string()), + // This is not expected! Be louder. 
+ Err(err) => ("", format!(" (BOOT DISK UNKNOWN: {err})")), + }; + rows.push(HostPhase2Row { + sled_id: sled_id.to_string(), + slot_a_version: format!("{slot_a_version}{slot_a_suffix}"), + slot_b_version: format!("{slot_b_version}{slot_b_suffix}"), + }); + } + + let table = tabled::Table::new(rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + + println!("Installed Host Phase 2 Software"); + println!("{}", table); +} diff --git a/nexus/types/src/internal_api/views.rs b/nexus/types/src/internal_api/views.rs index f7db6d86612..49d744fc715 100644 --- a/nexus/types/src/internal_api/views.rs +++ b/nexus/types/src/internal_api/views.rs @@ -5,7 +5,6 @@ use crate::deployment::PendingMgsUpdate; use crate::deployment::TargetReleaseDescription; use crate::inventory::BaseboardId; -use crate::inventory::Caboose; use crate::inventory::CabooseWhich; use crate::inventory::Collection; use chrono::DateTime; @@ -13,9 +12,13 @@ use chrono::SecondsFormat; use chrono::Utc; use futures::future::ready; use futures::stream::StreamExt; +use gateway_client::types::SpType; +use gateway_types::rot::RotSlot; use iddqd::IdOrdItem; use iddqd::IdOrdMap; use iddqd::id_upcast; +use nexus_sled_agent_shared::inventory::BootPartitionContents; +use nexus_sled_agent_shared::inventory::BootPartitionDetails; use nexus_sled_agent_shared::inventory::ConfigReconcilerInventoryResult; use nexus_sled_agent_shared::inventory::OmicronZoneImageSource; use nexus_sled_agent_shared::inventory::OmicronZoneType; @@ -23,6 +26,7 @@ use omicron_common::api::external::MacAddr; use omicron_common::api::external::ObjectStream; use omicron_common::api::external::TufArtifactMeta; use omicron_common::api::external::Vni; +use omicron_common::disk::M2Slot; use omicron_common::snake_case_result; use omicron_common::snake_case_result::SnakeCaseResult; use omicron_uuid_kinds::DemoSagaUuid; @@ -41,6 +45,8 @@ use std::time::Duration; use std::time::Instant; use steno::SagaResultErr; use steno::UndoActionError; +use tufaceous_artifact::ArtifactHash; +use tufaceous_artifact::ArtifactKind; use tufaceous_artifact::KnownArtifactKind; use uuid::Uuid; @@ -536,6 +542,33 @@ pub enum TufRepoVersion { Error(String), } +impl TufRepoVersion { + fn for_artifact( + old: &TargetReleaseDescription, + new: &TargetReleaseDescription, + artifact_hash: ArtifactHash, + ) -> TufRepoVersion { + let matching_artifact = |a: &TufArtifactMeta| a.hash == artifact_hash; + + if let Some(new) = new.tuf_repo() { + if new.artifacts.iter().any(matching_artifact) { + return TufRepoVersion::Version( + new.repo.system_version.clone(), + ); + } + } + if let Some(old) = old.tuf_repo() { + if old.artifacts.iter().any(matching_artifact) { + return TufRepoVersion::Version( + old.repo.system_version.clone(), + ); + } + } + + TufRepoVersion::Unknown + } +} + impl Display for TufRepoVersion { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { @@ -560,112 +593,253 @@ pub struct ZoneStatus { pub version: TufRepoVersion, } +impl IdOrdItem for ZoneStatus { + type Key<'a> = OmicronZoneUuid; + + fn key(&self) -> Self::Key<'_> { + self.zone_id + } + + id_upcast!(); +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] +pub struct HostPhase2Status { + #[serde(with = "snake_case_result")] + #[schemars(schema_with = "SnakeCaseResult::::json_schema")] + pub boot_disk: Result, + pub slot_a_version: TufRepoVersion, + pub slot_b_version: TufRepoVersion, +} + +impl HostPhase2Status { + fn new( + 
inv: &BootPartitionContents, + old: &TargetReleaseDescription, + new: &TargetReleaseDescription, + ) -> Self { + Self { + boot_disk: inv.boot_disk.clone(), + slot_a_version: Self::slot_version(old, new, &inv.slot_a), + slot_b_version: Self::slot_version(old, new, &inv.slot_b), + } + } + + fn slot_version( + old: &TargetReleaseDescription, + new: &TargetReleaseDescription, + details: &Result, + ) -> TufRepoVersion { + let artifact_hash = match details.as_ref() { + Ok(details) => details.artifact_hash, + Err(err) => return TufRepoVersion::Error(err.clone()), + }; + TufRepoVersion::for_artifact(old, new, artifact_hash) + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] +pub struct SledAgentUpdateStatus { + pub sled_id: SledUuid, + pub zones: IdOrdMap, + pub host_phase_2: HostPhase2Status, +} + +impl IdOrdItem for SledAgentUpdateStatus { + type Key<'a> = SledUuid; + + fn key(&self) -> Self::Key<'_> { + self.sled_id + } + + id_upcast!(); +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] +pub struct RotBootloaderStatus { + pub stage0_version: TufRepoVersion, + pub stage0_next_version: TufRepoVersion, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] +pub struct RotStatus { + pub active_slot: Option, + pub slot_a_version: TufRepoVersion, + pub slot_b_version: TufRepoVersion, +} + #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] pub struct SpStatus { - pub sled_id: Option, pub slot0_version: TufRepoVersion, pub slot1_version: TufRepoVersion, } -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] -pub struct UpdateStatus { - pub zones: BTreeMap>, - pub sps: BTreeMap, +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] +#[serde(rename_all = "snake_case", tag = "kind")] +pub enum HostPhase1Status { + /// This device has no host phase 1 status because it is not a sled (e.g., + /// it's a PSC or switch). + NotASled, + Sled { + sled_id: Option, + active_slot: Option, + slot_a_version: TufRepoVersion, + slot_b_version: TufRepoVersion, + }, } -impl UpdateStatus { - pub fn new( +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] +pub struct MgsDrivenUpdateStatus { + // This is a stringified [`BaseboardId`]. We can't use `BaseboardId` as a + // key in JSON maps, so we squish it into a string. 
+ pub baseboard_description: String, + pub rot_bootloader: RotBootloaderStatus, + pub rot: RotStatus, + pub sp: SpStatus, + pub host_os_phase_1: HostPhase1Status, +} + +impl MgsDrivenUpdateStatus { + fn new( + inventory: &Collection, + baseboard_id: &BaseboardId, + sp_type: SpType, old: &TargetReleaseDescription, new: &TargetReleaseDescription, - inventory: &Collection, - ) -> UpdateStatus { - let sleds = inventory - .sled_agents - .iter() - .map(|agent| (&agent.sled_id, &agent.last_reconciliation)); - let zones = sleds - .map(|(sled_id, inv)| { - ( - *sled_id, - inv.as_ref().map_or(vec![], |inv| { - inv.reconciled_omicron_zones() - .map(|(conf, res)| ZoneStatus { - zone_id: conf.id, - zone_type: conf.zone_type.clone(), - version: Self::zone_image_source_to_version( - old, - new, - &conf.image_source, - res, - ), - }) - .collect() - }), - ) - }) - .collect(); - let baseboard_ids: Vec<_> = inventory.sps.keys().cloned().collect(); - - // Find all SP versions and git commits via cabooses - let mut sps: BTreeMap = baseboard_ids - .into_iter() - .map(|baseboard_id| { - let slot0_version = inventory - .caboose_for(CabooseWhich::SpSlot0, &baseboard_id) - .map_or(TufRepoVersion::Unknown, |c| { - Self::caboose_to_version(old, new, &c.caboose) - }); - let slot1_version = inventory - .caboose_for(CabooseWhich::SpSlot1, &baseboard_id) - .map_or(TufRepoVersion::Unknown, |c| { - Self::caboose_to_version(old, new, &c.caboose) - }); - ( - (*baseboard_id).clone(), - SpStatus { sled_id: None, slot0_version, slot1_version }, - ) - }) - .collect(); + sled_ids: &BTreeMap<&BaseboardId, SledUuid>, + ) -> Self { + MgsDrivenUpdateStatusBuilder { + inventory, + baseboard_id, + sp_type, + old, + new, + sled_ids, + } + .build() + } +} - // Fill in the sled_id for the sp if known - for sa in inventory.sled_agents.iter() { - if let Some(baseboard_id) = &sa.baseboard_id { - if let Some(sp) = sps.get_mut(baseboard_id) { - sp.sled_id = Some(sa.sled_id); - } - } +impl IdOrdItem for MgsDrivenUpdateStatus { + type Key<'a> = &'a str; + + fn key(&self) -> Self::Key<'_> { + &self.baseboard_description + } + + id_upcast!(); +} + +struct MgsDrivenUpdateStatusBuilder<'a> { + inventory: &'a Collection, + baseboard_id: &'a BaseboardId, + sp_type: SpType, + old: &'a TargetReleaseDescription, + new: &'a TargetReleaseDescription, + sled_ids: &'a BTreeMap<&'a BaseboardId, SledUuid>, +} + +impl MgsDrivenUpdateStatusBuilder<'_> { + fn build(&self) -> MgsDrivenUpdateStatus { + let host_os_phase_1 = match self.sp_type { + SpType::Power | SpType::Switch => HostPhase1Status::NotASled, + SpType::Sled => HostPhase1Status::Sled { + sled_id: self.sled_ids.get(self.baseboard_id).copied(), + active_slot: self + .inventory + .host_phase_1_active_slot_for(self.baseboard_id) + .map(|s| s.slot), + slot_a_version: self.version_for_host_phase_1(M2Slot::A), + slot_b_version: self.version_for_host_phase_1(M2Slot::B), + }, + }; + + MgsDrivenUpdateStatus { + baseboard_description: self.baseboard_id.to_string(), + rot_bootloader: RotBootloaderStatus { + stage0_version: self.version_for_caboose(CabooseWhich::Stage0), + stage0_next_version: self + .version_for_caboose(CabooseWhich::Stage0Next), + }, + rot: RotStatus { + active_slot: self + .inventory + .rot_state_for(self.baseboard_id) + .map(|state| state.active_slot), + slot_a_version: self + .version_for_caboose(CabooseWhich::RotSlotA), + slot_b_version: self + .version_for_caboose(CabooseWhich::RotSlotB), + }, + sp: SpStatus { + slot0_version: self.version_for_caboose(CabooseWhich::SpSlot0), + 
slot1_version: self.version_for_caboose(CabooseWhich::SpSlot1), + }, + host_os_phase_1, } + } - let sps = sps.into_iter().map(|(k, v)| (k.to_string(), v)).collect(); + fn version_for_host_phase_1(&self, slot: M2Slot) -> TufRepoVersion { + let Some(artifact_hash) = self + .inventory + .host_phase_1_flash_hash_for(slot, self.baseboard_id) + .map(|h| h.hash) + else { + return TufRepoVersion::Unknown; + }; - UpdateStatus { zones, sps } + TufRepoVersion::for_artifact(self.old, self.new, artifact_hash) } - fn caboose_to_version( - old: &TargetReleaseDescription, - new: &TargetReleaseDescription, - caboose: &Caboose, - ) -> TufRepoVersion { + fn version_for_caboose(&self, which: CabooseWhich) -> TufRepoVersion { + let Some(caboose) = self + .inventory + .caboose_for(which, self.baseboard_id) + .map(|c| &c.caboose) + else { + return TufRepoVersion::Unknown; + }; + + // TODO-cleanup This is really fragile! The RoT and bootloader kinds + // here aren't `KnownArtifactKind`s, so if we add more + // `ArtifactKind` constants we have to remember to update these + // lists. Maybe we fix this as a part of + // https://github.com/oxidecomputer/tufaceous/issues/37? + let matching_kinds = match which { + CabooseWhich::SpSlot0 | CabooseWhich::SpSlot1 => [ + ArtifactKind::from_known(KnownArtifactKind::GimletSp), + ArtifactKind::from_known(KnownArtifactKind::PscSp), + ArtifactKind::from_known(KnownArtifactKind::SwitchSp), + ], + CabooseWhich::RotSlotA => [ + ArtifactKind::GIMLET_ROT_IMAGE_A, + ArtifactKind::PSC_ROT_IMAGE_A, + ArtifactKind::SWITCH_ROT_IMAGE_A, + ], + CabooseWhich::RotSlotB => [ + ArtifactKind::GIMLET_ROT_IMAGE_B, + ArtifactKind::PSC_ROT_IMAGE_B, + ArtifactKind::SWITCH_ROT_IMAGE_B, + ], + CabooseWhich::Stage0 | CabooseWhich::Stage0Next => [ + ArtifactKind::GIMLET_ROT_STAGE0, + ArtifactKind::PSC_ROT_STAGE0, + ArtifactKind::SWITCH_ROT_STAGE0, + ], + }; let matching_caboose = |a: &TufArtifactMeta| { - caboose.board == a.id.name - && matches!( - a.id.kind.to_known(), - Some( - KnownArtifactKind::GimletSp - | KnownArtifactKind::PscSp - | KnownArtifactKind::SwitchSp - ) - ) + Some(&caboose.board) == a.board.as_ref() && caboose.version == a.id.version.to_string() + && matching_kinds.contains(&a.id.kind) }; - if let Some(new) = new.tuf_repo() { + if let Some(new) = self.new.tuf_repo() { if new.artifacts.iter().any(matching_caboose) { return TufRepoVersion::Version( new.repo.system_version.clone(), ); } } - if let Some(old) = old.tuf_repo() { + if let Some(old) = self.old.tuf_repo() { if old.artifacts.iter().any(matching_caboose) { return TufRepoVersion::Version( old.repo.system_version.clone(), @@ -675,6 +849,89 @@ impl UpdateStatus { TufRepoVersion::Unknown } +} + +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +pub struct UpdateStatus { + pub mgs_driven: IdOrdMap, + pub sleds: IdOrdMap, +} + +impl UpdateStatus { + pub fn new( + old: &TargetReleaseDescription, + new: &TargetReleaseDescription, + inventory: &Collection, + ) -> UpdateStatus { + let sleds = inventory + .sled_agents + .iter() + .map(|sa| { + let Some(inv) = sa.last_reconciliation.as_ref() else { + return SledAgentUpdateStatus { + sled_id: sa.sled_id, + zones: IdOrdMap::new(), + host_phase_2: HostPhase2Status { + boot_disk: Err("unknown".to_string()), + slot_a_version: TufRepoVersion::Unknown, + slot_b_version: TufRepoVersion::Unknown, + }, + }; + }; + + SledAgentUpdateStatus { + sled_id: sa.sled_id, + zones: inv + .reconciled_omicron_zones() + .map(|(conf, res)| ZoneStatus { + zone_id: conf.id, + zone_type: 
conf.zone_type.clone(), + version: Self::zone_image_source_to_version( + old, + new, + &conf.image_source, + res, + ), + }) + .collect(), + host_phase_2: HostPhase2Status::new( + &inv.boot_partitions, + old, + new, + ), + } + }) + .collect(); + + // Build a map so we can look up the sled ID for a given baseboard (when + // collecting the MGS-driven update status below, all we have is the + // baseboard). + let sled_ids_by_baseboard: BTreeMap<&BaseboardId, SledUuid> = inventory + .sled_agents + .iter() + .filter_map(|sa| { + let baseboard_id = sa.baseboard_id.as_deref()?; + Some((baseboard_id, sa.sled_id)) + }) + .collect(); + + let mgs_driven = inventory + .sps + .iter() + .map(|(baseboard_id, sp)| { + MgsDrivenUpdateStatus::new( + inventory, + baseboard_id, + sp.sp_type, + old, + new, + &sled_ids_by_baseboard, + ) + }) + .collect::>(); + + UpdateStatus { sleds, mgs_driven } + } fn zone_image_source_to_version( old: &TargetReleaseDescription, diff --git a/nexus/types/src/inventory.rs b/nexus/types/src/inventory.rs index 603085d9fde..7ca78f007ea 100644 --- a/nexus/types/src/inventory.rs +++ b/nexus/types/src/inventory.rs @@ -211,6 +211,13 @@ impl Collection { .and_then(|by_bb| by_bb.get(baseboard_id)) } + pub fn rot_state_for( + &self, + baseboard_id: &BaseboardId, + ) -> Option<&RotState> { + self.rots.get(baseboard_id) + } + pub fn rot_page_for( &self, which: RotPageWhich, diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index 0bc5543a0b8..3d41de3fe62 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -4814,6 +4814,118 @@ "id" ] }, + "HostPhase1Status": { + "oneOf": [ + { + "description": "This device has no host phase 1 status because it is not a sled (e.g., it's a PSC or switch).", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "not_a_sled" + ] + } + }, + "required": [ + "kind" + ] + }, + { + "type": "object", + "properties": { + "active_slot": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/M2Slot" + } + ] + }, + "kind": { + "type": "string", + "enum": [ + "sled" + ] + }, + "sled_id": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForSledKind" + } + ] + }, + "slot_a_version": { + "$ref": "#/components/schemas/TufRepoVersion" + }, + "slot_b_version": { + "$ref": "#/components/schemas/TufRepoVersion" + } + }, + "required": [ + "kind", + "slot_a_version", + "slot_b_version" + ] + } + ] + }, + "HostPhase2Status": { + "type": "object", + "properties": { + "boot_disk": { + "x-rust-type": { + "crate": "std", + "parameters": [ + { + "$ref": "#/components/schemas/M2Slot" + }, + { + "type": "string" + } + ], + "path": "::std::result::Result", + "version": "*" + }, + "oneOf": [ + { + "type": "object", + "properties": { + "ok": { + "$ref": "#/components/schemas/M2Slot" + } + }, + "required": [ + "ok" + ] + }, + { + "type": "object", + "properties": { + "err": { + "type": "string" + } + }, + "required": [ + "err" + ] + } + ] + }, + "slot_a_version": { + "$ref": "#/components/schemas/TufRepoVersion" + }, + "slot_b_version": { + "$ref": "#/components/schemas/TufRepoVersion" + } + }, + "required": [ + "boot_disk", + "slot_a_version", + "slot_b_version" + ] + }, "IdMapBlueprintDatasetConfig": { "type": "object", "additionalProperties": { @@ -5393,6 +5505,33 @@ "minLength": 5, "maxLength": 17 }, + "MgsDrivenUpdateStatus": { + "type": "object", + "properties": { + "baseboard_description": { + "type": "string" + }, + "host_os_phase_1": { + "$ref": 
"#/components/schemas/HostPhase1Status" + }, + "rot": { + "$ref": "#/components/schemas/RotStatus" + }, + "rot_bootloader": { + "$ref": "#/components/schemas/RotBootloaderStatus" + }, + "sp": { + "$ref": "#/components/schemas/SpStatus" + } + }, + "required": [ + "baseboard_description", + "host_os_phase_1", + "rot", + "rot_bootloader", + "sp" + ] + }, "MgsUpdateDriverStatus": { "description": "Status of ongoing update attempts, recently completed attempts, and update requests that are waiting for retry.", "type": "object", @@ -7906,6 +8045,21 @@ "time" ] }, + "RotBootloaderStatus": { + "type": "object", + "properties": { + "stage0_next_version": { + "$ref": "#/components/schemas/TufRepoVersion" + }, + "stage0_version": { + "$ref": "#/components/schemas/TufRepoVersion" + } + }, + "required": [ + "stage0_next_version", + "stage0_version" + ] + }, "RotSlot": { "oneOf": [ { @@ -7938,6 +8092,29 @@ } ] }, + "RotStatus": { + "type": "object", + "properties": { + "active_slot": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/RotSlot" + } + ] + }, + "slot_a_version": { + "$ref": "#/components/schemas/TufRepoVersion" + }, + "slot_b_version": { + "$ref": "#/components/schemas/TufRepoVersion" + } + }, + "required": [ + "slot_a_version", + "slot_b_version" + ] + }, "RouteConfig": { "type": "object", "properties": { @@ -8280,6 +8457,40 @@ "usable_physical_ram" ] }, + "SledAgentUpdateStatus": { + "type": "object", + "properties": { + "host_phase_2": { + "$ref": "#/components/schemas/HostPhase2Status" + }, + "sled_id": { + "$ref": "#/components/schemas/TypedUuidForSledKind" + }, + "zones": { + "title": "IdOrdMap", + "x-rust-type": { + "crate": "iddqd", + "parameters": [ + { + "$ref": "#/components/schemas/ZoneStatus" + } + ], + "path": "iddqd::IdOrdMap", + "version": "*" + }, + "type": "array", + "items": { + "$ref": "#/components/schemas/ZoneStatus" + }, + "uniqueItems": true + } + }, + "required": [ + "host_phase_2", + "sled_id", + "zones" + ] + }, "SledCpuFamily": { "description": "Identifies the kind of CPU present on a sled, determined by reading CPUID.\n\nThis is intended to broadly support the control plane answering the question \"can I run this instance on that sled?\" given an instance with either no or some CPU platform requirement. It is not enough information for more precise placement questions - for example, is a CPU a high-frequency part or many-core part? We don't include Genoa here, but in that CPU family there are high frequency parts, many-core parts, and large-cache parts. 
To support those questions (or satisfactorily answer #8730) we would need to collect additional information and send it along.", "oneOf": [ @@ -8504,14 +8715,6 @@ "SpStatus": { "type": "object", "properties": { - "sled_id": { - "nullable": true, - "allOf": [ - { - "$ref": "#/components/schemas/TypedUuidForSledKind" - } - ] - }, "slot0_version": { "$ref": "#/components/schemas/TufRepoVersion" }, @@ -8945,25 +9148,46 @@ "UpdateStatus": { "type": "object", "properties": { - "sps": { - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/SpStatus" - } + "mgs_driven": { + "title": "IdOrdMap", + "x-rust-type": { + "crate": "iddqd", + "parameters": [ + { + "$ref": "#/components/schemas/MgsDrivenUpdateStatus" + } + ], + "path": "iddqd::IdOrdMap", + "version": "*" + }, + "type": "array", + "items": { + "$ref": "#/components/schemas/MgsDrivenUpdateStatus" + }, + "uniqueItems": true }, - "zones": { - "type": "object", - "additionalProperties": { - "type": "array", - "items": { - "$ref": "#/components/schemas/ZoneStatus" - } - } + "sleds": { + "title": "IdOrdMap", + "x-rust-type": { + "crate": "iddqd", + "parameters": [ + { + "$ref": "#/components/schemas/SledAgentUpdateStatus" + } + ], + "path": "iddqd::IdOrdMap", + "version": "*" + }, + "type": "array", + "items": { + "$ref": "#/components/schemas/SledAgentUpdateStatus" + }, + "uniqueItems": true } }, "required": [ - "sps", - "zones" + "mgs_driven", + "sleds" ] }, "UplinkAddressConfig": { From 672aed56dd432611d64e28fcfb4ae34f81d0f09d Mon Sep 17 00:00:00 2001 From: "Andrew J. Stone" Date: Thu, 28 Aug 2025 13:14:59 -0400 Subject: [PATCH 11/38] TQ: cluster proptest Action/Event cleanup (#8899) This builds on #8874 To make using using tqdb easier, we put all necessary information inside events. This works because of deterministic replay using the same infrastucture as the proptest itself. To ensure we are truly deterministically replaying we must assert that any Event in our log has actually been generated when we go to apply it in tqdb. In the common case case this is equivalent to asserting that the `DeliveredEnvelope` is actually the same as the one pulled off the `bootstrap_network` vec during runs. In order to guarantee this a few changes needed to be made: 1. Determinism exists, except for in the crypto code because we don't seed the various random number generators, and therefore different key shares and rack secrets get generated in different runs. We could seed them, but then this makes it possible that we accidentally seed them in production code. More importantly for our current purposes, though, is that it's tedious and unnecessary. Instead we just implement comparison methods for messages that ignore things like key shares. This works fine because if the shares are not self-consistent with the rack secret and the parameters such as threshold that we don't change the crypto code itself will fail immediately. 2. We had a few actions that generated multiple events. Unfortunately, applying these events would result in mutating the test networks multiple times, resulting in a difference between which message was recorded and which was actually pulled out to be delivered. This was fixed by changing each action to only do a single thing at a time, like deliver one envelope or commit a configuration at one node. This also allows for finer grain interleaving and matches better with our TLA+ spec/model checking. This was observable by looking at the event logs. 
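As a concrete illustration of the replay check described above: when tqdb
re-applies a recorded event, it can compare the recorded envelope against
the one actually pulled off the simulated network, tolerating only
differences in the cryptographic payloads. A rough sketch follows; the
wrapper function is illustrative, while `Envelope::equal_except_for_crypto_data`
is the comparison added in this change, gated behind the `testing` feature:

    use trust_quorum::Envelope;

    // Illustrative replay check: destination, source, rack id, and message
    // kind must match exactly, while key shares and rack secrets may
    // legitimately differ between the recording run and the replay run.
    fn assert_replay_matches(recorded: &Envelope, delivered: &Envelope) {
        assert!(
            recorded.equal_except_for_crypto_data(delivered),
            "event log diverged from replayed state"
        );
    }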
Because an action now typically generates a single event, however, we need to generate more actions per run to get the equivalent functionality. A single action won't result in N commits or N envelopes delivered. Therefore, each test run now needs to have significantly more actions generated. Unfortunately, this can make test runs even longer. To help alleviate this, we made three other changes: 1. We changed the invariant checks to only look at nodes that could possibly be mutated by an event application. We call these the "affected nodes". This prevents looping over every node in each check. 2. We reduced the member universe, and hence the size of the state space. 3. We reduced the total number of test cases per run. --- Cargo.toml | 5 + trust-quorum/Cargo.toml | 1 + trust-quorum/src/configuration.rs | 24 +++ trust-quorum/src/lib.rs | 9 + trust-quorum/src/messages.rs | 32 ++++ trust-quorum/test-utils/Cargo.toml | 2 +- trust-quorum/test-utils/src/event.rs | 30 +++- trust-quorum/test-utils/src/nexus.rs | 4 +- trust-quorum/test-utils/src/state.rs | 23 ++- trust-quorum/tests/cluster.rs | 257 +++++++++++++++------------ 10 files changed, 258 insertions(+), 129 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 0af37939fb2..495e504b205 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -842,6 +842,11 @@ opt-level = 3 [profile.dev.package.bootstore] opt-level = 3 +[profile.dev.package.trust-quorum] +opt-level = 3 +[profile.dev.package.trust-quorum-test-utils] +opt-level = 3 + # Crypto stuff always needs optimizations [profile.dev.package.sha3] opt-level = 3 diff --git a/trust-quorum/Cargo.toml b/trust-quorum/Cargo.toml index 0d6ac6863c0..a358b625fb5 100644 --- a/trust-quorum/Cargo.toml +++ b/trust-quorum/Cargo.toml @@ -52,3 +52,4 @@ trust-quorum-test-utils.workspace = true # subtle when we do this. On the other hand its very useful for testing and # debugging outside of production. danger_partial_eq_ct_wrapper = ["gfss/danger_partial_eq_ct_wrapper"] +testing = [] diff --git a/trust-quorum/src/configuration.rs b/trust-quorum/src/configuration.rs index 8b116e6f4a8..2418f8f14f0 100644 --- a/trust-quorum/src/configuration.rs +++ b/trust-quorum/src/configuration.rs @@ -13,6 +13,7 @@ use iddqd::{IdOrdItem, id_upcast}; use omicron_uuid_kinds::RackUuid; use secrecy::ExposeSecret; use serde::{Deserialize, Serialize}; +use serde_with::serde_as; use slog_error_chain::SlogInlineError; use std::collections::BTreeMap; @@ -31,6 +32,7 @@ pub enum ConfigurationError { /// The configuration for a given epoch.
/// /// Only valid for non-lrtq configurations +#[serde_as] #[derive( Debug, Clone, @@ -53,6 +55,7 @@ pub struct Configuration { pub coordinator: PlatformId, // All members of the current configuration and the hash of their key shares + #[serde_as(as = "Vec<(_, _)>")] pub members: BTreeMap, /// The number of sleds required to reconstruct the rack secret @@ -121,4 +124,25 @@ impl Configuration { shares, )) } + + #[cfg(feature = "testing")] + pub fn equal_except_for_crypto_data(&self, other: &Self) -> bool { + let encrypted_rack_secrets_match = + match (&self.encrypted_rack_secrets, &other.encrypted_rack_secrets) + { + (None, None) => true, + (Some(_), Some(_)) => true, + _ => false, + }; + self.rack_id == other.rack_id + && self.epoch == other.epoch + && self.coordinator == other.coordinator + && self + .members + .keys() + .zip(other.members.keys()) + .all(|(id1, id2)| id1 == id2) + && self.threshold == other.threshold + && encrypted_rack_secrets_match + } } diff --git a/trust-quorum/src/lib.rs b/trust-quorum/src/lib.rs index bd0536f10d2..6880831dfbf 100644 --- a/trust-quorum/src/lib.rs +++ b/trust-quorum/src/lib.rs @@ -139,6 +139,15 @@ pub struct Envelope { pub msg: PeerMsg, } +#[cfg(feature = "testing")] +impl Envelope { + pub fn equal_except_for_crypto_data(&self, other: &Self) -> bool { + self.to == other.to + && self.from == other.from + && self.msg.equal_except_for_crypto_data(&other.msg) + } +} + /// Check if a received share is valid for a given configuration /// /// Return true if valid, false otherwise. diff --git a/trust-quorum/src/messages.rs b/trust-quorum/src/messages.rs index c373a4350d8..0d502bdcc50 100644 --- a/trust-quorum/src/messages.rs +++ b/trust-quorum/src/messages.rs @@ -30,6 +30,14 @@ pub struct PeerMsg { pub kind: PeerMsgKind, } +impl PeerMsg { + #[cfg(feature = "testing")] + pub fn equal_except_for_crypto_data(&self, other: &Self) -> bool { + self.rack_id == other.rack_id + && self.kind.equal_except_for_crypto_data(&other.kind) + } +} + #[derive(Debug, Clone, Serialize, Deserialize)] #[cfg_attr(feature = "danger_partial_eq_ct_wrapper", derive(PartialEq, Eq))] pub enum PeerMsgKind { @@ -92,4 +100,28 @@ impl PeerMsgKind { Self::CommitAdvance(_) => "commit_advance", } } + + /// This is useful for our replay tests without having to worry about seeding + /// the various random number generators in our production code. + #[cfg(feature = "testing")] + pub fn equal_except_for_crypto_data(&self, other: &Self) -> bool { + match (self, other) { + ( + Self::Prepare { config: config1, .. }, + Self::Prepare { config: config2, .. }, + ) => config1.equal_except_for_crypto_data(config2), + (Self::Config(config1), Self::Config(config2)) => { + config1.equal_except_for_crypto_data(config2) + } + ( + Self::Share { epoch: epoch1, .. }, + Self::Share { epoch: epoch2, .. 
}, + ) => epoch1 == epoch2, + (Self::LrtqShare(_), Self::LrtqShare(_)) => true, + (Self::CommitAdvance(config1), Self::CommitAdvance(config2)) => { + config1.equal_except_for_crypto_data(config2) + } + (s, o) => s == o, + } + } } diff --git a/trust-quorum/test-utils/Cargo.toml b/trust-quorum/test-utils/Cargo.toml index f2701c471a2..0e20b7c8466 100644 --- a/trust-quorum/test-utils/Cargo.toml +++ b/trust-quorum/test-utils/Cargo.toml @@ -16,6 +16,6 @@ omicron-uuid-kinds.workspace = true serde.workspace = true serde_json.workspace = true slog.workspace = true -trust-quorum = { workspace = true, features = ["danger_partial_eq_ct_wrapper"] } +trust-quorum = { workspace = true, features = ["danger_partial_eq_ct_wrapper", "testing"] } omicron-workspace-hack.workspace = true diff --git a/trust-quorum/test-utils/src/event.rs b/trust-quorum/test-utils/src/event.rs index 6a573d95852..8ad64aff258 100644 --- a/trust-quorum/test-utils/src/event.rs +++ b/trust-quorum/test-utils/src/event.rs @@ -7,7 +7,7 @@ use crate::nexus::{NexusConfig, NexusReply}; use serde::{Deserialize, Serialize}; use std::collections::BTreeSet; -use trust_quorum::{Epoch, PlatformId}; +use trust_quorum::{Envelope, Epoch, PlatformId}; /// An event that can be fed into our system under test (SUT) /// @@ -22,12 +22,30 @@ pub enum Event { }, AbortConfiguration(Epoch), SendNexusReplyOnUnderlay(NexusReply), - /// Pull an envelope off the bootstrap network and call `Node::handle` - DeliverEnvelope { - destination: PlatformId, - }, + /// Call `Node::handle` with the given Envelope. + /// + /// Since replay is deterministic, we actually know what this value is, + /// even though a prior event may not have yet sent the message. + DeliverEnvelope(Envelope), /// Pull a `NexusReply` off the underlay network and update the `NexusState` - DeliverNexusReply, + DeliverNexusReply(NexusReply), CommitConfiguration(PlatformId), Reconfigure(NexusConfig), } + +impl Event { + /// Return which nodes the event may have mutated. + pub fn affected_nodes(&self) -> Vec { + match self { + Self::InitialSetup { config, crashed_nodes, .. } => { + config.members.union(&crashed_nodes).cloned().collect() + } + Self::AbortConfiguration(_) => vec![], + Self::SendNexusReplyOnUnderlay(_) => vec![], + Self::DeliverEnvelope(envelope) => vec![envelope.to.clone()], + Self::DeliverNexusReply(_) => vec![], + Self::CommitConfiguration(id) => vec![id.clone()], + Self::Reconfigure(_) => vec![], + } + } +} diff --git a/trust-quorum/test-utils/src/nexus.rs b/trust-quorum/test-utils/src/nexus.rs index a64acb39d09..84237fe2cdf 100644 --- a/trust-quorum/test-utils/src/nexus.rs +++ b/trust-quorum/test-utils/src/nexus.rs @@ -113,7 +113,9 @@ pub struct NexusState { impl NexusState { #[allow(clippy::new_without_default)] pub fn new() -> NexusState { - NexusState { rack_id: RackUuid::new_v4(), configs: IdOrdMap::new() } + // We end up replaying events in tqdb, and can't use a random rack + // uuid. 
+ NexusState { rack_id: RackUuid::nil(), configs: IdOrdMap::new() } } // Create a `ReconfigureMsg` for the latest nexus config diff --git a/trust-quorum/test-utils/src/state.rs b/trust-quorum/test-utils/src/state.rs index 007408e7aed..74ec59a3cbd 100644 --- a/trust-quorum/test-utils/src/state.rs +++ b/trust-quorum/test-utils/src/state.rs @@ -201,11 +201,11 @@ impl TqState { Event::SendNexusReplyOnUnderlay(reply) => { self.apply_event_send_nexus_reply_on_underlay(reply) } - Event::DeliverEnvelope { destination } => { - self.apply_event_deliver_envelope(destination); + Event::DeliverEnvelope(envelope) => { + self.apply_event_deliver_envelope(envelope); } - Event::DeliverNexusReply => { - self.apply_event_deliver_nexus_reply(); + Event::DeliverNexusReply(reply) => { + self.apply_event_deliver_nexus_reply(reply); } Event::CommitConfiguration(dest) => { self.apply_event_commit(dest); @@ -273,9 +273,10 @@ impl TqState { self.underlay_network.push(reply); } - fn apply_event_deliver_nexus_reply(&mut self) { + fn apply_event_deliver_nexus_reply(&mut self, recorded_reply: NexusReply) { let mut latest_config = self.nexus.latest_config_mut(); let reply = self.underlay_network.pop().expect("reply exists"); + assert_eq!(recorded_reply, reply); match reply { NexusReply::AckedPreparesFromCoordinator { epoch, acks } => { if epoch == latest_config.epoch { @@ -301,13 +302,21 @@ impl TqState { latest_config.op = NexusOp::Aborted; } - fn apply_event_deliver_envelope(&mut self, destination: PlatformId) { + fn apply_event_deliver_envelope(&mut self, recorded_envelope: Envelope) { let envelope = self .bootstrap_network - .get_mut(&destination) + .get_mut(&recorded_envelope.to) .unwrap() .pop() .expect("envelope in bootstrap network"); + + // The recorded envelope must be exactly the same as the one pulled + // off the bootstrap network. We ignore crypto data because we don't + // currently seed and track random number generators. For our purposes, + // validating the other fields is enough, because the test will fail if + // the crypto doesn't work and decrypt to the same plaintext. 
+ assert!(recorded_envelope.equal_except_for_crypto_data(&envelope)); + let (node, ctx) = self.sut.nodes.get_mut(&envelope.to).expect("destination exists"); node.handle(ctx, envelope.from, envelope.msg); diff --git a/trust-quorum/tests/cluster.rs b/trust-quorum/tests/cluster.rs index c514e861f28..fe13bbf3967 100644 --- a/trust-quorum/tests/cluster.rs +++ b/trust-quorum/tests/cluster.rs @@ -12,7 +12,7 @@ use proptest::collection::{btree_set, size_range}; use proptest::prelude::*; use proptest::sample::Selector; use slog::{Logger, info, o}; -use std::collections::{BTreeMap, BTreeSet}; +use std::collections::BTreeSet; use test_strategy::{Arbitrary, proptest}; use trust_quorum::{ CoordinatorOperation, Epoch, NodeCommonCtx, PlatformId, Threshold, @@ -22,6 +22,7 @@ use trust_quorum_test_utils::{ Event, EventLog, nexus::{NexusConfig, NexusOp, NexusReply}, }; +use uuid::Uuid; /// The state of our test #[derive(Clone, Diffable)] @@ -93,13 +94,13 @@ impl TestState { for event in &events { event_log.record(event); } - let check_invariants = !events.is_empty(); + let skip_actions = events.is_empty(); for event in events { + let affected_nodes = event.affected_nodes(); self.tq_state.apply_event(event); + self.check_invariants(affected_nodes)?; } - if check_invariants { - self.check_invariants()?; - } else { + if skip_actions { self.skipped_actions += 1; } } @@ -108,15 +109,15 @@ impl TestState { fn action_to_events(&self, action: Action) -> Vec { match action { - Action::DeliverEnvelopes(indices) => { - self.action_to_events_deliver_envelopes(indices) + Action::DeliverEnvelope(selector) => { + self.action_to_events_deliver_envelope(selector) } Action::PollPrepareAcks => { self.action_to_events_poll_prepare_acks() } - Action::Commit(indices) => self.action_to_events_commit(indices), - Action::DeliverNexusReplies(n) => { - self.action_to_events_deliver_nexus_replies(n) + Action::Commit(index) => self.action_to_events_commit(index), + Action::DeliverNexusReply => { + self.action_to_events_deliver_nexus_reply() } Action::Reconfigure { num_added_nodes, @@ -132,35 +133,28 @@ impl TestState { } } - fn action_to_events_deliver_envelopes( + fn action_to_events_deliver_envelope( &self, - indices: Vec, + selector: Selector, ) -> Vec { let mut events = vec![]; - let destinations: Vec<_> = - self.tq_state.bootstrap_network.keys().cloned().collect(); - if destinations.is_empty() { + if self.tq_state.bootstrap_network.is_empty() { // nothing to do return events; } + let destination = + selector.select(self.tq_state.bootstrap_network.keys()); - // Add an event only if there is actually an envelope to send - let mut counts = BTreeMap::new(); - for index in indices { - let id = index.get(&destinations); - let count = counts.entry(id).or_insert(0usize); - *count += 1; - let num_envelopes = self - .tq_state - .bootstrap_network - .get(id) - .expect("destination exists") - .len(); - if *count <= num_envelopes { - events.push(Event::DeliverEnvelope { destination: id.clone() }); - } - } - + // We pop from the back and push on the front + let envelope = self + .tq_state + .bootstrap_network + .get(destination) + .expect("destination exists") + .last() + .expect("envelope exists") + .clone(); + events.push(Event::DeliverEnvelope(envelope)); events } @@ -223,7 +217,7 @@ impl TestState { events } - fn action_to_events_commit(&self, indices: Vec) -> Vec { + fn action_to_events_commit(&self, index: Index) -> Vec { let mut events = vec![]; let latest_config = self.tq_state.nexus.latest_config(); if latest_config.op != 
NexusOp::Committed { @@ -238,25 +232,17 @@ impl TestState { return events; } - // De-duplicate the Index->PlatformId mapping - let mut nodes: BTreeSet = BTreeSet::new(); - for index in indices { - let id = *index.get(&committable); - nodes.insert(id.clone()); - } - for node in nodes { - events.push(Event::CommitConfiguration(node)); - } + let id = index.get(&committable); + events.push(Event::CommitConfiguration((*id).clone())); events } - fn action_to_events_deliver_nexus_replies(&self, n: usize) -> Vec { - let mut events = vec![]; - let n = usize::min(n, self.tq_state.underlay_network.len()); - for _ in 0..n { - events.push(Event::DeliverNexusReply); + fn action_to_events_deliver_nexus_reply(&self) -> Vec { + if let Some(reply) = self.tq_state.underlay_network.last() { + vec![Event::DeliverNexusReply(reply.clone())] + } else { + vec![] } - events } fn action_to_events_reconfigure( @@ -392,13 +378,25 @@ impl TestState { /// /// We typically only check the current configuration as the checks hold /// inductively as configurations advance. - fn check_invariants(&self) -> Result<(), TestCaseError> { - self.invariant_all_nodes_have_same_configuration_per_epoch()?; - self.invariant_nodes_have_prepared_if_coordinator_has_acks()?; - self.invariant_nodes_have_committed_if_nexus_has_acks()?; - self.invariant_nodes_not_coordinating_and_computing_key_share_simultaneously()?; - self.invariant_no_alarms()?; - self.invariant_expunged_nodes_have_actually_been_expunged()?; + /// + /// Furthermore, we only test the modified node where we can to prevent + /// having to loop over all nodes when they haven't changed. + fn check_invariants( + &self, + affected_nodes: Vec, + ) -> Result<(), TestCaseError> { + self.invariant_all_nodes_have_same_configuration_per_epoch( + &affected_nodes, + )?; + self.invariant_nodes_have_prepared_if_coordinator_has_acks( + &affected_nodes, + )?; + self.invariant_nodes_have_committed_if_nexus_has_acks(&affected_nodes)?; + self.invariant_nodes_not_coordinating_and_computing_key_share_simultaneously(&affected_nodes)?; + self.invariant_no_alarms(&affected_nodes)?; + self.invariant_expunged_nodes_have_actually_been_expunged( + &affected_nodes, + )?; Ok(()) } @@ -408,25 +406,30 @@ impl TestState { /// * have no committed configurations fn invariant_expunged_nodes_have_actually_been_expunged( &self, + affected_nodes: &[PlatformId], ) -> Result<(), TestCaseError> { - for id in &self.tq_state.expunged { - let (_, ctx) = - self.tq_state.sut.nodes.get(id).expect("node exists"); - let ps = ctx.persistent_state(); - if ps.is_expunged() { - continue; - } - if let Some(config) = ps.latest_committed_configuration() { - let nexus_config = self - .tq_state - .nexus - .configs - .get(&config.epoch) - .expect("config exists"); - prop_assert!(config.members.contains_key(ctx.platform_id())); - prop_assert!(nexus_config.members.contains(ctx.platform_id())); - } else { - continue; + for id in affected_nodes { + if self.tq_state.expunged.contains(id) { + let (_, ctx) = + self.tq_state.sut.nodes.get(id).expect("node exists"); + let ps = ctx.persistent_state(); + if ps.is_expunged() { + continue; + } + if let Some(config) = ps.latest_committed_configuration() { + let nexus_config = self + .tq_state + .nexus + .configs + .get(&config.epoch) + .expect("config exists"); + prop_assert!( + config.members.contains_key(ctx.platform_id()) + ); + prop_assert!( + nexus_config.members.contains(ctx.platform_id()) + ); + } } } @@ -439,8 +442,11 @@ impl TestState { /// Sometimes nodes may not have a configuration for 
a given epoch. fn invariant_all_nodes_have_same_configuration_per_epoch( &self, + affected_nodes: &[PlatformId], ) -> Result<(), TestCaseError> { - for (id, (_, ctx)) in &self.tq_state.sut.nodes { + for id in affected_nodes { + let (_, ctx) = + self.tq_state.sut.nodes.get(id).expect("node exists"); let diff = self .tq_state .all_coordinated_configs @@ -466,6 +472,7 @@ impl TestState { /// only have acknowledgments from nodes that have seen the `Prepare`. fn invariant_nodes_have_prepared_if_coordinator_has_acks( &self, + affected_nodes: &[PlatformId], ) -> Result<(), TestCaseError> { let (acked, epoch) = { let latest_config = self.tq_state.nexus.latest_config(); @@ -494,12 +501,15 @@ impl TestState { } (acked, latest_config.epoch) }; - // Make sure the coordinator actually is coordinating for this epoch - for id in acked { - let (_, ctx) = - self.tq_state.sut.nodes.get(&id).expect("node exists"); - prop_assert!(ctx.persistent_state().has_prepared(epoch)); + // If any affected node was one of the acked nodes, then it should have + // prepared for this epoch. + for id in affected_nodes { + if acked.contains(id) { + let (_, ctx) = + self.tq_state.sut.nodes.get(&id).expect("node exists"); + prop_assert!(ctx.persistent_state().has_prepared(epoch)); + } } Ok(()) @@ -513,18 +523,21 @@ impl TestState { /// configuration and share for this epoch. fn invariant_nodes_have_committed_if_nexus_has_acks( &self, + affected_nodes: &[PlatformId], ) -> Result<(), TestCaseError> { let latest_config = self.tq_state.nexus.latest_config(); if latest_config.op != NexusOp::Committed { return Ok(()); } - for id in &latest_config.committed_members { - let (_, ctx) = - self.tq_state.sut.nodes.get(&id).expect("node exists"); - let ps = ctx.persistent_state(); - prop_assert!(ps.commits.contains(&latest_config.epoch)); - prop_assert!(ps.has_prepared(latest_config.epoch)); + for id in affected_nodes { + if latest_config.committed_members.contains(id) { + let (_, ctx) = + self.tq_state.sut.nodes.get(id).expect("node exists"); + let ps = ctx.persistent_state(); + prop_assert!(ps.commits.contains(&latest_config.epoch)); + prop_assert!(ps.has_prepared(latest_config.epoch)); + } } Ok(()) @@ -541,8 +554,11 @@ impl TestState { // key share for the latest committed configuration that they know of. fn invariant_nodes_not_coordinating_and_computing_key_share_simultaneously( &self, + affected_nodes: &[PlatformId], ) -> Result<(), TestCaseError> { - for (id, (node, _)) in &self.tq_state.sut.nodes { + for id in affected_nodes { + let (node, _) = + self.tq_state.sut.nodes.get(id).expect("node exists"); prop_assert!( !(node.get_coordinator_state().is_some() && node.is_computing_key_share()), @@ -554,9 +570,14 @@ impl TestState { Ok(()) } - // Ensure there has been no alarm at any node - fn invariant_no_alarms(&self) -> Result<(), TestCaseError> { - for (id, (_, ctx)) in &self.tq_state.sut.nodes { + // Ensure there has been no alarm at any affected node + fn invariant_no_alarms( + &self, + affected_nodes: &[PlatformId], + ) -> Result<(), TestCaseError> { + for id in affected_nodes { + let (_, ctx) = + self.tq_state.sut.nodes.get(id).expect("node exists"); let alarms = ctx.alarms(); prop_assert!( alarms.is_empty(), @@ -573,17 +594,12 @@ impl TestState { #[derive(Debug, Arbitrary)] #[allow(clippy::large_enum_variant)] pub enum Action { - /// For each indexed member deliver an in-flight bootstrap network msg if - /// there is one. + /// Deliver an in-flight bootstrap network msg if there is one. 
/// - /// The indexes here are used to index into the `PlatformIds` of + /// The selector here is used to index into the `PlatformIds` of /// `test_state.bootstrap_network`. - /// - /// We may deliver more than one message to each member. - #[weight(4)] - DeliverEnvelopes( - #[any(size_range(1..MAX_DELIVERED_ENVELOPES).lift())] Vec, - ), + #[weight(30)] + DeliverEnvelope(Selector), /// Have Nexus poll the coordinator for the latest configuration if it is /// still being prepared. @@ -592,19 +608,19 @@ pub enum Action { /// simulates recording this information in CRDB. If Nexus has witnessed /// that enough nodes have acked prepares then it changes the config /// operation to committed. - #[weight(4)] + #[weight(10)] PollPrepareAcks, /// If the current configuration at nexus is marked `NexusOp::Committed` - /// then call `Node::commit_configuration` for each indexed + /// then call `Node::commit_configuration` for the indexed /// node in `NexusConfig::prepared_members` that is not also in /// `NexusConfig::committed_members`. - #[weight(4)] - Commit(#[any(size_range(1..MAX_CONCURRENT_COMMITS).lift())] Vec), + #[weight(10)] + Commit(Index), /// Deliver in-flight messages to Nexus from the underlay network - #[weight(4)] - DeliverNexusReplies(#[strategy(1..10usize)] usize), + #[weight(10)] + DeliverNexusReply, /// Generate a new configuration by adding a number of *new* (non-expunged) /// nodes to the cluster from `member_universe` and removing the specific @@ -621,15 +637,25 @@ pub enum Action { } const MIN_CLUSTER_SIZE: usize = 3; -const MAX_CLUSTER_SIZE: usize = 20; -const MEMBER_UNIVERSE_SIZE: usize = 40; +const MAX_CLUSTER_SIZE: usize = 12; +const MEMBER_UNIVERSE_SIZE: usize = 16; const MAX_INITIAL_DOWN_NODES: usize = 5; const MAX_ADDED_NODES: usize = 5; const MAX_REMOVED_NODES: usize = 3; -const MAX_DELIVERED_ENVELOPES: usize = 20; -const MAX_CONCURRENT_COMMITS: usize = 10; -const MIN_ACTIONS: usize = 100; -const MAX_ACTIONS: usize = 1000; + +// This is how long each test case is +const MIN_ACTIONS: usize = 300; +const MAX_ACTIONS: usize = 2000; + +// This is the number of test cases to run. We prefer to run longer tests with +// fewer test cases to maximize the results of each one having an interesting +// interleaving while stepping through multiple configurations. This is +// particularly true since we generate actions before tests run, and therefore +// don't know which actions are valid. +// +// We can always set this much higher for periodic runs, but we want to limit it +// primarily for CI and general testing. +const MAX_TEST_CASES: u32 = 100; /// Information about configurations used at test generation time #[derive(Debug, Clone, Arbitrary)] @@ -666,11 +692,14 @@ pub struct TestInput { actions: Vec, } -#[proptest] +#[proptest(cases = MAX_TEST_CASES)] fn test_trust_quorum_protocol(input: TestInput) { - let logctx = test_setup_log("test_trust_quorum_protocol"); - let (parent_dir, prefix) = log_prefix_for_test(logctx.test_name()); - let event_log_path = parent_dir.join(format!("{prefix}-events.json")); + // We add a uuid so that we can match log files and event traces + // across multiple proptest runs. 
+ let test_name = format!("test_trust_quorum_protocol_{}", Uuid::new_v4()); + let logctx = test_setup_log(test_name.as_str()); + let (parent_dir, _) = log_prefix_for_test(logctx.test_name()); + let event_log_path = parent_dir.join(format!("{test_name}.events.json")); let mut event_log = EventLog::new(&event_log_path); let log = logctx.log.new(o!("component" => "tq-proptest")); From 7c98617ae258ce1444b6e4db38c11318b4c792c6 Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Thu, 28 Aug 2025 15:31:13 -0400 Subject: [PATCH 12/38] [reconfigurator-cli] DNS diffs: summarize unchanged names (#8923) This pretty significantly improves the signal-to-noise ratio in reconfigurator-cli tests. Probably should've done this a while ago! --- .../tests/output/cmds-example-stdout | 135 +- ...ds-expunge-newly-added-external-dns-stdout | 602 +--- ...ds-expunge-newly-added-internal-dns-stdout | 610 +--- .../output/cmds-host-phase-2-source-stdout | 496 +-- .../output/cmds-mupdate-update-flow-stdout | 971 +---- .../output/cmds-noop-image-source-stdout | 272 +- .../tests/output/cmds-set-mgs-updates-stdout | 792 +---- .../cmds-set-remove-mupdate-override-stdout | 50 +- .../tests/output/cmds-set-zone-images-stdout | 192 +- .../tests/output/cmds-target-release-stdout | 3168 +---------------- internal-dns/types/src/diff.rs | 20 +- .../types/tests/output/diff_example_empty.out | 5 +- 12 files changed, 131 insertions(+), 7182 deletions(-) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout index 01284a774e8..92232e4b5e6 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout @@ -641,18 +641,15 @@ to: blueprint 86db3308-f817-4626-8838-4085949a6a41 internal DNS: * DNS zone: "control-plane.oxide.internal": - name: 89d02b1b-478c-401a-8e28-7a26f74fa41b.sled (records: 1) - AAAA fd00:1122:3344:101::1 + name: _internal-ntp._tcp (records: 1) + SRV port 123 b3c9c041-d2f0-4767-bdaf-0e52e9d7a013.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 1) - SRV port 12348 89d02b1b-478c-401a-8e28-7a26f74fa41b.sled.control-plane.oxide.internal + name: b3c9c041-d2f0-4767-bdaf-0e52e9d7a013.host (records: 1) + AAAA fd00:1122:3344:101::21 + unchanged names: 2 (records: 2) external DNS: DNS zone: "oxide.example" (unchanged) - name: example-silo.sys (records: 0) + unchanged names: 1 (records: 0) @@ -721,18 +718,15 @@ to: blueprint 86db3308-f817-4626-8838-4085949a6a41 internal DNS: * DNS zone: "control-plane.oxide.internal": - name: 89d02b1b-478c-401a-8e28-7a26f74fa41b.sled (records: 1) - AAAA fd00:1122:3344:101::1 + name: _internal-ntp._tcp (records: 1) + SRV port 123 b3c9c041-d2f0-4767-bdaf-0e52e9d7a013.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 1) - SRV port 12348 89d02b1b-478c-401a-8e28-7a26f74fa41b.sled.control-plane.oxide.internal + name: b3c9c041-d2f0-4767-bdaf-0e52e9d7a013.host (records: 1) + AAAA fd00:1122:3344:101::21 + unchanged names: 2 (records: 2) external DNS: DNS zone: "oxide.example" (unchanged) - name: example-silo.sys (records: 0) + unchanged names: 1 (records: 0) @@ -801,18 +795,15 @@ to: blueprint 02697f74-b14a-4418-90f0-c28b2a3a6aa9 internal DNS: * DNS zone: "control-plane.oxide.internal": - name: 89d02b1b-478c-401a-8e28-7a26f74fa41b.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: _internal-ntp._tcp (records: 1) - SRV port 123 b3c9c041-d2f0-4767-bdaf-0e52e9d7a013.host.control-plane.oxide.internal - name: 
_repo-depot._tcp (records: 1) - SRV port 12348 89d02b1b-478c-401a-8e28-7a26f74fa41b.sled.control-plane.oxide.internal - name: b3c9c041-d2f0-4767-bdaf-0e52e9d7a013.host (records: 1) - AAAA fd00:1122:3344:101::21 + unchanged names: 2 (records: 2) external DNS: DNS zone: "oxide.example" (unchanged) - name: example-silo.sys (records: 0) + unchanged names: 1 (records: 0) @@ -1678,141 +1669,31 @@ to: blueprint 86db3308-f817-4626-8838-4085949a6a41 internal DNS: * DNS zone: "control-plane.oxide.internal": - name: 03ba2f16-8b5e-41c4-906d-5a7a0af460ef.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: 056fd45c-2498-4ce4-8c97-79dfc9321080.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: 0ab3dbe9-8387-4600-b097-cb71ee91ee83.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: 17ed8f62-8cd0-416c-86d1-5eaf6f7358ef.host (records: 1) - AAAA fd00:1122:3344:101::2b - name: 18b3781d-571b-4d7c-b65d-18a452e5a64a.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: 2eb69596-f081-4e2d-9425-9994926e0832.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 32d8d836-4d8a-4e54-8fa9-f31d79c42646.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: 397841de-588e-4fe7-94ee-a944b7340074.host (records: 1) - AAAA fd00:1122:3344:101::2c - name: 4692cc31-6eb6-437c-9634-9688663d06ae.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: 4a81477d-5a28-4246-93ef-01db835a5c5e.host (records: 1) - AAAA fd00:1122:3344:101::2d - name: 534dad87-2b4c-4250-8ad9-2ec1d93b2994.host (records: 1) - AAAA fd00:1122:3344:101::29 - name: 587da9e8-8fc0-4854-b585-070741a7b00d.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 5954ecdc-c5ad-49a6-9c5f-4689f783ba83.host (records: 1) - AAAA fd00:1122:3344:101::28 - name: 6c2a57b0-2de0-4409-a6b9-c9aa5614eefa.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: 83ac30bd-ae85-4d6b-84c3-add02d3214cf.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: 89d02b1b-478c-401a-8e28-7a26f74fa41b.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 99a750b2-724d-4828-ae5f-0df1aad90166.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 a27817ff-db8d-49ba-9682-9e3c13bc4e4b.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 a27817ff-db8d-49ba-9682-9e3c13bc4e4b.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 a27817ff-db8d-49ba-9682-9e3c13bc4e4b.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 17ed8f62-8cd0-416c-86d1-5eaf6f7358ef.host.control-plane.oxide.internal - SRV port 17000 534dad87-2b4c-4250-8ad9-2ec1d93b2994.host.control-plane.oxide.internal - SRV port 17000 dc2666e6-4c3e-4b8e-99bc-bcdb5f8986e1.host.control-plane.oxide.internal - name: _crucible._tcp.397841de-588e-4fe7-94ee-a944b7340074 (records: 1) - SRV port 32345 397841de-588e-4fe7-94ee-a944b7340074.host.control-plane.oxide.internal - name: _crucible._tcp.4a81477d-5a28-4246-93ef-01db835a5c5e (records: 1) - SRV port 32345 4a81477d-5a28-4246-93ef-01db835a5c5e.host.control-plane.oxide.internal - name: _crucible._tcp.6c2a57b0-2de0-4409-a6b9-c9aa5614eefa (records: 1) - SRV port 32345 6c2a57b0-2de0-4409-a6b9-c9aa5614eefa.host.control-plane.oxide.internal - name: _crucible._tcp.99a750b2-724d-4828-ae5f-0df1aad90166 (records: 1) - SRV port 32345 
99a750b2-724d-4828-ae5f-0df1aad90166.host.control-plane.oxide.internal - name: _crucible._tcp.b61b7c3c-d665-44b3-9312-794aa81c59de (records: 1) - SRV port 32345 b61b7c3c-d665-44b3-9312-794aa81c59de.host.control-plane.oxide.internal - name: _crucible._tcp.b957d6cf-f7b2-4bee-9928-c5fde8c59e04 (records: 1) - SRV port 32345 b957d6cf-f7b2-4bee-9928-c5fde8c59e04.host.control-plane.oxide.internal - name: _crucible._tcp.e246f5e3-0650-4afc-860f-ee7114d309c5 (records: 1) - SRV port 32345 e246f5e3-0650-4afc-860f-ee7114d309c5.host.control-plane.oxide.internal - name: _crucible._tcp.e668d83e-a28c-42dc-b574-467e57403cc1 (records: 1) - SRV port 32345 e668d83e-a28c-42dc-b574-467e57403cc1.host.control-plane.oxide.internal - name: _crucible._tcp.f4dc5b5d-6eb6-40a9-a079-971eca862285 (records: 1) - SRV port 32345 f4dc5b5d-6eb6-40a9-a079-971eca862285.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 03ba2f16-8b5e-41c4-906d-5a7a0af460ef.host.control-plane.oxide.internal - SRV port 5353 5954ecdc-c5ad-49a6-9c5f-4689f783ba83.host.control-plane.oxide.internal - SRV port 5353 83ac30bd-ae85-4d6b-84c3-add02d3214cf.host.control-plane.oxide.internal * name: _internal-ntp._tcp (records: 3 -> 2) - SRV port 123 0ab3dbe9-8387-4600-b097-cb71ee91ee83.host.control-plane.oxide.internal - SRV port 123 18b3781d-571b-4d7c-b65d-18a452e5a64a.host.control-plane.oxide.internal - SRV port 123 ac5bb28e-91d5-42f3-a57a-d84e1c414c17.host.control-plane.oxide.internal + SRV port 123 18b3781d-571b-4d7c-b65d-18a452e5a64a.host.control-plane.oxide.internal + SRV port 123 ac5bb28e-91d5-42f3-a57a-d84e1c414c17.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 4692cc31-6eb6-437c-9634-9688663d06ae.host.control-plane.oxide.internal - SRV port 5353 587da9e8-8fc0-4854-b585-070741a7b00d.host.control-plane.oxide.internal - SRV port 5353 ffbf02f0-261d-4723-b613-eb861245acbd.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 056fd45c-2498-4ce4-8c97-79dfc9321080.host.control-plane.oxide.internal - SRV port 12221 a67ac9b3-427b-4ea6-a891-1c76a22720f5.host.control-plane.oxide.internal - SRV port 12221 d856156c-2bc2-41ad-beef-7ca1da5802d3.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 a27817ff-db8d-49ba-9682-9e3c13bc4e4b.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 3) - SRV port 12348 2eb69596-f081-4e2d-9425-9994926e0832.sled.control-plane.oxide.internal - SRV port 12348 32d8d836-4d8a-4e54-8fa9-f31d79c42646.sled.control-plane.oxide.internal - SRV port 12348 89d02b1b-478c-401a-8e28-7a26f74fa41b.sled.control-plane.oxide.internal - name: a27817ff-db8d-49ba-9682-9e3c13bc4e4b.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: a67ac9b3-427b-4ea6-a891-1c76a22720f5.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: ac5bb28e-91d5-42f3-a57a-d84e1c414c17.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: b61b7c3c-d665-44b3-9312-794aa81c59de.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: b957d6cf-f7b2-4bee-9928-c5fde8c59e04.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: d856156c-2bc2-41ad-beef-7ca1da5802d3.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: dc2666e6-4c3e-4b8e-99bc-bcdb5f8986e1.host (records: 1) - AAAA fd00:1122:3344:101::2a - name: e246f5e3-0650-4afc-860f-ee7114d309c5.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: e668d83e-a28c-42dc-b574-467e57403cc1.host (records: 1) - AAAA fd00:1122:3344:103::24 - name: 
f4dc5b5d-6eb6-40a9-a079-971eca862285.host (records: 1) - AAAA fd00:1122:3344:101::2e - name: ffbf02f0-261d-4723-b613-eb861245acbd.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 43 (records: 55) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.4 - A 192.0.2.2 - A 192.0.2.3 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout index fea4459e751..cf22a460ed9 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout @@ -471,208 +471,15 @@ to: blueprint 366b0b68-d80e-4bc1-abd3-dc69837847e0 internal DNS: * DNS zone: "control-plane.oxide.internal": - name: 063ea59f-076b-48d6-a40a-e3980c37414e.host (records: 1) - AAAA fd00:1122:3344:103::2c - name: 075cf83e-1f6a-4320-820a-95e56fa98f6f.host (records: 1) - AAAA fd00:1122:3344:103::29 - name: 09523a0c-27f2-4ce1-a4cb-004f5ff5c4ef.host (records: 1) - AAAA fd00:1122:3344:102::27 - name: 09d83e68-6773-4837-8575-6a169ec608fb.host (records: 1) - AAAA fd00:1122:3344:103::26 - name: 0b4ec47a-6710-498c-a449-e3b461421c62.host (records: 1) - AAAA fd00:1122:3344:103::2a - name: 11bdc7ae-40b8-475b-916a-95159ef5a240.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 1374a0cd-8c56-4cb4-bfd1-92825c1e181e.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: 13b78322-24bd-4d90-8209-31a391ce21e8.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: 18edee19-6ad3-4741-8467-93f0a7241484.host (records: 1) - AAAA fd00:1122:3344:101::2b - name: 1dc649b1-2ce2-4a85-bc1f-b8a3ef23a70e.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 2522d081-993f-4dcf-a011-63b4e9d5c66e.host (records: 1) - AAAA fd00:1122:3344:101::28 - name: 3799d2f7-88d2-4e70-8727-c3000c9b8c69.host (records: 1) - AAAA fd00:1122:3344:102::2d - name: 395e53ba-b6ef-4072-861d-ec6367275ab5.host (records: 1) - AAAA fd00:1122:3344:103::2d - name: 3acd2d8f-cdb0-4d46-8e2b-2dd248fd12c3.host (records: 1) - AAAA fd00:1122:3344:102::2a - name: 3d60fafa-2b0d-4969-87ea-88aa72de90c1.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: 40756b95-495c-4eff-b148-3d71dadb9bce.host (records: 1) - AAAA fd00:1122:3344:101::2e - name: 4631c4ed-9ef2-4e0a-b435-91ffad32ca29.host (records: 1) - AAAA fd00:1122:3344:101::29 - name: 4bd59032-20b7-41b4-a85a-65c69fbfa654.host (records: 1) - AAAA fd00:1122:3344:101::2c - name: 50502496-9a3e-49c4-8024-65f7a0ddac22.host (records: 1) - AAAA fd00:1122:3344:103::27 - name: 57e50d80-ce47-4e2f-8851-da0a0e7730ae.host (records: 1) - AAAA fd00:1122:3344:101::2a - name: 5cfa4c8c-14ff-4718-aee6-84de92515a30.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 5d389008-0709-41e2-b6b5-ce65d098a211.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 5dcb81fc-7d8e-46cd-9b68-1ed176d79026.host (records: 1) - AAAA fd00:1122:3344:101::2d - name: 6209097c-24e4-44d7-9afb-d3a5caeb1637.host (records: 1) - AAAA fd00:1122:3344:103::2b - name: 
69f9e32e-e073-4157-bc9d-d07f878b1ca9.host (records: 1) - AAAA fd00:1122:3344:103::2e - name: 6c671c28-3cf9-44ef-b2c1-38f180161ac0.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: 711ac7f8-d19e-4572-bdb9-e9b50f6e362a.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: 73751755-adcd-4a1d-98a1-579243c4f59b.host (records: 1) - AAAA fd00:1122:3344:102::29 - name: 7380b0c6-5c23-4041-8b2c-8be4d60ff503.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: 74809be0-66e4-4aea-a771-adbd72304f1a.host (records: 1) - AAAA fd00:1122:3344:102::2b - name: 783f4dca-5916-4abf-9fe7-e7207ab4c2e7.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: 7dda6d17-0de8-414c-b796-310e2bc73ec9.host (records: 1) - AAAA fd00:1122:3344:102::2e - name: 8429c772-07e8-40a6-acde-2ed47d16cf84.host (records: 1) - AAAA fd00:1122:3344:103::24 - name: 88488dad-b6d0-4074-8409-727f223e218d.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: 8c0a1969-15b6-4165-ba6d-a27c24151037.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: 9161edf7-ccb0-42d9-bd49-47e093e12548.host (records: 1) - AAAA fd00:1122:3344:102::2c - name: 97cb7367-150d-4807-9c70-562e8105b273.host (records: 1) - AAAA fd00:1122:3344:102::25 - name: 9dc50690-f9bf-4520-bf80-051d0f465c2c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 1374a0cd-8c56-4cb4-bfd1-92825c1e181e.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 1374a0cd-8c56-4cb4-bfd1-92825c1e181e.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 1374a0cd-8c56-4cb4-bfd1-92825c1e181e.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 3d60fafa-2b0d-4969-87ea-88aa72de90c1.host.control-plane.oxide.internal - SRV port 17000 e7835fd2-0ff2-4809-91da-dec23e98dc96.host.control-plane.oxide.internal - SRV port 17000 edebb2c6-d4b2-4a35-816c-17bfd0efa13f.host.control-plane.oxide.internal - name: _crucible._tcp.063ea59f-076b-48d6-a40a-e3980c37414e (records: 1) - SRV port 32345 063ea59f-076b-48d6-a40a-e3980c37414e.host.control-plane.oxide.internal - name: _crucible._tcp.075cf83e-1f6a-4320-820a-95e56fa98f6f (records: 1) - SRV port 32345 075cf83e-1f6a-4320-820a-95e56fa98f6f.host.control-plane.oxide.internal - name: _crucible._tcp.09523a0c-27f2-4ce1-a4cb-004f5ff5c4ef (records: 1) - SRV port 32345 09523a0c-27f2-4ce1-a4cb-004f5ff5c4ef.host.control-plane.oxide.internal - name: _crucible._tcp.09d83e68-6773-4837-8575-6a169ec608fb (records: 1) - SRV port 32345 09d83e68-6773-4837-8575-6a169ec608fb.host.control-plane.oxide.internal - name: _crucible._tcp.0b4ec47a-6710-498c-a449-e3b461421c62 (records: 1) - SRV port 32345 0b4ec47a-6710-498c-a449-e3b461421c62.host.control-plane.oxide.internal - name: _crucible._tcp.13b78322-24bd-4d90-8209-31a391ce21e8 (records: 1) - SRV port 32345 13b78322-24bd-4d90-8209-31a391ce21e8.host.control-plane.oxide.internal - name: _crucible._tcp.18edee19-6ad3-4741-8467-93f0a7241484 (records: 1) - SRV port 32345 18edee19-6ad3-4741-8467-93f0a7241484.host.control-plane.oxide.internal - name: _crucible._tcp.2522d081-993f-4dcf-a011-63b4e9d5c66e (records: 1) - SRV port 32345 2522d081-993f-4dcf-a011-63b4e9d5c66e.host.control-plane.oxide.internal - name: _crucible._tcp.3799d2f7-88d2-4e70-8727-c3000c9b8c69 (records: 1) - SRV port 32345 
3799d2f7-88d2-4e70-8727-c3000c9b8c69.host.control-plane.oxide.internal - name: _crucible._tcp.395e53ba-b6ef-4072-861d-ec6367275ab5 (records: 1) - SRV port 32345 395e53ba-b6ef-4072-861d-ec6367275ab5.host.control-plane.oxide.internal - name: _crucible._tcp.3acd2d8f-cdb0-4d46-8e2b-2dd248fd12c3 (records: 1) - SRV port 32345 3acd2d8f-cdb0-4d46-8e2b-2dd248fd12c3.host.control-plane.oxide.internal - name: _crucible._tcp.40756b95-495c-4eff-b148-3d71dadb9bce (records: 1) - SRV port 32345 40756b95-495c-4eff-b148-3d71dadb9bce.host.control-plane.oxide.internal - name: _crucible._tcp.4631c4ed-9ef2-4e0a-b435-91ffad32ca29 (records: 1) - SRV port 32345 4631c4ed-9ef2-4e0a-b435-91ffad32ca29.host.control-plane.oxide.internal - name: _crucible._tcp.4bd59032-20b7-41b4-a85a-65c69fbfa654 (records: 1) - SRV port 32345 4bd59032-20b7-41b4-a85a-65c69fbfa654.host.control-plane.oxide.internal - name: _crucible._tcp.50502496-9a3e-49c4-8024-65f7a0ddac22 (records: 1) - SRV port 32345 50502496-9a3e-49c4-8024-65f7a0ddac22.host.control-plane.oxide.internal - name: _crucible._tcp.57e50d80-ce47-4e2f-8851-da0a0e7730ae (records: 1) - SRV port 32345 57e50d80-ce47-4e2f-8851-da0a0e7730ae.host.control-plane.oxide.internal - name: _crucible._tcp.5cfa4c8c-14ff-4718-aee6-84de92515a30 (records: 1) - SRV port 32345 5cfa4c8c-14ff-4718-aee6-84de92515a30.host.control-plane.oxide.internal - name: _crucible._tcp.5dcb81fc-7d8e-46cd-9b68-1ed176d79026 (records: 1) - SRV port 32345 5dcb81fc-7d8e-46cd-9b68-1ed176d79026.host.control-plane.oxide.internal - name: _crucible._tcp.6209097c-24e4-44d7-9afb-d3a5caeb1637 (records: 1) - SRV port 32345 6209097c-24e4-44d7-9afb-d3a5caeb1637.host.control-plane.oxide.internal - name: _crucible._tcp.69f9e32e-e073-4157-bc9d-d07f878b1ca9 (records: 1) - SRV port 32345 69f9e32e-e073-4157-bc9d-d07f878b1ca9.host.control-plane.oxide.internal - name: _crucible._tcp.73751755-adcd-4a1d-98a1-579243c4f59b (records: 1) - SRV port 32345 73751755-adcd-4a1d-98a1-579243c4f59b.host.control-plane.oxide.internal - name: _crucible._tcp.74809be0-66e4-4aea-a771-adbd72304f1a (records: 1) - SRV port 32345 74809be0-66e4-4aea-a771-adbd72304f1a.host.control-plane.oxide.internal - name: _crucible._tcp.7dda6d17-0de8-414c-b796-310e2bc73ec9 (records: 1) - SRV port 32345 7dda6d17-0de8-414c-b796-310e2bc73ec9.host.control-plane.oxide.internal - name: _crucible._tcp.9161edf7-ccb0-42d9-bd49-47e093e12548 (records: 1) - SRV port 32345 9161edf7-ccb0-42d9-bd49-47e093e12548.host.control-plane.oxide.internal - name: _crucible._tcp.97cb7367-150d-4807-9c70-562e8105b273 (records: 1) - SRV port 32345 97cb7367-150d-4807-9c70-562e8105b273.host.control-plane.oxide.internal - name: _crucible._tcp.b8caf10d-a07d-4b07-a24c-79b433dcfeb9 (records: 1) - SRV port 32345 b8caf10d-a07d-4b07-a24c-79b433dcfeb9.host.control-plane.oxide.internal - name: _crucible._tcp.b9bfd143-8075-4574-9ad2-312c0650401a (records: 1) - SRV port 32345 b9bfd143-8075-4574-9ad2-312c0650401a.host.control-plane.oxide.internal - name: _crucible._tcp.c6b14d88-dcde-4563-b74f-514fe1f70ac5 (records: 1) - SRV port 32345 c6b14d88-dcde-4563-b74f-514fe1f70ac5.host.control-plane.oxide.internal - name: _crucible._tcp.eb00a322-5216-48ed-9b09-a74e99a75f60 (records: 1) - SRV port 32345 eb00a322-5216-48ed-9b09-a74e99a75f60.host.control-plane.oxide.internal - name: _crucible._tcp.ec1e8677-dfa7-4d4d-ba15-5efbef6c0fbd (records: 1) - SRV port 32345 ec1e8677-dfa7-4d4d-ba15-5efbef6c0fbd.host.control-plane.oxide.internal * name: _external-dns._tcp (records: 3 -> 2) - SRV port 5353 
1dc649b1-2ce2-4a85-bc1f-b8a3ef23a70e.host.control-plane.oxide.internal - SRV port 5353 8429c772-07e8-40a6-acde-2ed47d16cf84.host.control-plane.oxide.internal - SRV port 5353 8c0a1969-15b6-4165-ba6d-a27c24151037.host.control-plane.oxide.internal + SRV port 5353 1dc649b1-2ce2-4a85-bc1f-b8a3ef23a70e.host.control-plane.oxide.internal + SRV port 5353 8c0a1969-15b6-4165-ba6d-a27c24151037.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 5d389008-0709-41e2-b6b5-ce65d098a211.host.control-plane.oxide.internal - SRV port 123 6c671c28-3cf9-44ef-b2c1-38f180161ac0.host.control-plane.oxide.internal - SRV port 123 b84beeec-f02a-44b1-9f78-80f161eb3be7.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 783f4dca-5916-4abf-9fe7-e7207ab4c2e7.host.control-plane.oxide.internal - SRV port 5353 88488dad-b6d0-4074-8409-727f223e218d.host.control-plane.oxide.internal - SRV port 5353 e7711206-3b44-4c8c-b882-3c530cb7c02f.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 11bdc7ae-40b8-475b-916a-95159ef5a240.host.control-plane.oxide.internal - SRV port 12221 7380b0c6-5c23-4041-8b2c-8be4d60ff503.host.control-plane.oxide.internal - SRV port 12221 a31945b4-3d49-40dc-a6f8-d6d8657e4880.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 1374a0cd-8c56-4cb4-bfd1-92825c1e181e.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 3) - SRV port 12348 711ac7f8-d19e-4572-bdb9-e9b50f6e362a.sled.control-plane.oxide.internal - SRV port 12348 9dc50690-f9bf-4520-bf80-051d0f465c2c.sled.control-plane.oxide.internal - SRV port 12348 a88790de-5962-4871-8686-61c1fd5b7094.sled.control-plane.oxide.internal - name: a31945b4-3d49-40dc-a6f8-d6d8657e4880.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: a88790de-5962-4871-8686-61c1fd5b7094.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: b84beeec-f02a-44b1-9f78-80f161eb3be7.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: b8caf10d-a07d-4b07-a24c-79b433dcfeb9.host (records: 1) - AAAA fd00:1122:3344:103::2f - name: b9bfd143-8075-4574-9ad2-312c0650401a.host (records: 1) - AAAA fd00:1122:3344:102::28 - name: c6b14d88-dcde-4563-b74f-514fe1f70ac5.host (records: 1) - AAAA fd00:1122:3344:102::26 - name: e7711206-3b44-4c8c-b882-3c530cb7c02f.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: e7835fd2-0ff2-4809-91da-dec23e98dc96.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: eb00a322-5216-48ed-9b09-a74e99a75f60.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: ec1e8677-dfa7-4d4d-ba15-5efbef6c0fbd.host (records: 1) - AAAA fd00:1122:3344:103::28 - name: edebb2c6-d4b2-4a35-816c-17bfd0efa13f.host (records: 1) - AAAA fd00:1122:3344:103::25 - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 91 (records: 103) external DNS: * DNS zone: "oxide.example": @@ -682,10 +489,6 @@ external DNS: - NS ns3.oxide.example + NS ns1.oxide.example + NS ns2.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 * name: ns1 (records: 1 -> 1) - A 198.51.100.1 + A 198.51.100.2 @@ -694,6 +497,7 @@ external DNS: + A 198.51.100.3 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 1 (records: 3) @@ -1171,208 +975,15 @@ to: blueprint 9c998c1d-1a7b-440a-ae0c-40f781dea6e2 internal DNS: * DNS zone: "control-plane.oxide.internal": - name: 
063ea59f-076b-48d6-a40a-e3980c37414e.host (records: 1) - AAAA fd00:1122:3344:103::2c - name: 075cf83e-1f6a-4320-820a-95e56fa98f6f.host (records: 1) - AAAA fd00:1122:3344:103::29 - name: 09523a0c-27f2-4ce1-a4cb-004f5ff5c4ef.host (records: 1) - AAAA fd00:1122:3344:102::27 - name: 09d83e68-6773-4837-8575-6a169ec608fb.host (records: 1) - AAAA fd00:1122:3344:103::26 - name: 0b4ec47a-6710-498c-a449-e3b461421c62.host (records: 1) - AAAA fd00:1122:3344:103::2a - name: 11bdc7ae-40b8-475b-916a-95159ef5a240.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 1374a0cd-8c56-4cb4-bfd1-92825c1e181e.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: 13b78322-24bd-4d90-8209-31a391ce21e8.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: 18edee19-6ad3-4741-8467-93f0a7241484.host (records: 1) - AAAA fd00:1122:3344:101::2b - name: 1dc649b1-2ce2-4a85-bc1f-b8a3ef23a70e.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 2522d081-993f-4dcf-a011-63b4e9d5c66e.host (records: 1) - AAAA fd00:1122:3344:101::28 - name: 3799d2f7-88d2-4e70-8727-c3000c9b8c69.host (records: 1) - AAAA fd00:1122:3344:102::2d - name: 395e53ba-b6ef-4072-861d-ec6367275ab5.host (records: 1) - AAAA fd00:1122:3344:103::2d - name: 3acd2d8f-cdb0-4d46-8e2b-2dd248fd12c3.host (records: 1) - AAAA fd00:1122:3344:102::2a - name: 3d60fafa-2b0d-4969-87ea-88aa72de90c1.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: 40756b95-495c-4eff-b148-3d71dadb9bce.host (records: 1) - AAAA fd00:1122:3344:101::2e - name: 4631c4ed-9ef2-4e0a-b435-91ffad32ca29.host (records: 1) - AAAA fd00:1122:3344:101::29 - name: 4bd59032-20b7-41b4-a85a-65c69fbfa654.host (records: 1) - AAAA fd00:1122:3344:101::2c - name: 50502496-9a3e-49c4-8024-65f7a0ddac22.host (records: 1) - AAAA fd00:1122:3344:103::27 - name: 57e50d80-ce47-4e2f-8851-da0a0e7730ae.host (records: 1) - AAAA fd00:1122:3344:101::2a - name: 5cfa4c8c-14ff-4718-aee6-84de92515a30.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 5d389008-0709-41e2-b6b5-ce65d098a211.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 5dcb81fc-7d8e-46cd-9b68-1ed176d79026.host (records: 1) - AAAA fd00:1122:3344:101::2d - name: 6209097c-24e4-44d7-9afb-d3a5caeb1637.host (records: 1) - AAAA fd00:1122:3344:103::2b - name: 69f9e32e-e073-4157-bc9d-d07f878b1ca9.host (records: 1) - AAAA fd00:1122:3344:103::2e - name: 6c671c28-3cf9-44ef-b2c1-38f180161ac0.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: 711ac7f8-d19e-4572-bdb9-e9b50f6e362a.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: 73751755-adcd-4a1d-98a1-579243c4f59b.host (records: 1) - AAAA fd00:1122:3344:102::29 - name: 7380b0c6-5c23-4041-8b2c-8be4d60ff503.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: 74809be0-66e4-4aea-a771-adbd72304f1a.host (records: 1) - AAAA fd00:1122:3344:102::2b - name: 783f4dca-5916-4abf-9fe7-e7207ab4c2e7.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: 7dda6d17-0de8-414c-b796-310e2bc73ec9.host (records: 1) - AAAA fd00:1122:3344:102::2e - name: 88488dad-b6d0-4074-8409-727f223e218d.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: 8c0a1969-15b6-4165-ba6d-a27c24151037.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: 9161edf7-ccb0-42d9-bd49-47e093e12548.host (records: 1) - AAAA fd00:1122:3344:102::2c - name: 97cb7367-150d-4807-9c70-562e8105b273.host (records: 1) - AAAA fd00:1122:3344:102::25 - name: 9dc50690-f9bf-4520-bf80-051d0f465c2c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS 
ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 1374a0cd-8c56-4cb4-bfd1-92825c1e181e.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 1374a0cd-8c56-4cb4-bfd1-92825c1e181e.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 1374a0cd-8c56-4cb4-bfd1-92825c1e181e.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 3d60fafa-2b0d-4969-87ea-88aa72de90c1.host.control-plane.oxide.internal - SRV port 17000 e7835fd2-0ff2-4809-91da-dec23e98dc96.host.control-plane.oxide.internal - SRV port 17000 edebb2c6-d4b2-4a35-816c-17bfd0efa13f.host.control-plane.oxide.internal - name: _crucible._tcp.063ea59f-076b-48d6-a40a-e3980c37414e (records: 1) - SRV port 32345 063ea59f-076b-48d6-a40a-e3980c37414e.host.control-plane.oxide.internal - name: _crucible._tcp.075cf83e-1f6a-4320-820a-95e56fa98f6f (records: 1) - SRV port 32345 075cf83e-1f6a-4320-820a-95e56fa98f6f.host.control-plane.oxide.internal - name: _crucible._tcp.09523a0c-27f2-4ce1-a4cb-004f5ff5c4ef (records: 1) - SRV port 32345 09523a0c-27f2-4ce1-a4cb-004f5ff5c4ef.host.control-plane.oxide.internal - name: _crucible._tcp.09d83e68-6773-4837-8575-6a169ec608fb (records: 1) - SRV port 32345 09d83e68-6773-4837-8575-6a169ec608fb.host.control-plane.oxide.internal - name: _crucible._tcp.0b4ec47a-6710-498c-a449-e3b461421c62 (records: 1) - SRV port 32345 0b4ec47a-6710-498c-a449-e3b461421c62.host.control-plane.oxide.internal - name: _crucible._tcp.13b78322-24bd-4d90-8209-31a391ce21e8 (records: 1) - SRV port 32345 13b78322-24bd-4d90-8209-31a391ce21e8.host.control-plane.oxide.internal - name: _crucible._tcp.18edee19-6ad3-4741-8467-93f0a7241484 (records: 1) - SRV port 32345 18edee19-6ad3-4741-8467-93f0a7241484.host.control-plane.oxide.internal - name: _crucible._tcp.2522d081-993f-4dcf-a011-63b4e9d5c66e (records: 1) - SRV port 32345 2522d081-993f-4dcf-a011-63b4e9d5c66e.host.control-plane.oxide.internal - name: _crucible._tcp.3799d2f7-88d2-4e70-8727-c3000c9b8c69 (records: 1) - SRV port 32345 3799d2f7-88d2-4e70-8727-c3000c9b8c69.host.control-plane.oxide.internal - name: _crucible._tcp.395e53ba-b6ef-4072-861d-ec6367275ab5 (records: 1) - SRV port 32345 395e53ba-b6ef-4072-861d-ec6367275ab5.host.control-plane.oxide.internal - name: _crucible._tcp.3acd2d8f-cdb0-4d46-8e2b-2dd248fd12c3 (records: 1) - SRV port 32345 3acd2d8f-cdb0-4d46-8e2b-2dd248fd12c3.host.control-plane.oxide.internal - name: _crucible._tcp.40756b95-495c-4eff-b148-3d71dadb9bce (records: 1) - SRV port 32345 40756b95-495c-4eff-b148-3d71dadb9bce.host.control-plane.oxide.internal - name: _crucible._tcp.4631c4ed-9ef2-4e0a-b435-91ffad32ca29 (records: 1) - SRV port 32345 4631c4ed-9ef2-4e0a-b435-91ffad32ca29.host.control-plane.oxide.internal - name: _crucible._tcp.4bd59032-20b7-41b4-a85a-65c69fbfa654 (records: 1) - SRV port 32345 4bd59032-20b7-41b4-a85a-65c69fbfa654.host.control-plane.oxide.internal - name: _crucible._tcp.50502496-9a3e-49c4-8024-65f7a0ddac22 (records: 1) - SRV port 32345 50502496-9a3e-49c4-8024-65f7a0ddac22.host.control-plane.oxide.internal - name: _crucible._tcp.57e50d80-ce47-4e2f-8851-da0a0e7730ae (records: 1) - SRV port 32345 57e50d80-ce47-4e2f-8851-da0a0e7730ae.host.control-plane.oxide.internal - name: _crucible._tcp.5cfa4c8c-14ff-4718-aee6-84de92515a30 (records: 1) - SRV port 32345 5cfa4c8c-14ff-4718-aee6-84de92515a30.host.control-plane.oxide.internal - name: _crucible._tcp.5dcb81fc-7d8e-46cd-9b68-1ed176d79026 (records: 1) - 
SRV port 32345 5dcb81fc-7d8e-46cd-9b68-1ed176d79026.host.control-plane.oxide.internal - name: _crucible._tcp.6209097c-24e4-44d7-9afb-d3a5caeb1637 (records: 1) - SRV port 32345 6209097c-24e4-44d7-9afb-d3a5caeb1637.host.control-plane.oxide.internal - name: _crucible._tcp.69f9e32e-e073-4157-bc9d-d07f878b1ca9 (records: 1) - SRV port 32345 69f9e32e-e073-4157-bc9d-d07f878b1ca9.host.control-plane.oxide.internal - name: _crucible._tcp.73751755-adcd-4a1d-98a1-579243c4f59b (records: 1) - SRV port 32345 73751755-adcd-4a1d-98a1-579243c4f59b.host.control-plane.oxide.internal - name: _crucible._tcp.74809be0-66e4-4aea-a771-adbd72304f1a (records: 1) - SRV port 32345 74809be0-66e4-4aea-a771-adbd72304f1a.host.control-plane.oxide.internal - name: _crucible._tcp.7dda6d17-0de8-414c-b796-310e2bc73ec9 (records: 1) - SRV port 32345 7dda6d17-0de8-414c-b796-310e2bc73ec9.host.control-plane.oxide.internal - name: _crucible._tcp.9161edf7-ccb0-42d9-bd49-47e093e12548 (records: 1) - SRV port 32345 9161edf7-ccb0-42d9-bd49-47e093e12548.host.control-plane.oxide.internal - name: _crucible._tcp.97cb7367-150d-4807-9c70-562e8105b273 (records: 1) - SRV port 32345 97cb7367-150d-4807-9c70-562e8105b273.host.control-plane.oxide.internal - name: _crucible._tcp.b8caf10d-a07d-4b07-a24c-79b433dcfeb9 (records: 1) - SRV port 32345 b8caf10d-a07d-4b07-a24c-79b433dcfeb9.host.control-plane.oxide.internal - name: _crucible._tcp.b9bfd143-8075-4574-9ad2-312c0650401a (records: 1) - SRV port 32345 b9bfd143-8075-4574-9ad2-312c0650401a.host.control-plane.oxide.internal - name: _crucible._tcp.c6b14d88-dcde-4563-b74f-514fe1f70ac5 (records: 1) - SRV port 32345 c6b14d88-dcde-4563-b74f-514fe1f70ac5.host.control-plane.oxide.internal - name: _crucible._tcp.eb00a322-5216-48ed-9b09-a74e99a75f60 (records: 1) - SRV port 32345 eb00a322-5216-48ed-9b09-a74e99a75f60.host.control-plane.oxide.internal - name: _crucible._tcp.ec1e8677-dfa7-4d4d-ba15-5efbef6c0fbd (records: 1) - SRV port 32345 ec1e8677-dfa7-4d4d-ba15-5efbef6c0fbd.host.control-plane.oxide.internal * name: _external-dns._tcp (records: 2 -> 3) - SRV port 5353 1dc649b1-2ce2-4a85-bc1f-b8a3ef23a70e.host.control-plane.oxide.internal - SRV port 5353 8c0a1969-15b6-4165-ba6d-a27c24151037.host.control-plane.oxide.internal + SRV port 5353 1dc649b1-2ce2-4a85-bc1f-b8a3ef23a70e.host.control-plane.oxide.internal + SRV port 5353 8c0a1969-15b6-4165-ba6d-a27c24151037.host.control-plane.oxide.internal + SRV port 5353 fe2d5287-24e3-4071-b214-2640b097a759.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 5d389008-0709-41e2-b6b5-ce65d098a211.host.control-plane.oxide.internal - SRV port 123 6c671c28-3cf9-44ef-b2c1-38f180161ac0.host.control-plane.oxide.internal - SRV port 123 b84beeec-f02a-44b1-9f78-80f161eb3be7.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 783f4dca-5916-4abf-9fe7-e7207ab4c2e7.host.control-plane.oxide.internal - SRV port 5353 88488dad-b6d0-4074-8409-727f223e218d.host.control-plane.oxide.internal - SRV port 5353 e7711206-3b44-4c8c-b882-3c530cb7c02f.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 11bdc7ae-40b8-475b-916a-95159ef5a240.host.control-plane.oxide.internal - SRV port 12221 7380b0c6-5c23-4041-8b2c-8be4d60ff503.host.control-plane.oxide.internal - SRV port 12221 a31945b4-3d49-40dc-a6f8-d6d8657e4880.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 1374a0cd-8c56-4cb4-bfd1-92825c1e181e.host.control-plane.oxide.internal - name: _repo-depot._tcp 
(records: 3) - SRV port 12348 711ac7f8-d19e-4572-bdb9-e9b50f6e362a.sled.control-plane.oxide.internal - SRV port 12348 9dc50690-f9bf-4520-bf80-051d0f465c2c.sled.control-plane.oxide.internal - SRV port 12348 a88790de-5962-4871-8686-61c1fd5b7094.sled.control-plane.oxide.internal - name: a31945b4-3d49-40dc-a6f8-d6d8657e4880.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: a88790de-5962-4871-8686-61c1fd5b7094.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: b84beeec-f02a-44b1-9f78-80f161eb3be7.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: b8caf10d-a07d-4b07-a24c-79b433dcfeb9.host (records: 1) - AAAA fd00:1122:3344:103::2f - name: b9bfd143-8075-4574-9ad2-312c0650401a.host (records: 1) - AAAA fd00:1122:3344:102::28 - name: c6b14d88-dcde-4563-b74f-514fe1f70ac5.host (records: 1) - AAAA fd00:1122:3344:102::26 - name: e7711206-3b44-4c8c-b882-3c530cb7c02f.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: e7835fd2-0ff2-4809-91da-dec23e98dc96.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: eb00a322-5216-48ed-9b09-a74e99a75f60.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: ec1e8677-dfa7-4d4d-ba15-5efbef6c0fbd.host (records: 1) - AAAA fd00:1122:3344:103::28 - name: edebb2c6-d4b2-4a35-816c-17bfd0efa13f.host (records: 1) - AAAA fd00:1122:3344:103::25 + name: fe2d5287-24e3-4071-b214-2640b097a759.host (records: 1) + AAAA fd00:1122:3344:103::30 - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 91 (records: 103) external DNS: * DNS zone: "oxide.example": @@ -1382,10 +993,6 @@ external DNS: + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 * name: ns1 (records: 1 -> 1) - A 198.51.100.2 + A 198.51.100.1 @@ -1394,6 +1001,7 @@ external DNS: + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + unchanged names: 1 (records: 3) @@ -1867,208 +1475,15 @@ to: blueprint 2ac8c740-444d-42ff-8d66-9812a7e51288 internal DNS: * DNS zone: "control-plane.oxide.internal": - name: 063ea59f-076b-48d6-a40a-e3980c37414e.host (records: 1) - AAAA fd00:1122:3344:103::2c - name: 075cf83e-1f6a-4320-820a-95e56fa98f6f.host (records: 1) - AAAA fd00:1122:3344:103::29 - name: 09523a0c-27f2-4ce1-a4cb-004f5ff5c4ef.host (records: 1) - AAAA fd00:1122:3344:102::27 - name: 09d83e68-6773-4837-8575-6a169ec608fb.host (records: 1) - AAAA fd00:1122:3344:103::26 - name: 0b4ec47a-6710-498c-a449-e3b461421c62.host (records: 1) - AAAA fd00:1122:3344:103::2a - name: 11bdc7ae-40b8-475b-916a-95159ef5a240.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 1374a0cd-8c56-4cb4-bfd1-92825c1e181e.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: 13b78322-24bd-4d90-8209-31a391ce21e8.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: 18edee19-6ad3-4741-8467-93f0a7241484.host (records: 1) - AAAA fd00:1122:3344:101::2b - name: 1dc649b1-2ce2-4a85-bc1f-b8a3ef23a70e.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 2522d081-993f-4dcf-a011-63b4e9d5c66e.host (records: 1) - AAAA fd00:1122:3344:101::28 - name: 3799d2f7-88d2-4e70-8727-c3000c9b8c69.host (records: 1) - AAAA fd00:1122:3344:102::2d - name: 395e53ba-b6ef-4072-861d-ec6367275ab5.host (records: 1) - AAAA fd00:1122:3344:103::2d - name: 3acd2d8f-cdb0-4d46-8e2b-2dd248fd12c3.host (records: 1) - AAAA fd00:1122:3344:102::2a - name: 3d60fafa-2b0d-4969-87ea-88aa72de90c1.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: 
40756b95-495c-4eff-b148-3d71dadb9bce.host (records: 1) - AAAA fd00:1122:3344:101::2e - name: 4631c4ed-9ef2-4e0a-b435-91ffad32ca29.host (records: 1) - AAAA fd00:1122:3344:101::29 - name: 4bd59032-20b7-41b4-a85a-65c69fbfa654.host (records: 1) - AAAA fd00:1122:3344:101::2c - name: 50502496-9a3e-49c4-8024-65f7a0ddac22.host (records: 1) - AAAA fd00:1122:3344:103::27 - name: 57e50d80-ce47-4e2f-8851-da0a0e7730ae.host (records: 1) - AAAA fd00:1122:3344:101::2a - name: 5cfa4c8c-14ff-4718-aee6-84de92515a30.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 5d389008-0709-41e2-b6b5-ce65d098a211.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 5dcb81fc-7d8e-46cd-9b68-1ed176d79026.host (records: 1) - AAAA fd00:1122:3344:101::2d - name: 6209097c-24e4-44d7-9afb-d3a5caeb1637.host (records: 1) - AAAA fd00:1122:3344:103::2b - name: 69f9e32e-e073-4157-bc9d-d07f878b1ca9.host (records: 1) - AAAA fd00:1122:3344:103::2e - name: 6c671c28-3cf9-44ef-b2c1-38f180161ac0.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: 711ac7f8-d19e-4572-bdb9-e9b50f6e362a.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: 73751755-adcd-4a1d-98a1-579243c4f59b.host (records: 1) - AAAA fd00:1122:3344:102::29 - name: 7380b0c6-5c23-4041-8b2c-8be4d60ff503.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: 74809be0-66e4-4aea-a771-adbd72304f1a.host (records: 1) - AAAA fd00:1122:3344:102::2b - name: 783f4dca-5916-4abf-9fe7-e7207ab4c2e7.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: 7dda6d17-0de8-414c-b796-310e2bc73ec9.host (records: 1) - AAAA fd00:1122:3344:102::2e - name: 88488dad-b6d0-4074-8409-727f223e218d.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: 8c0a1969-15b6-4165-ba6d-a27c24151037.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: 9161edf7-ccb0-42d9-bd49-47e093e12548.host (records: 1) - AAAA fd00:1122:3344:102::2c - name: 97cb7367-150d-4807-9c70-562e8105b273.host (records: 1) - AAAA fd00:1122:3344:102::25 - name: 9dc50690-f9bf-4520-bf80-051d0f465c2c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 1374a0cd-8c56-4cb4-bfd1-92825c1e181e.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 1374a0cd-8c56-4cb4-bfd1-92825c1e181e.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 1374a0cd-8c56-4cb4-bfd1-92825c1e181e.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 3d60fafa-2b0d-4969-87ea-88aa72de90c1.host.control-plane.oxide.internal - SRV port 17000 e7835fd2-0ff2-4809-91da-dec23e98dc96.host.control-plane.oxide.internal - SRV port 17000 edebb2c6-d4b2-4a35-816c-17bfd0efa13f.host.control-plane.oxide.internal - name: _crucible._tcp.063ea59f-076b-48d6-a40a-e3980c37414e (records: 1) - SRV port 32345 063ea59f-076b-48d6-a40a-e3980c37414e.host.control-plane.oxide.internal - name: _crucible._tcp.075cf83e-1f6a-4320-820a-95e56fa98f6f (records: 1) - SRV port 32345 075cf83e-1f6a-4320-820a-95e56fa98f6f.host.control-plane.oxide.internal - name: _crucible._tcp.09523a0c-27f2-4ce1-a4cb-004f5ff5c4ef (records: 1) - SRV port 32345 09523a0c-27f2-4ce1-a4cb-004f5ff5c4ef.host.control-plane.oxide.internal - name: _crucible._tcp.09d83e68-6773-4837-8575-6a169ec608fb (records: 1) - SRV port 32345 09d83e68-6773-4837-8575-6a169ec608fb.host.control-plane.oxide.internal - name: 
_crucible._tcp.0b4ec47a-6710-498c-a449-e3b461421c62 (records: 1) - SRV port 32345 0b4ec47a-6710-498c-a449-e3b461421c62.host.control-plane.oxide.internal - name: _crucible._tcp.13b78322-24bd-4d90-8209-31a391ce21e8 (records: 1) - SRV port 32345 13b78322-24bd-4d90-8209-31a391ce21e8.host.control-plane.oxide.internal - name: _crucible._tcp.18edee19-6ad3-4741-8467-93f0a7241484 (records: 1) - SRV port 32345 18edee19-6ad3-4741-8467-93f0a7241484.host.control-plane.oxide.internal - name: _crucible._tcp.2522d081-993f-4dcf-a011-63b4e9d5c66e (records: 1) - SRV port 32345 2522d081-993f-4dcf-a011-63b4e9d5c66e.host.control-plane.oxide.internal - name: _crucible._tcp.3799d2f7-88d2-4e70-8727-c3000c9b8c69 (records: 1) - SRV port 32345 3799d2f7-88d2-4e70-8727-c3000c9b8c69.host.control-plane.oxide.internal - name: _crucible._tcp.395e53ba-b6ef-4072-861d-ec6367275ab5 (records: 1) - SRV port 32345 395e53ba-b6ef-4072-861d-ec6367275ab5.host.control-plane.oxide.internal - name: _crucible._tcp.3acd2d8f-cdb0-4d46-8e2b-2dd248fd12c3 (records: 1) - SRV port 32345 3acd2d8f-cdb0-4d46-8e2b-2dd248fd12c3.host.control-plane.oxide.internal - name: _crucible._tcp.40756b95-495c-4eff-b148-3d71dadb9bce (records: 1) - SRV port 32345 40756b95-495c-4eff-b148-3d71dadb9bce.host.control-plane.oxide.internal - name: _crucible._tcp.4631c4ed-9ef2-4e0a-b435-91ffad32ca29 (records: 1) - SRV port 32345 4631c4ed-9ef2-4e0a-b435-91ffad32ca29.host.control-plane.oxide.internal - name: _crucible._tcp.4bd59032-20b7-41b4-a85a-65c69fbfa654 (records: 1) - SRV port 32345 4bd59032-20b7-41b4-a85a-65c69fbfa654.host.control-plane.oxide.internal - name: _crucible._tcp.50502496-9a3e-49c4-8024-65f7a0ddac22 (records: 1) - SRV port 32345 50502496-9a3e-49c4-8024-65f7a0ddac22.host.control-plane.oxide.internal - name: _crucible._tcp.57e50d80-ce47-4e2f-8851-da0a0e7730ae (records: 1) - SRV port 32345 57e50d80-ce47-4e2f-8851-da0a0e7730ae.host.control-plane.oxide.internal - name: _crucible._tcp.5cfa4c8c-14ff-4718-aee6-84de92515a30 (records: 1) - SRV port 32345 5cfa4c8c-14ff-4718-aee6-84de92515a30.host.control-plane.oxide.internal - name: _crucible._tcp.5dcb81fc-7d8e-46cd-9b68-1ed176d79026 (records: 1) - SRV port 32345 5dcb81fc-7d8e-46cd-9b68-1ed176d79026.host.control-plane.oxide.internal - name: _crucible._tcp.6209097c-24e4-44d7-9afb-d3a5caeb1637 (records: 1) - SRV port 32345 6209097c-24e4-44d7-9afb-d3a5caeb1637.host.control-plane.oxide.internal - name: _crucible._tcp.69f9e32e-e073-4157-bc9d-d07f878b1ca9 (records: 1) - SRV port 32345 69f9e32e-e073-4157-bc9d-d07f878b1ca9.host.control-plane.oxide.internal - name: _crucible._tcp.73751755-adcd-4a1d-98a1-579243c4f59b (records: 1) - SRV port 32345 73751755-adcd-4a1d-98a1-579243c4f59b.host.control-plane.oxide.internal - name: _crucible._tcp.74809be0-66e4-4aea-a771-adbd72304f1a (records: 1) - SRV port 32345 74809be0-66e4-4aea-a771-adbd72304f1a.host.control-plane.oxide.internal - name: _crucible._tcp.7dda6d17-0de8-414c-b796-310e2bc73ec9 (records: 1) - SRV port 32345 7dda6d17-0de8-414c-b796-310e2bc73ec9.host.control-plane.oxide.internal - name: _crucible._tcp.9161edf7-ccb0-42d9-bd49-47e093e12548 (records: 1) - SRV port 32345 9161edf7-ccb0-42d9-bd49-47e093e12548.host.control-plane.oxide.internal - name: _crucible._tcp.97cb7367-150d-4807-9c70-562e8105b273 (records: 1) - SRV port 32345 97cb7367-150d-4807-9c70-562e8105b273.host.control-plane.oxide.internal - name: _crucible._tcp.b8caf10d-a07d-4b07-a24c-79b433dcfeb9 (records: 1) - SRV port 32345 b8caf10d-a07d-4b07-a24c-79b433dcfeb9.host.control-plane.oxide.internal - name: 
_crucible._tcp.b9bfd143-8075-4574-9ad2-312c0650401a (records: 1) - SRV port 32345 b9bfd143-8075-4574-9ad2-312c0650401a.host.control-plane.oxide.internal - name: _crucible._tcp.c6b14d88-dcde-4563-b74f-514fe1f70ac5 (records: 1) - SRV port 32345 c6b14d88-dcde-4563-b74f-514fe1f70ac5.host.control-plane.oxide.internal - name: _crucible._tcp.eb00a322-5216-48ed-9b09-a74e99a75f60 (records: 1) - SRV port 32345 eb00a322-5216-48ed-9b09-a74e99a75f60.host.control-plane.oxide.internal - name: _crucible._tcp.ec1e8677-dfa7-4d4d-ba15-5efbef6c0fbd (records: 1) - SRV port 32345 ec1e8677-dfa7-4d4d-ba15-5efbef6c0fbd.host.control-plane.oxide.internal * name: _external-dns._tcp (records: 3 -> 2) - SRV port 5353 1dc649b1-2ce2-4a85-bc1f-b8a3ef23a70e.host.control-plane.oxide.internal - SRV port 5353 8c0a1969-15b6-4165-ba6d-a27c24151037.host.control-plane.oxide.internal - SRV port 5353 fe2d5287-24e3-4071-b214-2640b097a759.host.control-plane.oxide.internal + SRV port 5353 1dc649b1-2ce2-4a85-bc1f-b8a3ef23a70e.host.control-plane.oxide.internal + SRV port 5353 fe2d5287-24e3-4071-b214-2640b097a759.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 5d389008-0709-41e2-b6b5-ce65d098a211.host.control-plane.oxide.internal - SRV port 123 6c671c28-3cf9-44ef-b2c1-38f180161ac0.host.control-plane.oxide.internal - SRV port 123 b84beeec-f02a-44b1-9f78-80f161eb3be7.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 783f4dca-5916-4abf-9fe7-e7207ab4c2e7.host.control-plane.oxide.internal - SRV port 5353 88488dad-b6d0-4074-8409-727f223e218d.host.control-plane.oxide.internal - SRV port 5353 e7711206-3b44-4c8c-b882-3c530cb7c02f.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 11bdc7ae-40b8-475b-916a-95159ef5a240.host.control-plane.oxide.internal - SRV port 12221 7380b0c6-5c23-4041-8b2c-8be4d60ff503.host.control-plane.oxide.internal - SRV port 12221 a31945b4-3d49-40dc-a6f8-d6d8657e4880.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 1374a0cd-8c56-4cb4-bfd1-92825c1e181e.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 3) - SRV port 12348 711ac7f8-d19e-4572-bdb9-e9b50f6e362a.sled.control-plane.oxide.internal - SRV port 12348 9dc50690-f9bf-4520-bf80-051d0f465c2c.sled.control-plane.oxide.internal - SRV port 12348 a88790de-5962-4871-8686-61c1fd5b7094.sled.control-plane.oxide.internal - name: a31945b4-3d49-40dc-a6f8-d6d8657e4880.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: a88790de-5962-4871-8686-61c1fd5b7094.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: b84beeec-f02a-44b1-9f78-80f161eb3be7.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: b8caf10d-a07d-4b07-a24c-79b433dcfeb9.host (records: 1) - AAAA fd00:1122:3344:103::2f - name: b9bfd143-8075-4574-9ad2-312c0650401a.host (records: 1) - AAAA fd00:1122:3344:102::28 - name: c6b14d88-dcde-4563-b74f-514fe1f70ac5.host (records: 1) - AAAA fd00:1122:3344:102::26 - name: e7711206-3b44-4c8c-b882-3c530cb7c02f.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: e7835fd2-0ff2-4809-91da-dec23e98dc96.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: eb00a322-5216-48ed-9b09-a74e99a75f60.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: ec1e8677-dfa7-4d4d-ba15-5efbef6c0fbd.host (records: 1) - AAAA fd00:1122:3344:103::28 - name: edebb2c6-d4b2-4a35-816c-17bfd0efa13f.host (records: 1) - AAAA fd00:1122:3344:103::25 - name: fe2d5287-24e3-4071-b214-2640b097a759.host (records: 1) - AAAA fd00:1122:3344:103::30 
- name: ns1 (records: 1)
- AAAA fd00:1122:3344:1::1
- name: ns2 (records: 1)
- AAAA fd00:1122:3344:2::1
- name: ns3 (records: 1)
- AAAA fd00:1122:3344:3::1
+ unchanged names: 91 (records: 103)
external DNS:
* DNS zone: "oxide.example":
@@ -2078,17 +1493,12 @@ external DNS:
- NS ns3.oxide.example
+ NS ns1.oxide.example
+ NS ns2.oxide.example
- name: example-silo.sys (records: 3)
- A 192.0.2.2
- A 192.0.2.3
- A 192.0.2.4
- name: ns1 (records: 1)
- A 198.51.100.1
* name: ns2 (records: 1 -> 1)
- A 198.51.100.2
+ A 198.51.100.3
- name: ns3 (records: 1)
- A 198.51.100.3
+ unchanged names: 2 (records: 4)
diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout
index 8f759dc2f04..d18a5821897 100644
--- a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout
+++ b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout
@@ -469,204 +469,20 @@ to: blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1
internal DNS:
* DNS zone: "control-plane.oxide.internal":
- name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1)
- AAAA fd00:1122:3344:101::27
- name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1)
- AAAA fd00:1122:3344:101::22
- name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1)
- AAAA fd00:1122:3344:102::1
- name: 2bf59188-be61-4c64-9abf-2e336d82f2e6.host (records: 1)
- AAAA fd00:1122:3344:103::28
- name: 34e5d326-b993-44d8-8777-4aee373f0ecd.host (records: 1)
- AAAA fd00:1122:3344:101::2a
- name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1)
- AAAA fd00:1122:3344:102::23
- name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1)
- AAAA fd00:1122:3344:103::22
- name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1)
- AAAA fd00:1122:3344:2::1
- name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1)
- AAAA fd00:1122:3344:102::22
- name: 50b2774d-e568-4f7b-a4a0-4e29d83e6d6f.host (records: 1)
- AAAA fd00:1122:3344:103::2c
- name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1)
- AAAA fd00:1122:3344:101::25
- name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1)
- AAAA fd00:1122:3344:102::21
- name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1)
- AAAA fd00:1122:3344:101::21
- name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1)
- AAAA fd00:1122:3344:103::26
- name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1)
- AAAA fd00:1122:3344:102::24
- name: 6e08bf87-8e2a-4950-aabb-01e4441010fe.host (records: 1)
- AAAA fd00:1122:3344:101::28
- name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1)
- AAAA fd00:1122:3344:103::24
- name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1)
- AAAA fd00:1122:3344:103::27
- name: 7d0b456b-da4e-414c-8df8-31412a5bd0fe.host (records: 1)
- AAAA fd00:1122:3344:102::2d
- name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1)
- AAAA fd00:1122:3344:101::23
- name: 80510139-0fe1-4168-936d-672cdeef55bd.host (records: 1)
- AAAA fd00:1122:3344:102::2b
- name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1)
- AAAA fd00:1122:3344:102::28
- name: 8b85951d-c237-4b2e-9bd5-a897351ac01c.host (records: 1)
- AAAA fd00:1122:3344:103::2a
- name: 8d47aad7-4874-4da3-b841-fff4c19fd9a9.host (records: 1)
- AAAA fd00:1122:3344:103::2e
- name: 9356ba47-ef3c-46c7-934d-d3acfa39e6ab.host (records: 1)
- AAAA fd00:1122:3344:101::2b
- name: 98ce6087-273d-4c08-be0d-b348ca11870a.host (records: 1)
- AAAA
fd00:1122:3344:102::2f - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: 9a3947f1-47e1-410c-ab33-3e55cc4c41c6.host (records: 1) - AAAA fd00:1122:3344:101::2e - name: 9abf0cfc-6c89-4caa-8aab-92c90661941f.host (records: 1) - AAAA fd00:1122:3344:102::2c * name: @ (records: 3 -> 2) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) - SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal - name: _crucible._tcp.2bf59188-be61-4c64-9abf-2e336d82f2e6 (records: 1) - SRV port 32345 2bf59188-be61-4c64-9abf-2e336d82f2e6.host.control-plane.oxide.internal - name: _crucible._tcp.34e5d326-b993-44d8-8777-4aee373f0ecd (records: 1) - SRV port 32345 34e5d326-b993-44d8-8777-4aee373f0ecd.host.control-plane.oxide.internal - name: _crucible._tcp.50b2774d-e568-4f7b-a4a0-4e29d83e6d6f (records: 1) - SRV port 32345 50b2774d-e568-4f7b-a4a0-4e29d83e6d6f.host.control-plane.oxide.internal - name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) - SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal - name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) - SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal - name: _crucible._tcp.6e08bf87-8e2a-4950-aabb-01e4441010fe (records: 1) - SRV port 32345 6e08bf87-8e2a-4950-aabb-01e4441010fe.host.control-plane.oxide.internal - name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) - SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal - name: _crucible._tcp.7d0b456b-da4e-414c-8df8-31412a5bd0fe (records: 1) - SRV port 32345 7d0b456b-da4e-414c-8df8-31412a5bd0fe.host.control-plane.oxide.internal - name: _crucible._tcp.80510139-0fe1-4168-936d-672cdeef55bd (records: 1) - SRV port 32345 80510139-0fe1-4168-936d-672cdeef55bd.host.control-plane.oxide.internal - name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) - SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal - name: _crucible._tcp.8b85951d-c237-4b2e-9bd5-a897351ac01c (records: 1) - SRV port 32345 8b85951d-c237-4b2e-9bd5-a897351ac01c.host.control-plane.oxide.internal - name: _crucible._tcp.8d47aad7-4874-4da3-b841-fff4c19fd9a9 (records: 1) - SRV port 32345 8d47aad7-4874-4da3-b841-fff4c19fd9a9.host.control-plane.oxide.internal - name: _crucible._tcp.9356ba47-ef3c-46c7-934d-d3acfa39e6ab (records: 1) - SRV port 32345 
9356ba47-ef3c-46c7-934d-d3acfa39e6ab.host.control-plane.oxide.internal - name: _crucible._tcp.98ce6087-273d-4c08-be0d-b348ca11870a (records: 1) - SRV port 32345 98ce6087-273d-4c08-be0d-b348ca11870a.host.control-plane.oxide.internal - name: _crucible._tcp.9a3947f1-47e1-410c-ab33-3e55cc4c41c6 (records: 1) - SRV port 32345 9a3947f1-47e1-410c-ab33-3e55cc4c41c6.host.control-plane.oxide.internal - name: _crucible._tcp.9abf0cfc-6c89-4caa-8aab-92c90661941f (records: 1) - SRV port 32345 9abf0cfc-6c89-4caa-8aab-92c90661941f.host.control-plane.oxide.internal - name: _crucible._tcp.a41824be-1ebb-4889-a631-f8dc7164cfab (records: 1) - SRV port 32345 a41824be-1ebb-4889-a631-f8dc7164cfab.host.control-plane.oxide.internal - name: _crucible._tcp.a5d2aef8-624b-41c6-9162-583654920631 (records: 1) - SRV port 32345 a5d2aef8-624b-41c6-9162-583654920631.host.control-plane.oxide.internal - name: _crucible._tcp.af4d4eba-4a44-47f4-abd4-69e21e0fcae2 (records: 1) - SRV port 32345 af4d4eba-4a44-47f4-abd4-69e21e0fcae2.host.control-plane.oxide.internal - name: _crucible._tcp.bb9974d0-428b-419f-b617-40ceec19e58a (records: 1) - SRV port 32345 bb9974d0-428b-419f-b617-40ceec19e58a.host.control-plane.oxide.internal - name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) - SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal - name: _crucible._tcp.bd945040-9f62-4e7c-af5a-f8cc4b746992 (records: 1) - SRV port 32345 bd945040-9f62-4e7c-af5a-f8cc4b746992.host.control-plane.oxide.internal - name: _crucible._tcp.c49fabd6-b2ad-44c6-b760-90e614601b2b (records: 1) - SRV port 32345 c49fabd6-b2ad-44c6-b760-90e614601b2b.host.control-plane.oxide.internal - name: _crucible._tcp.c590491d-3c92-442a-bb1f-e402363ae172 (records: 1) - SRV port 32345 c590491d-3c92-442a-bb1f-e402363ae172.host.control-plane.oxide.internal - name: _crucible._tcp.c936e8c4-bbf4-4568-8d97-f08286d0cb81 (records: 1) - SRV port 32345 c936e8c4-bbf4-4568-8d97-f08286d0cb81.host.control-plane.oxide.internal - name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) - SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal - name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) - SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal - name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) - SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal - name: _crucible._tcp.fc0bb9d1-dbdc-4f38-a630-825cde4cc5fb (records: 1) - SRV port 32345 fc0bb9d1-dbdc-4f38-a630-825cde4cc5fb.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal * name: _nameservice._tcp (records: 3 -> 2) - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal - SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal + 
SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal - SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 3) - SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal - name: a41824be-1ebb-4889-a631-f8dc7164cfab.host (records: 1) - AAAA fd00:1122:3344:102::2a - name: a5d2aef8-624b-41c6-9162-583654920631.host (records: 1) - AAAA fd00:1122:3344:101::2d - name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) - AAAA fd00:1122:3344:102::25 - name: af4d4eba-4a44-47f4-abd4-69e21e0fcae2.host (records: 1) - AAAA fd00:1122:3344:102::2e - name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: bb9974d0-428b-419f-b617-40ceec19e58a.host (records: 1) - AAAA fd00:1122:3344:103::2d - name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) - AAAA fd00:1122:3344:102::26 - name: bd945040-9f62-4e7c-af5a-f8cc4b746992.host (records: 1) - AAAA fd00:1122:3344:101::29 - name: c49fabd6-b2ad-44c6-b760-90e614601b2b.host (records: 1) - AAAA fd00:1122:3344:102::29 - name: c590491d-3c92-442a-bb1f-e402363ae172.host (records: 1) - AAAA fd00:1122:3344:103::2b - name: c936e8c4-bbf4-4568-8d97-f08286d0cb81.host (records: 1) - AAAA fd00:1122:3344:103::29 - name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) - AAAA fd00:1122:3344:102::27 - name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) - AAAA fd00:1122:3344:103::25 - name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: fc0bb9d1-dbdc-4f38-a630-825cde4cc5fb.host (records: 1) - AAAA fd00:1122:3344:101::2c * name: ns1 (records: 1 -> 1) - AAAA fd00:1122:3344:1::1 + AAAA fd00:1122:3344:2::1 @@ -675,23 +491,11 @@ internal DNS: + AAAA fd00:1122:3344:3::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 87 (records: 97) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) @@ -828,217 +632,11 @@ to: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: 
0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 2bf59188-be61-4c64-9abf-2e336d82f2e6.host (records: 1) - AAAA fd00:1122:3344:103::28 - name: 34e5d326-b993-44d8-8777-4aee373f0ecd.host (records: 1) - AAAA fd00:1122:3344:101::2a - name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 50b2774d-e568-4f7b-a4a0-4e29d83e6d6f.host (records: 1) - AAAA fd00:1122:3344:103::2c - name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) - AAAA fd00:1122:3344:103::26 - name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: 6e08bf87-8e2a-4950-aabb-01e4441010fe.host (records: 1) - AAAA fd00:1122:3344:101::28 - name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) - AAAA fd00:1122:3344:103::24 - name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) - AAAA fd00:1122:3344:103::27 - name: 7d0b456b-da4e-414c-8df8-31412a5bd0fe.host (records: 1) - AAAA fd00:1122:3344:102::2d - name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 80510139-0fe1-4168-936d-672cdeef55bd.host (records: 1) - AAAA fd00:1122:3344:102::2b - name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) - AAAA fd00:1122:3344:102::28 - name: 8b85951d-c237-4b2e-9bd5-a897351ac01c.host (records: 1) - AAAA fd00:1122:3344:103::2a - name: 8d47aad7-4874-4da3-b841-fff4c19fd9a9.host (records: 1) - AAAA fd00:1122:3344:103::2e - name: 9356ba47-ef3c-46c7-934d-d3acfa39e6ab.host (records: 1) - AAAA fd00:1122:3344:101::2b - name: 98ce6087-273d-4c08-be0d-b348ca11870a.host (records: 1) - AAAA fd00:1122:3344:102::2f - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 9a3947f1-47e1-410c-ab33-3e55cc4c41c6.host (records: 1) - AAAA fd00:1122:3344:101::2e - name: 9abf0cfc-6c89-4caa-8aab-92c90661941f.host (records: 1) - AAAA fd00:1122:3344:102::2c - name: @ (records: 2) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) - SRV port 32345 
058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal - name: _crucible._tcp.2bf59188-be61-4c64-9abf-2e336d82f2e6 (records: 1) - SRV port 32345 2bf59188-be61-4c64-9abf-2e336d82f2e6.host.control-plane.oxide.internal - name: _crucible._tcp.34e5d326-b993-44d8-8777-4aee373f0ecd (records: 1) - SRV port 32345 34e5d326-b993-44d8-8777-4aee373f0ecd.host.control-plane.oxide.internal - name: _crucible._tcp.50b2774d-e568-4f7b-a4a0-4e29d83e6d6f (records: 1) - SRV port 32345 50b2774d-e568-4f7b-a4a0-4e29d83e6d6f.host.control-plane.oxide.internal - name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) - SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal - name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) - SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal - name: _crucible._tcp.6e08bf87-8e2a-4950-aabb-01e4441010fe (records: 1) - SRV port 32345 6e08bf87-8e2a-4950-aabb-01e4441010fe.host.control-plane.oxide.internal - name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) - SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal - name: _crucible._tcp.7d0b456b-da4e-414c-8df8-31412a5bd0fe (records: 1) - SRV port 32345 7d0b456b-da4e-414c-8df8-31412a5bd0fe.host.control-plane.oxide.internal - name: _crucible._tcp.80510139-0fe1-4168-936d-672cdeef55bd (records: 1) - SRV port 32345 80510139-0fe1-4168-936d-672cdeef55bd.host.control-plane.oxide.internal - name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) - SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal - name: _crucible._tcp.8b85951d-c237-4b2e-9bd5-a897351ac01c (records: 1) - SRV port 32345 8b85951d-c237-4b2e-9bd5-a897351ac01c.host.control-plane.oxide.internal - name: _crucible._tcp.8d47aad7-4874-4da3-b841-fff4c19fd9a9 (records: 1) - SRV port 32345 8d47aad7-4874-4da3-b841-fff4c19fd9a9.host.control-plane.oxide.internal - name: _crucible._tcp.9356ba47-ef3c-46c7-934d-d3acfa39e6ab (records: 1) - SRV port 32345 9356ba47-ef3c-46c7-934d-d3acfa39e6ab.host.control-plane.oxide.internal - name: _crucible._tcp.98ce6087-273d-4c08-be0d-b348ca11870a (records: 1) - SRV port 32345 98ce6087-273d-4c08-be0d-b348ca11870a.host.control-plane.oxide.internal - name: _crucible._tcp.9a3947f1-47e1-410c-ab33-3e55cc4c41c6 (records: 1) - SRV port 32345 9a3947f1-47e1-410c-ab33-3e55cc4c41c6.host.control-plane.oxide.internal - name: _crucible._tcp.9abf0cfc-6c89-4caa-8aab-92c90661941f (records: 1) - SRV port 32345 9abf0cfc-6c89-4caa-8aab-92c90661941f.host.control-plane.oxide.internal - name: _crucible._tcp.a41824be-1ebb-4889-a631-f8dc7164cfab (records: 1) - SRV port 32345 a41824be-1ebb-4889-a631-f8dc7164cfab.host.control-plane.oxide.internal - name: _crucible._tcp.a5d2aef8-624b-41c6-9162-583654920631 (records: 1) - SRV port 32345 a5d2aef8-624b-41c6-9162-583654920631.host.control-plane.oxide.internal - name: _crucible._tcp.af4d4eba-4a44-47f4-abd4-69e21e0fcae2 (records: 1) - SRV port 32345 af4d4eba-4a44-47f4-abd4-69e21e0fcae2.host.control-plane.oxide.internal - name: _crucible._tcp.bb9974d0-428b-419f-b617-40ceec19e58a (records: 1) - SRV port 32345 bb9974d0-428b-419f-b617-40ceec19e58a.host.control-plane.oxide.internal - name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) - SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal - name: _crucible._tcp.bd945040-9f62-4e7c-af5a-f8cc4b746992 (records: 1) - SRV port 32345 
bd945040-9f62-4e7c-af5a-f8cc4b746992.host.control-plane.oxide.internal - name: _crucible._tcp.c49fabd6-b2ad-44c6-b760-90e614601b2b (records: 1) - SRV port 32345 c49fabd6-b2ad-44c6-b760-90e614601b2b.host.control-plane.oxide.internal - name: _crucible._tcp.c590491d-3c92-442a-bb1f-e402363ae172 (records: 1) - SRV port 32345 c590491d-3c92-442a-bb1f-e402363ae172.host.control-plane.oxide.internal - name: _crucible._tcp.c936e8c4-bbf4-4568-8d97-f08286d0cb81 (records: 1) - SRV port 32345 c936e8c4-bbf4-4568-8d97-f08286d0cb81.host.control-plane.oxide.internal - name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) - SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal - name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) - SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal - name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) - SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal - name: _crucible._tcp.fc0bb9d1-dbdc-4f38-a630-825cde4cc5fb (records: 1) - SRV port 32345 fc0bb9d1-dbdc-4f38-a630-825cde4cc5fb.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 2) - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal - SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 3) - SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal - name: a41824be-1ebb-4889-a631-f8dc7164cfab.host (records: 1) - AAAA fd00:1122:3344:102::2a - name: a5d2aef8-624b-41c6-9162-583654920631.host (records: 1) - AAAA fd00:1122:3344:101::2d - name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) - AAAA fd00:1122:3344:102::25 - name: af4d4eba-4a44-47f4-abd4-69e21e0fcae2.host (records: 1) - AAAA fd00:1122:3344:102::2e - name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: bb9974d0-428b-419f-b617-40ceec19e58a.host (records: 1) - AAAA fd00:1122:3344:103::2d - name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) - AAAA fd00:1122:3344:102::26 - name: bd945040-9f62-4e7c-af5a-f8cc4b746992.host (records: 1) - AAAA fd00:1122:3344:101::29 - name: c49fabd6-b2ad-44c6-b760-90e614601b2b.host (records: 1) - AAAA 
fd00:1122:3344:102::29 - name: c590491d-3c92-442a-bb1f-e402363ae172.host (records: 1) - AAAA fd00:1122:3344:103::2b - name: c936e8c4-bbf4-4568-8d97-f08286d0cb81.host (records: 1) - AAAA fd00:1122:3344:103::29 - name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) - AAAA fd00:1122:3344:102::27 - name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) - AAAA fd00:1122:3344:103::25 - name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: fc0bb9d1-dbdc-4f38-a630-825cde4cc5fb.host (records: 1) - AAAA fd00:1122:3344:101::2c - name: ns1 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 91 (records: 103) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) @@ -1185,204 +783,20 @@ to: blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 internal DNS: * DNS zone: "control-plane.oxide.internal": - name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 2bf59188-be61-4c64-9abf-2e336d82f2e6.host (records: 1) - AAAA fd00:1122:3344:103::28 - name: 34e5d326-b993-44d8-8777-4aee373f0ecd.host (records: 1) - AAAA fd00:1122:3344:101::2a - name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 50b2774d-e568-4f7b-a4a0-4e29d83e6d6f.host (records: 1) - AAAA fd00:1122:3344:103::2c - name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) - AAAA fd00:1122:3344:103::26 - name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: 6e08bf87-8e2a-4950-aabb-01e4441010fe.host (records: 1) - AAAA fd00:1122:3344:101::28 - name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) - AAAA fd00:1122:3344:103::24 - name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) - AAAA fd00:1122:3344:103::27 - name: 7d0b456b-da4e-414c-8df8-31412a5bd0fe.host (records: 1) - AAAA fd00:1122:3344:102::2d - name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 80510139-0fe1-4168-936d-672cdeef55bd.host (records: 1) - AAAA 
fd00:1122:3344:102::2b - name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) - AAAA fd00:1122:3344:102::28 - name: 8b85951d-c237-4b2e-9bd5-a897351ac01c.host (records: 1) - AAAA fd00:1122:3344:103::2a - name: 8d47aad7-4874-4da3-b841-fff4c19fd9a9.host (records: 1) - AAAA fd00:1122:3344:103::2e - name: 9356ba47-ef3c-46c7-934d-d3acfa39e6ab.host (records: 1) - AAAA fd00:1122:3344:101::2b - name: 98ce6087-273d-4c08-be0d-b348ca11870a.host (records: 1) - AAAA fd00:1122:3344:102::2f - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 9a3947f1-47e1-410c-ab33-3e55cc4c41c6.host (records: 1) - AAAA fd00:1122:3344:101::2e - name: 9abf0cfc-6c89-4caa-8aab-92c90661941f.host (records: 1) - AAAA fd00:1122:3344:102::2c * name: @ (records: 2 -> 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) - SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal - name: _crucible._tcp.2bf59188-be61-4c64-9abf-2e336d82f2e6 (records: 1) - SRV port 32345 2bf59188-be61-4c64-9abf-2e336d82f2e6.host.control-plane.oxide.internal - name: _crucible._tcp.34e5d326-b993-44d8-8777-4aee373f0ecd (records: 1) - SRV port 32345 34e5d326-b993-44d8-8777-4aee373f0ecd.host.control-plane.oxide.internal - name: _crucible._tcp.50b2774d-e568-4f7b-a4a0-4e29d83e6d6f (records: 1) - SRV port 32345 50b2774d-e568-4f7b-a4a0-4e29d83e6d6f.host.control-plane.oxide.internal - name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) - SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal - name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) - SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal - name: _crucible._tcp.6e08bf87-8e2a-4950-aabb-01e4441010fe (records: 1) - SRV port 32345 6e08bf87-8e2a-4950-aabb-01e4441010fe.host.control-plane.oxide.internal - name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) - SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal - name: _crucible._tcp.7d0b456b-da4e-414c-8df8-31412a5bd0fe (records: 1) - SRV port 32345 7d0b456b-da4e-414c-8df8-31412a5bd0fe.host.control-plane.oxide.internal - name: _crucible._tcp.80510139-0fe1-4168-936d-672cdeef55bd (records: 1) - SRV port 32345 80510139-0fe1-4168-936d-672cdeef55bd.host.control-plane.oxide.internal - name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) - SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal - name: _crucible._tcp.8b85951d-c237-4b2e-9bd5-a897351ac01c (records: 1) - SRV port 32345 
8b85951d-c237-4b2e-9bd5-a897351ac01c.host.control-plane.oxide.internal - name: _crucible._tcp.8d47aad7-4874-4da3-b841-fff4c19fd9a9 (records: 1) - SRV port 32345 8d47aad7-4874-4da3-b841-fff4c19fd9a9.host.control-plane.oxide.internal - name: _crucible._tcp.9356ba47-ef3c-46c7-934d-d3acfa39e6ab (records: 1) - SRV port 32345 9356ba47-ef3c-46c7-934d-d3acfa39e6ab.host.control-plane.oxide.internal - name: _crucible._tcp.98ce6087-273d-4c08-be0d-b348ca11870a (records: 1) - SRV port 32345 98ce6087-273d-4c08-be0d-b348ca11870a.host.control-plane.oxide.internal - name: _crucible._tcp.9a3947f1-47e1-410c-ab33-3e55cc4c41c6 (records: 1) - SRV port 32345 9a3947f1-47e1-410c-ab33-3e55cc4c41c6.host.control-plane.oxide.internal - name: _crucible._tcp.9abf0cfc-6c89-4caa-8aab-92c90661941f (records: 1) - SRV port 32345 9abf0cfc-6c89-4caa-8aab-92c90661941f.host.control-plane.oxide.internal - name: _crucible._tcp.a41824be-1ebb-4889-a631-f8dc7164cfab (records: 1) - SRV port 32345 a41824be-1ebb-4889-a631-f8dc7164cfab.host.control-plane.oxide.internal - name: _crucible._tcp.a5d2aef8-624b-41c6-9162-583654920631 (records: 1) - SRV port 32345 a5d2aef8-624b-41c6-9162-583654920631.host.control-plane.oxide.internal - name: _crucible._tcp.af4d4eba-4a44-47f4-abd4-69e21e0fcae2 (records: 1) - SRV port 32345 af4d4eba-4a44-47f4-abd4-69e21e0fcae2.host.control-plane.oxide.internal - name: _crucible._tcp.bb9974d0-428b-419f-b617-40ceec19e58a (records: 1) - SRV port 32345 bb9974d0-428b-419f-b617-40ceec19e58a.host.control-plane.oxide.internal - name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) - SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal - name: _crucible._tcp.bd945040-9f62-4e7c-af5a-f8cc4b746992 (records: 1) - SRV port 32345 bd945040-9f62-4e7c-af5a-f8cc4b746992.host.control-plane.oxide.internal - name: _crucible._tcp.c49fabd6-b2ad-44c6-b760-90e614601b2b (records: 1) - SRV port 32345 c49fabd6-b2ad-44c6-b760-90e614601b2b.host.control-plane.oxide.internal - name: _crucible._tcp.c590491d-3c92-442a-bb1f-e402363ae172 (records: 1) - SRV port 32345 c590491d-3c92-442a-bb1f-e402363ae172.host.control-plane.oxide.internal - name: _crucible._tcp.c936e8c4-bbf4-4568-8d97-f08286d0cb81 (records: 1) - SRV port 32345 c936e8c4-bbf4-4568-8d97-f08286d0cb81.host.control-plane.oxide.internal - name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) - SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal - name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) - SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal - name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) - SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal - name: _crucible._tcp.fc0bb9d1-dbdc-4f38-a630-825cde4cc5fb (records: 1) - SRV port 32345 fc0bb9d1-dbdc-4f38-a630-825cde4cc5fb.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 
f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal * name: _nameservice._tcp (records: 2 -> 3) - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 e375dd21-320b-43b7-bc92-a2c3dac9d9e1.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal - SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 3) - SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal - name: a41824be-1ebb-4889-a631-f8dc7164cfab.host (records: 1) - AAAA fd00:1122:3344:102::2a - name: a5d2aef8-624b-41c6-9162-583654920631.host (records: 1) - AAAA fd00:1122:3344:101::2d - name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) - AAAA fd00:1122:3344:102::25 - name: af4d4eba-4a44-47f4-abd4-69e21e0fcae2.host (records: 1) - AAAA fd00:1122:3344:102::2e - name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: bb9974d0-428b-419f-b617-40ceec19e58a.host (records: 1) - AAAA fd00:1122:3344:103::2d - name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) - AAAA fd00:1122:3344:102::26 - name: bd945040-9f62-4e7c-af5a-f8cc4b746992.host (records: 1) - AAAA fd00:1122:3344:101::29 - name: c49fabd6-b2ad-44c6-b760-90e614601b2b.host (records: 1) - AAAA fd00:1122:3344:102::29 - name: c590491d-3c92-442a-bb1f-e402363ae172.host (records: 1) - AAAA fd00:1122:3344:103::2b - name: c936e8c4-bbf4-4568-8d97-f08286d0cb81.host (records: 1) - AAAA fd00:1122:3344:103::29 - name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) - AAAA fd00:1122:3344:102::27 + name: e375dd21-320b-43b7-bc92-a2c3dac9d9e1.host (records: 1) + AAAA fd00:1122:3344:1::1 - name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) - AAAA fd00:1122:3344:103::25 - name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: fc0bb9d1-dbdc-4f38-a630-825cde4cc5fb.host (records: 1) - AAAA fd00:1122:3344:101::2c * name: ns1 (records: 1 -> 1) - AAAA fd00:1122:3344:2::1 + AAAA fd00:1122:3344:1::1 @@ -1391,23 +805,11 @@ internal DNS: + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + unchanged names: 87 (records: 97) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 
192.0.2.2
- A 192.0.2.3
- A 192.0.2.4
- name: ns1 (records: 1)
- A 198.51.100.1
- name: ns2 (records: 1)
- A 198.51.100.2
- name: ns3 (records: 1)
- A 198.51.100.3
+ unchanged names: 5 (records: 9)
diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-host-phase-2-source-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-host-phase-2-source-stdout
index 174b44d1c93..1dd547e695d 100644
--- a/dev-tools/reconfigurator-cli/tests/output/cmds-host-phase-2-source-stdout
+++ b/dev-tools/reconfigurator-cli/tests/output/cmds-host-phase-2-source-stdout
@@ -158,131 +158,11 @@ to: blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1
internal DNS:
DNS zone: "control-plane.oxide.internal" (unchanged)
- name: 005548be-c5e4-49ff-a27b-88f314f1bc51.host (records: 1)
- AAAA fd00:1122:3344:101::27
- name: 10676bfe-b61f-40e1-bc07-a4cb76ea1f30.host (records: 1)
- AAAA fd00:1122:3344:101::25
- name: 2205353a-e1d2-48ff-863b-9d6b1487d474.host (records: 1)
- AAAA fd00:1122:3344:101::28
- name: 2470c0fb-7af3-4b6b-8db3-424e4c297bd9.host (records: 1)
- AAAA fd00:1122:3344:101::34
- name: 289a6d4b-ddfe-4101-9715-6693398704f2.host (records: 1)
- AAAA fd00:1122:3344:101::2f
- name: 2a063d42-9caf-4364-b099-c7c302c87cdd.host (records: 1)
- AAAA fd00:1122:3344:101::33
- name: 2bc0b0c4-63a9-44cc-afff-76ce645ef1d4.host (records: 1)
- AAAA fd00:1122:3344:101::24
- name: 2dcb07a4-b9cc-4ae6-9119-ccd241acbf3a.host (records: 1)
- AAAA fd00:1122:3344:101::32
- name: 2de1c525-4e04-4ba6-ac6b-377aaa542f96.host (records: 1)
- AAAA fd00:1122:3344:101::2b
- name: 31b26053-8b94-4d8c-9e4a-d7d720afe265.host (records: 1)
- AAAA fd00:1122:3344:101::29
- name: 3c2cd97f-7b4a-4ff3-a9d5-6ce141fcdbbe.host (records: 1)
- AAAA fd00:1122:3344:101::23
- name: 3d9b7487-d7b9-4e25-960f-f2086f3e2919.host (records: 1)
- AAAA fd00:1122:3344:101::2e
- name: 3fd06852-06cb-4d8a-b4b2-eb88ff5a6035.host (records: 1)
- AAAA fd00:1122:3344:2::1
- name: 45ce130e-c5ac-4e26-ab73-7589ba634418.host (records: 1)
- AAAA fd00:1122:3344:1::1
- name: 4b19d194-8b25-4396-88da-3df1b3788601.host (records: 1)
- AAAA fd00:1122:3344:101::2d
- name: 70aab480-3d6c-47d5-aaf9-8d2ddab2931c.host (records: 1)
- AAAA fd00:1122:3344:101::26
- name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1)
- AAAA fd00:1122:3344:101::1
- name: 9b135c74-c09a-4dcc-ba19-f6f8deae135a.host (records: 1)
- AAAA fd00:1122:3344:101::21
- name: @ (records: 3)
- NS ns1.control-plane.oxide.internal
- NS ns2.control-plane.oxide.internal
- NS ns3.control-plane.oxide.internal
- name: _clickhouse-admin-single-server._tcp (records: 1)
- SRV port 8888 10676bfe-b61f-40e1-bc07-a4cb76ea1f30.host.control-plane.oxide.internal
- name: _clickhouse-native._tcp (records: 1)
- SRV port 9000 10676bfe-b61f-40e1-bc07-a4cb76ea1f30.host.control-plane.oxide.internal
- name: _clickhouse._tcp (records: 1)
- SRV port 8123 10676bfe-b61f-40e1-bc07-a4cb76ea1f30.host.control-plane.oxide.internal
- name: _crucible-pantry._tcp (records: 3)
- SRV port 17000 2de1c525-4e04-4ba6-ac6b-377aaa542f96.host.control-plane.oxide.internal
- SRV port 17000 31b26053-8b94-4d8c-9e4a-d7d720afe265.host.control-plane.oxide.internal
- SRV port 17000 f87ada51-4419-4144-86a8-e5e4ff0f64d3.host.control-plane.oxide.internal
- name: _crucible._tcp.2470c0fb-7af3-4b6b-8db3-424e4c297bd9 (records: 1)
- SRV port 32345 2470c0fb-7af3-4b6b-8db3-424e4c297bd9.host.control-plane.oxide.internal
- name: _crucible._tcp.289a6d4b-ddfe-4101-9715-6693398704f2 (records: 1)
- SRV port 32345 289a6d4b-ddfe-4101-9715-6693398704f2.host.control-plane.oxide.internal
- name:
_crucible._tcp.2a063d42-9caf-4364-b099-c7c302c87cdd (records: 1) - SRV port 32345 2a063d42-9caf-4364-b099-c7c302c87cdd.host.control-plane.oxide.internal - name: _crucible._tcp.2dcb07a4-b9cc-4ae6-9119-ccd241acbf3a (records: 1) - SRV port 32345 2dcb07a4-b9cc-4ae6-9119-ccd241acbf3a.host.control-plane.oxide.internal - name: _crucible._tcp.3d9b7487-d7b9-4e25-960f-f2086f3e2919 (records: 1) - SRV port 32345 3d9b7487-d7b9-4e25-960f-f2086f3e2919.host.control-plane.oxide.internal - name: _crucible._tcp.4b19d194-8b25-4396-88da-3df1b3788601 (records: 1) - SRV port 32345 4b19d194-8b25-4396-88da-3df1b3788601.host.control-plane.oxide.internal - name: _crucible._tcp.bbae1838-0a4c-4489-b141-5aff19c2f277 (records: 1) - SRV port 32345 bbae1838-0a4c-4489-b141-5aff19c2f277.host.control-plane.oxide.internal - name: _crucible._tcp.c204730a-0946-4793-a470-64c88e89da96 (records: 1) - SRV port 32345 c204730a-0946-4793-a470-64c88e89da96.host.control-plane.oxide.internal - name: _crucible._tcp.e0075607-793a-4348-a808-84a9dbe3e841 (records: 1) - SRV port 32345 e0075607-793a-4348-a808-84a9dbe3e841.host.control-plane.oxide.internal - name: _crucible._tcp.edce5918-6d13-4cde-9d64-54e00f4c09e6 (records: 1) - SRV port 32345 edce5918-6d13-4cde-9d64-54e00f4c09e6.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 005548be-c5e4-49ff-a27b-88f314f1bc51.host.control-plane.oxide.internal - SRV port 5353 2205353a-e1d2-48ff-863b-9d6b1487d474.host.control-plane.oxide.internal - SRV port 5353 70aab480-3d6c-47d5-aaf9-8d2ddab2931c.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 1) - SRV port 123 9b135c74-c09a-4dcc-ba19-f6f8deae135a.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 3fd06852-06cb-4d8a-b4b2-eb88ff5a6035.host.control-plane.oxide.internal - SRV port 5353 45ce130e-c5ac-4e26-ab73-7589ba634418.host.control-plane.oxide.internal - SRV port 5353 a7b7bfbe-0588-4781-9a5e-fba63584e5d2.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 2bc0b0c4-63a9-44cc-afff-76ce645ef1d4.host.control-plane.oxide.internal - SRV port 12221 3c2cd97f-7b4a-4ff3-a9d5-6ce141fcdbbe.host.control-plane.oxide.internal - SRV port 12221 c2cbbf34-b852-4164-a572-01d4d79445a1.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 10676bfe-b61f-40e1-bc07-a4cb76ea1f30.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 1) - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - name: a7b7bfbe-0588-4781-9a5e-fba63584e5d2.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: bbae1838-0a4c-4489-b141-5aff19c2f277.host (records: 1) - AAAA fd00:1122:3344:101::35 - name: c204730a-0946-4793-a470-64c88e89da96.host (records: 1) - AAAA fd00:1122:3344:101::2c - name: c2cbbf34-b852-4164-a572-01d4d79445a1.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: e0075607-793a-4348-a808-84a9dbe3e841.host (records: 1) - AAAA fd00:1122:3344:101::31 - name: edce5918-6d13-4cde-9d64-54e00f4c09e6.host (records: 1) - AAAA fd00:1122:3344:101::30 - name: f87ada51-4419-4144-86a8-e5e4ff0f64d3.host (records: 1) - AAAA fd00:1122:3344:101::2a - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 49 (records: 59) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: 
example-silo.sys (records: 3) - A 192.0.2.4 - A 192.0.2.3 - A 192.0.2.2 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) @@ -438,131 +318,11 @@ to: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 005548be-c5e4-49ff-a27b-88f314f1bc51.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: 10676bfe-b61f-40e1-bc07-a4cb76ea1f30.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 2205353a-e1d2-48ff-863b-9d6b1487d474.host (records: 1) - AAAA fd00:1122:3344:101::28 - name: 2470c0fb-7af3-4b6b-8db3-424e4c297bd9.host (records: 1) - AAAA fd00:1122:3344:101::34 - name: 289a6d4b-ddfe-4101-9715-6693398704f2.host (records: 1) - AAAA fd00:1122:3344:101::2f - name: 2a063d42-9caf-4364-b099-c7c302c87cdd.host (records: 1) - AAAA fd00:1122:3344:101::33 - name: 2bc0b0c4-63a9-44cc-afff-76ce645ef1d4.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: 2dcb07a4-b9cc-4ae6-9119-ccd241acbf3a.host (records: 1) - AAAA fd00:1122:3344:101::32 - name: 2de1c525-4e04-4ba6-ac6b-377aaa542f96.host (records: 1) - AAAA fd00:1122:3344:101::2b - name: 31b26053-8b94-4d8c-9e4a-d7d720afe265.host (records: 1) - AAAA fd00:1122:3344:101::29 - name: 3c2cd97f-7b4a-4ff3-a9d5-6ce141fcdbbe.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 3d9b7487-d7b9-4e25-960f-f2086f3e2919.host (records: 1) - AAAA fd00:1122:3344:101::2e - name: 3fd06852-06cb-4d8a-b4b2-eb88ff5a6035.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 45ce130e-c5ac-4e26-ab73-7589ba634418.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: 4b19d194-8b25-4396-88da-3df1b3788601.host (records: 1) - AAAA fd00:1122:3344:101::2d - name: 70aab480-3d6c-47d5-aaf9-8d2ddab2931c.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 9b135c74-c09a-4dcc-ba19-f6f8deae135a.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 10676bfe-b61f-40e1-bc07-a4cb76ea1f30.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 10676bfe-b61f-40e1-bc07-a4cb76ea1f30.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 10676bfe-b61f-40e1-bc07-a4cb76ea1f30.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 2de1c525-4e04-4ba6-ac6b-377aaa542f96.host.control-plane.oxide.internal - SRV port 17000 31b26053-8b94-4d8c-9e4a-d7d720afe265.host.control-plane.oxide.internal - SRV port 17000 f87ada51-4419-4144-86a8-e5e4ff0f64d3.host.control-plane.oxide.internal - name: _crucible._tcp.2470c0fb-7af3-4b6b-8db3-424e4c297bd9 (records: 1) - SRV port 32345 2470c0fb-7af3-4b6b-8db3-424e4c297bd9.host.control-plane.oxide.internal - name: _crucible._tcp.289a6d4b-ddfe-4101-9715-6693398704f2 (records: 1) - SRV port 32345 289a6d4b-ddfe-4101-9715-6693398704f2.host.control-plane.oxide.internal - name: _crucible._tcp.2a063d42-9caf-4364-b099-c7c302c87cdd (records: 1) - SRV port 32345 2a063d42-9caf-4364-b099-c7c302c87cdd.host.control-plane.oxide.internal - name: _crucible._tcp.2dcb07a4-b9cc-4ae6-9119-ccd241acbf3a (records: 1) - SRV port 32345 2dcb07a4-b9cc-4ae6-9119-ccd241acbf3a.host.control-plane.oxide.internal - name: 
_crucible._tcp.3d9b7487-d7b9-4e25-960f-f2086f3e2919 (records: 1) - SRV port 32345 3d9b7487-d7b9-4e25-960f-f2086f3e2919.host.control-plane.oxide.internal - name: _crucible._tcp.4b19d194-8b25-4396-88da-3df1b3788601 (records: 1) - SRV port 32345 4b19d194-8b25-4396-88da-3df1b3788601.host.control-plane.oxide.internal - name: _crucible._tcp.bbae1838-0a4c-4489-b141-5aff19c2f277 (records: 1) - SRV port 32345 bbae1838-0a4c-4489-b141-5aff19c2f277.host.control-plane.oxide.internal - name: _crucible._tcp.c204730a-0946-4793-a470-64c88e89da96 (records: 1) - SRV port 32345 c204730a-0946-4793-a470-64c88e89da96.host.control-plane.oxide.internal - name: _crucible._tcp.e0075607-793a-4348-a808-84a9dbe3e841 (records: 1) - SRV port 32345 e0075607-793a-4348-a808-84a9dbe3e841.host.control-plane.oxide.internal - name: _crucible._tcp.edce5918-6d13-4cde-9d64-54e00f4c09e6 (records: 1) - SRV port 32345 edce5918-6d13-4cde-9d64-54e00f4c09e6.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 005548be-c5e4-49ff-a27b-88f314f1bc51.host.control-plane.oxide.internal - SRV port 5353 2205353a-e1d2-48ff-863b-9d6b1487d474.host.control-plane.oxide.internal - SRV port 5353 70aab480-3d6c-47d5-aaf9-8d2ddab2931c.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 1) - SRV port 123 9b135c74-c09a-4dcc-ba19-f6f8deae135a.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 3fd06852-06cb-4d8a-b4b2-eb88ff5a6035.host.control-plane.oxide.internal - SRV port 5353 45ce130e-c5ac-4e26-ab73-7589ba634418.host.control-plane.oxide.internal - SRV port 5353 a7b7bfbe-0588-4781-9a5e-fba63584e5d2.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 2bc0b0c4-63a9-44cc-afff-76ce645ef1d4.host.control-plane.oxide.internal - SRV port 12221 3c2cd97f-7b4a-4ff3-a9d5-6ce141fcdbbe.host.control-plane.oxide.internal - SRV port 12221 c2cbbf34-b852-4164-a572-01d4d79445a1.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 10676bfe-b61f-40e1-bc07-a4cb76ea1f30.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 1) - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - name: a7b7bfbe-0588-4781-9a5e-fba63584e5d2.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: bbae1838-0a4c-4489-b141-5aff19c2f277.host (records: 1) - AAAA fd00:1122:3344:101::35 - name: c204730a-0946-4793-a470-64c88e89da96.host (records: 1) - AAAA fd00:1122:3344:101::2c - name: c2cbbf34-b852-4164-a572-01d4d79445a1.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: e0075607-793a-4348-a808-84a9dbe3e841.host (records: 1) - AAAA fd00:1122:3344:101::31 - name: edce5918-6d13-4cde-9d64-54e00f4c09e6.host (records: 1) - AAAA fd00:1122:3344:101::30 - name: f87ada51-4419-4144-86a8-e5e4ff0f64d3.host (records: 1) - AAAA fd00:1122:3344:101::2a - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 49 (records: 59) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.4 - A 192.0.2.3 - A 192.0.2.2 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) @@ -868,131 +628,11 @@ to: blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 internal DNS: DNS 
zone: "control-plane.oxide.internal" (unchanged) - name: 005548be-c5e4-49ff-a27b-88f314f1bc51.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: 10676bfe-b61f-40e1-bc07-a4cb76ea1f30.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 2205353a-e1d2-48ff-863b-9d6b1487d474.host (records: 1) - AAAA fd00:1122:3344:101::28 - name: 2470c0fb-7af3-4b6b-8db3-424e4c297bd9.host (records: 1) - AAAA fd00:1122:3344:101::34 - name: 289a6d4b-ddfe-4101-9715-6693398704f2.host (records: 1) - AAAA fd00:1122:3344:101::2f - name: 2a063d42-9caf-4364-b099-c7c302c87cdd.host (records: 1) - AAAA fd00:1122:3344:101::33 - name: 2bc0b0c4-63a9-44cc-afff-76ce645ef1d4.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: 2dcb07a4-b9cc-4ae6-9119-ccd241acbf3a.host (records: 1) - AAAA fd00:1122:3344:101::32 - name: 2de1c525-4e04-4ba6-ac6b-377aaa542f96.host (records: 1) - AAAA fd00:1122:3344:101::2b - name: 31b26053-8b94-4d8c-9e4a-d7d720afe265.host (records: 1) - AAAA fd00:1122:3344:101::29 - name: 3c2cd97f-7b4a-4ff3-a9d5-6ce141fcdbbe.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 3d9b7487-d7b9-4e25-960f-f2086f3e2919.host (records: 1) - AAAA fd00:1122:3344:101::2e - name: 3fd06852-06cb-4d8a-b4b2-eb88ff5a6035.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 45ce130e-c5ac-4e26-ab73-7589ba634418.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: 4b19d194-8b25-4396-88da-3df1b3788601.host (records: 1) - AAAA fd00:1122:3344:101::2d - name: 70aab480-3d6c-47d5-aaf9-8d2ddab2931c.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 9b135c74-c09a-4dcc-ba19-f6f8deae135a.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 10676bfe-b61f-40e1-bc07-a4cb76ea1f30.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 10676bfe-b61f-40e1-bc07-a4cb76ea1f30.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 10676bfe-b61f-40e1-bc07-a4cb76ea1f30.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 2de1c525-4e04-4ba6-ac6b-377aaa542f96.host.control-plane.oxide.internal - SRV port 17000 31b26053-8b94-4d8c-9e4a-d7d720afe265.host.control-plane.oxide.internal - SRV port 17000 f87ada51-4419-4144-86a8-e5e4ff0f64d3.host.control-plane.oxide.internal - name: _crucible._tcp.2470c0fb-7af3-4b6b-8db3-424e4c297bd9 (records: 1) - SRV port 32345 2470c0fb-7af3-4b6b-8db3-424e4c297bd9.host.control-plane.oxide.internal - name: _crucible._tcp.289a6d4b-ddfe-4101-9715-6693398704f2 (records: 1) - SRV port 32345 289a6d4b-ddfe-4101-9715-6693398704f2.host.control-plane.oxide.internal - name: _crucible._tcp.2a063d42-9caf-4364-b099-c7c302c87cdd (records: 1) - SRV port 32345 2a063d42-9caf-4364-b099-c7c302c87cdd.host.control-plane.oxide.internal - name: _crucible._tcp.2dcb07a4-b9cc-4ae6-9119-ccd241acbf3a (records: 1) - SRV port 32345 2dcb07a4-b9cc-4ae6-9119-ccd241acbf3a.host.control-plane.oxide.internal - name: _crucible._tcp.3d9b7487-d7b9-4e25-960f-f2086f3e2919 (records: 1) - SRV port 32345 3d9b7487-d7b9-4e25-960f-f2086f3e2919.host.control-plane.oxide.internal - name: _crucible._tcp.4b19d194-8b25-4396-88da-3df1b3788601 (records: 1) - SRV port 32345 4b19d194-8b25-4396-88da-3df1b3788601.host.control-plane.oxide.internal - name: 
_crucible._tcp.bbae1838-0a4c-4489-b141-5aff19c2f277 (records: 1) - SRV port 32345 bbae1838-0a4c-4489-b141-5aff19c2f277.host.control-plane.oxide.internal - name: _crucible._tcp.c204730a-0946-4793-a470-64c88e89da96 (records: 1) - SRV port 32345 c204730a-0946-4793-a470-64c88e89da96.host.control-plane.oxide.internal - name: _crucible._tcp.e0075607-793a-4348-a808-84a9dbe3e841 (records: 1) - SRV port 32345 e0075607-793a-4348-a808-84a9dbe3e841.host.control-plane.oxide.internal - name: _crucible._tcp.edce5918-6d13-4cde-9d64-54e00f4c09e6 (records: 1) - SRV port 32345 edce5918-6d13-4cde-9d64-54e00f4c09e6.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 005548be-c5e4-49ff-a27b-88f314f1bc51.host.control-plane.oxide.internal - SRV port 5353 2205353a-e1d2-48ff-863b-9d6b1487d474.host.control-plane.oxide.internal - SRV port 5353 70aab480-3d6c-47d5-aaf9-8d2ddab2931c.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 1) - SRV port 123 9b135c74-c09a-4dcc-ba19-f6f8deae135a.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 3fd06852-06cb-4d8a-b4b2-eb88ff5a6035.host.control-plane.oxide.internal - SRV port 5353 45ce130e-c5ac-4e26-ab73-7589ba634418.host.control-plane.oxide.internal - SRV port 5353 a7b7bfbe-0588-4781-9a5e-fba63584e5d2.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 2bc0b0c4-63a9-44cc-afff-76ce645ef1d4.host.control-plane.oxide.internal - SRV port 12221 3c2cd97f-7b4a-4ff3-a9d5-6ce141fcdbbe.host.control-plane.oxide.internal - SRV port 12221 c2cbbf34-b852-4164-a572-01d4d79445a1.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 10676bfe-b61f-40e1-bc07-a4cb76ea1f30.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 1) - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - name: a7b7bfbe-0588-4781-9a5e-fba63584e5d2.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: bbae1838-0a4c-4489-b141-5aff19c2f277.host (records: 1) - AAAA fd00:1122:3344:101::35 - name: c204730a-0946-4793-a470-64c88e89da96.host (records: 1) - AAAA fd00:1122:3344:101::2c - name: c2cbbf34-b852-4164-a572-01d4d79445a1.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: e0075607-793a-4348-a808-84a9dbe3e841.host (records: 1) - AAAA fd00:1122:3344:101::31 - name: edce5918-6d13-4cde-9d64-54e00f4c09e6.host (records: 1) - AAAA fd00:1122:3344:101::30 - name: f87ada51-4419-4144-86a8-e5e4ff0f64d3.host (records: 1) - AAAA fd00:1122:3344:101::2a - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 49 (records: 59) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.4 - A 192.0.2.3 - A 192.0.2.2 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) @@ -1148,131 +788,11 @@ to: blueprint df06bb57-ad42-4431-9206-abff322896c7 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 005548be-c5e4-49ff-a27b-88f314f1bc51.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: 10676bfe-b61f-40e1-bc07-a4cb76ea1f30.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 2205353a-e1d2-48ff-863b-9d6b1487d474.host (records: 1) - AAAA 
fd00:1122:3344:101::28 - name: 2470c0fb-7af3-4b6b-8db3-424e4c297bd9.host (records: 1) - AAAA fd00:1122:3344:101::34 - name: 289a6d4b-ddfe-4101-9715-6693398704f2.host (records: 1) - AAAA fd00:1122:3344:101::2f - name: 2a063d42-9caf-4364-b099-c7c302c87cdd.host (records: 1) - AAAA fd00:1122:3344:101::33 - name: 2bc0b0c4-63a9-44cc-afff-76ce645ef1d4.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: 2dcb07a4-b9cc-4ae6-9119-ccd241acbf3a.host (records: 1) - AAAA fd00:1122:3344:101::32 - name: 2de1c525-4e04-4ba6-ac6b-377aaa542f96.host (records: 1) - AAAA fd00:1122:3344:101::2b - name: 31b26053-8b94-4d8c-9e4a-d7d720afe265.host (records: 1) - AAAA fd00:1122:3344:101::29 - name: 3c2cd97f-7b4a-4ff3-a9d5-6ce141fcdbbe.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 3d9b7487-d7b9-4e25-960f-f2086f3e2919.host (records: 1) - AAAA fd00:1122:3344:101::2e - name: 3fd06852-06cb-4d8a-b4b2-eb88ff5a6035.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 45ce130e-c5ac-4e26-ab73-7589ba634418.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: 4b19d194-8b25-4396-88da-3df1b3788601.host (records: 1) - AAAA fd00:1122:3344:101::2d - name: 70aab480-3d6c-47d5-aaf9-8d2ddab2931c.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 9b135c74-c09a-4dcc-ba19-f6f8deae135a.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 10676bfe-b61f-40e1-bc07-a4cb76ea1f30.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 10676bfe-b61f-40e1-bc07-a4cb76ea1f30.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 10676bfe-b61f-40e1-bc07-a4cb76ea1f30.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 2de1c525-4e04-4ba6-ac6b-377aaa542f96.host.control-plane.oxide.internal - SRV port 17000 31b26053-8b94-4d8c-9e4a-d7d720afe265.host.control-plane.oxide.internal - SRV port 17000 f87ada51-4419-4144-86a8-e5e4ff0f64d3.host.control-plane.oxide.internal - name: _crucible._tcp.2470c0fb-7af3-4b6b-8db3-424e4c297bd9 (records: 1) - SRV port 32345 2470c0fb-7af3-4b6b-8db3-424e4c297bd9.host.control-plane.oxide.internal - name: _crucible._tcp.289a6d4b-ddfe-4101-9715-6693398704f2 (records: 1) - SRV port 32345 289a6d4b-ddfe-4101-9715-6693398704f2.host.control-plane.oxide.internal - name: _crucible._tcp.2a063d42-9caf-4364-b099-c7c302c87cdd (records: 1) - SRV port 32345 2a063d42-9caf-4364-b099-c7c302c87cdd.host.control-plane.oxide.internal - name: _crucible._tcp.2dcb07a4-b9cc-4ae6-9119-ccd241acbf3a (records: 1) - SRV port 32345 2dcb07a4-b9cc-4ae6-9119-ccd241acbf3a.host.control-plane.oxide.internal - name: _crucible._tcp.3d9b7487-d7b9-4e25-960f-f2086f3e2919 (records: 1) - SRV port 32345 3d9b7487-d7b9-4e25-960f-f2086f3e2919.host.control-plane.oxide.internal - name: _crucible._tcp.4b19d194-8b25-4396-88da-3df1b3788601 (records: 1) - SRV port 32345 4b19d194-8b25-4396-88da-3df1b3788601.host.control-plane.oxide.internal - name: _crucible._tcp.bbae1838-0a4c-4489-b141-5aff19c2f277 (records: 1) - SRV port 32345 bbae1838-0a4c-4489-b141-5aff19c2f277.host.control-plane.oxide.internal - name: _crucible._tcp.c204730a-0946-4793-a470-64c88e89da96 (records: 1) - SRV port 32345 c204730a-0946-4793-a470-64c88e89da96.host.control-plane.oxide.internal - name: 
_crucible._tcp.e0075607-793a-4348-a808-84a9dbe3e841 (records: 1) - SRV port 32345 e0075607-793a-4348-a808-84a9dbe3e841.host.control-plane.oxide.internal - name: _crucible._tcp.edce5918-6d13-4cde-9d64-54e00f4c09e6 (records: 1) - SRV port 32345 edce5918-6d13-4cde-9d64-54e00f4c09e6.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 005548be-c5e4-49ff-a27b-88f314f1bc51.host.control-plane.oxide.internal - SRV port 5353 2205353a-e1d2-48ff-863b-9d6b1487d474.host.control-plane.oxide.internal - SRV port 5353 70aab480-3d6c-47d5-aaf9-8d2ddab2931c.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 1) - SRV port 123 9b135c74-c09a-4dcc-ba19-f6f8deae135a.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 3fd06852-06cb-4d8a-b4b2-eb88ff5a6035.host.control-plane.oxide.internal - SRV port 5353 45ce130e-c5ac-4e26-ab73-7589ba634418.host.control-plane.oxide.internal - SRV port 5353 a7b7bfbe-0588-4781-9a5e-fba63584e5d2.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 2bc0b0c4-63a9-44cc-afff-76ce645ef1d4.host.control-plane.oxide.internal - SRV port 12221 3c2cd97f-7b4a-4ff3-a9d5-6ce141fcdbbe.host.control-plane.oxide.internal - SRV port 12221 c2cbbf34-b852-4164-a572-01d4d79445a1.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 10676bfe-b61f-40e1-bc07-a4cb76ea1f30.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 1) - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - name: a7b7bfbe-0588-4781-9a5e-fba63584e5d2.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: bbae1838-0a4c-4489-b141-5aff19c2f277.host (records: 1) - AAAA fd00:1122:3344:101::35 - name: c204730a-0946-4793-a470-64c88e89da96.host (records: 1) - AAAA fd00:1122:3344:101::2c - name: c2cbbf34-b852-4164-a572-01d4d79445a1.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: e0075607-793a-4348-a808-84a9dbe3e841.host (records: 1) - AAAA fd00:1122:3344:101::31 - name: edce5918-6d13-4cde-9d64-54e00f4c09e6.host (records: 1) - AAAA fd00:1122:3344:101::30 - name: f87ada51-4419-4144-86a8-e5e4ff0f64d3.host (records: 1) - AAAA fd00:1122:3344:101::2a - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 49 (records: 59) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.4 - A 192.0.2.3 - A 192.0.2.2 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout index 6a3a05676b0..39c1f30e8b9 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout @@ -704,115 +704,11 @@ to: blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host 
(records: 1) - AAAA fd00:1122:3344:102::23 - name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) - AAAA fd00:1122:3344:103::24 - name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) - SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal - name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) - SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal - name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) - SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal - SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal - SRV port 12221 
466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 3) - SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal - name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) - AAAA fd00:1122:3344:102::25 - name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) - AAAA fd00:1122:3344:102::26 - name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) - AAAA fd00:1122:3344:103::25 - name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 39 (records: 53) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) @@ -910,112 +806,11 @@ to: blueprint 626487fa-7139-45ec-8416-902271fc730b internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) - AAAA fd00:1122:3344:103::24 - name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 
353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) - SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal - name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) - SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal - name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) - SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal - SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal - SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 2) - SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal - SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal - name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) - AAAA fd00:1122:3344:102::25 - name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) - AAAA fd00:1122:3344:102::26 - name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) - AAAA fd00:1122:3344:103::25 - name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 38 (records: 51) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS 
ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) @@ -1247,115 +1042,11 @@ to: blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) - AAAA fd00:1122:3344:103::24 - name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) - SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal - name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) - SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal - name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) - SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 
6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal - SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal - SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 3) - SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal - name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) - AAAA fd00:1122:3344:102::25 - name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) - AAAA fd00:1122:3344:102::26 - name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) - AAAA fd00:1122:3344:103::25 - name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 39 (records: 53) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) @@ -1655,115 +1346,11 @@ to: blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) - AAAA 
fd00:1122:3344:102::24 - name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) - AAAA fd00:1122:3344:103::24 - name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) - SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal - name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) - SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal - name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) - SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal - SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal - SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 3) - SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal - name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) - AAAA fd00:1122:3344:102::25 - name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: 
bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) - AAAA fd00:1122:3344:102::26 - name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) - AAAA fd00:1122:3344:103::25 - name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 39 (records: 53) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) @@ -2081,115 +1668,11 @@ to: blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) - AAAA fd00:1122:3344:103::24 - name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) - SRV port 32345 
5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal - name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) - SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal - name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) - SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal - SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal - SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 3) - SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal - name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) - AAAA fd00:1122:3344:102::25 - name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) - AAAA fd00:1122:3344:102::26 - name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) - AAAA fd00:1122:3344:103::25 - name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 39 (records: 53) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) @@ -2439,115 +1922,11 @@ to: blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) - AAAA 
fd00:1122:3344:101::22 - name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) - AAAA fd00:1122:3344:103::24 - name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) - SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal - name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) - SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal - name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) - SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal - SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 
[... remaining "- name: ..." entries for unchanged names elided ...]
+ unchanged names: 39 (records: 53)
external DNS:
DNS zone: "oxide.example" (unchanged)
[... "- name: ..." entries for unchanged names elided ...]
+ unchanged names: 5 (records: 9)
@@ -2814,115 +2193,11 @@ to: blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300
internal DNS:
DNS zone: "control-plane.oxide.internal" (unchanged)
[... "- name: ..." entries for unchanged names elided ...]
+ unchanged names: 39 (records: 53)
external DNS:
DNS zone: "oxide.example" (unchanged)
[... "- name: ..." entries for unchanged names elided ...]
+ unchanged names: 5 (records: 9)
@@ -3071,118 +2346,11 @@ to: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839
internal DNS:
DNS zone: "control-plane.oxide.internal" (unchanged)
[... "- name: ..." entries for unchanged names elided ...]
+ unchanged names: 40 (records: 55)
external DNS:
DNS zone: "oxide.example" (unchanged)
[... "- name: ..." entries for unchanged names elided ...]
+ unchanged names: 5 (records: 9)
@@ -3298,60 +2466,8 @@ to: blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c
internal DNS:
* DNS zone: "control-plane.oxide.internal":
[... "- name: ..." entries for unchanged names elided ...]
+ name: 6af1f5f5-6a16-40f0-a830-651e738967aa.host (records: 1)
+ AAAA fd00:1122:3344:104::21
[... "- name: ..." entries for unchanged names elided ...]
* name: _internal-ntp._tcp (records: 3 -> 4)
- SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal
- SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal
@@ -3360,62 +2476,11 @@ internal DNS:
+ SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal
+ SRV port 123 6af1f5f5-6a16-40f0-a830-651e738967aa.host.control-plane.oxide.internal
+ SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal
[... "- name: ..." entries for unchanged names elided ...]
+ unchanged names: 39 (records: 52)
external DNS:
DNS zone: "oxide.example" (unchanged)
[... "- name: ..." entries for unchanged names elided ...]
+ unchanged names: 5 (records: 9)
diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout
index 3bd4b1ddc65..7c6ef00867e 100644
--- a/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout
+++ b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout
@@ -360,76 +360,10 @@ to: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4
internal DNS:
* DNS zone: "control-plane.oxide.internal":
[... "- name: ..." entries for unchanged names elided ...]
* name: _internal-ntp._tcp (records: 7 -> 6)
- SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal
- SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal
@@ -444,74 +378,13 @@ internal DNS:
+ SRV port 123 c800ba17-240e-4b72-8ae6-afc30b6baa96.host.control-plane.oxide.internal
+ SRV port 123 dd66f033-4fe8-438e-afb4-29d3561d4c3e.host.control-plane.oxide.internal
+ SRV port 123 e8fe709c-725f-4bb2-b714-ffcda13a9e54.host.control-plane.oxide.internal
[... "- name: ..." entries for unchanged names elided ...]
+ unchanged names: 50 (records: 65)
external DNS:
DNS zone: "oxide.example" (unchanged)
[... "- name: ..." entries for unchanged names elided ...]
+ unchanged names: 5 (records: 9)
@@ -618,148 +491,11 @@ to: blueprint af934083-59b5-4bf6-8966-6fb5292c29e1
internal DNS:
DNS zone: "control-plane.oxide.internal" (unchanged)
[... "- name: ..." entries for unchanged names elided ...]
+ unchanged names: 52 (records: 73)
external DNS:
DNS zone: "oxide.example" (unchanged)
[... "- name: ..." entries for unchanged names elided ...]
+ unchanged names: 5 (records: 9)
diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-set-mgs-updates-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-set-mgs-updates-stdout
index 451abc426ae..06bc28648a5 100644
--- a/dev-tools/reconfigurator-cli/tests/output/cmds-set-mgs-updates-stdout
+++ b/dev-tools/reconfigurator-cli/tests/output/cmds-set-mgs-updates-stdout
@@ -457,139 +457,11 @@ to: blueprint cca24b71-09b5-4042-9185-b33e9f2ebba0
internal DNS:
DNS zone: "control-plane.oxide.internal" (unchanged)
[... "- name: ..." entries for unchanged names elided ...]
+ unchanged names: 51 (records: 65)
external DNS:
DNS zone: "oxide.example" (unchanged)
[... "- name: ..." entries for unchanged names elided ...]
+ unchanged names: 5 (records: 9)
@@ -622,139 +494,11 @@ to: blueprint ad97e762-7bf1-45a6-a98f-60afb7e491c0
internal DNS:
DNS zone: "control-plane.oxide.internal" (unchanged)
[... "- name: ..." entries for unchanged names elided ...]
+ unchanged names: 51 (records: 65)
external DNS:
DNS zone: "oxide.example" (unchanged)
[... "- name: ..." entries for unchanged names elided ...]
+ unchanged names: 5 (records: 9)
@@ -1005,139 +749,11 @@ to: blueprint 5bf974f3-81f9-455b-b24e-3099f765664c
internal DNS:
DNS zone: "control-plane.oxide.internal" (unchanged)
[... "- name: ..." entries for unchanged names elided ...]
+ unchanged names: 51 (records: 65)
external DNS:
DNS zone: "oxide.example" (unchanged)
[... "- name: ..." entries for unchanged names elided ...]
+ unchanged names: 5 (records: 9)
@@ -1171,139 +787,11 @@ to: blueprint cca24b71-09b5-4042-9185-b33e9f2ebba0
internal DNS:
DNS zone: "control-plane.oxide.internal" (unchanged)
[... "- name: ..." entries for unchanged names elided ...]
+ unchanged names: 51 (records: 65)
external DNS:
DNS zone: "oxide.example" (unchanged)
[... "- name: ..." entries for unchanged names elided ...]
+ unchanged names: 5 (records: 9)
@@ -1556,139 +1044,11 @@ to: blueprint 1b837a27-3be1-4fcb-8499-a921c839e1d0
internal DNS:
DNS zone: "control-plane.oxide.internal" (unchanged)
[... "- name: ..." entries for unchanged names elided ...]
+ unchanged names: 51 (records: 65)
external DNS:
DNS zone: "oxide.example" (unchanged)
[... "- name: ..." entries for unchanged names elided ...]
+ unchanged names: 5 (records: 9)
@@ -1937,139 +1297,11 @@ to: blueprint 3682a71b-c6ca-4b7e-8f84-16df80c85960
internal DNS:
DNS zone: "control-plane.oxide.internal" (unchanged)
_clickhouse-native._tcp (records: 1) - SRV port 9000 45b67899-01e0-43fd-8b92-46db32cd2189.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 45b67899-01e0-43fd-8b92-46db32cd2189.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 7ce8368d-16d2-4a6d-92be-cd9de74b25c4.host.control-plane.oxide.internal - SRV port 17000 8e939783-d692-4c5e-b133-6d285450f855.host.control-plane.oxide.internal - SRV port 17000 ed1b15aa-7373-45bc-b2a2-50f49b5b4ada.host.control-plane.oxide.internal - name: _crucible._tcp.08514090-6acd-4177-90ea-f6deb220fd88 (records: 1) - SRV port 32345 08514090-6acd-4177-90ea-f6deb220fd88.host.control-plane.oxide.internal - name: _crucible._tcp.11e5e2a1-9883-4abf-97ec-1d0ab8eebf66 (records: 1) - SRV port 32345 11e5e2a1-9883-4abf-97ec-1d0ab8eebf66.host.control-plane.oxide.internal - name: _crucible._tcp.2e1d5343-eaaf-42de-8e67-a67e54aea756 (records: 1) - SRV port 32345 2e1d5343-eaaf-42de-8e67-a67e54aea756.host.control-plane.oxide.internal - name: _crucible._tcp.31591457-5a3c-4356-9dd0-61a6cf67d14a (records: 1) - SRV port 32345 31591457-5a3c-4356-9dd0-61a6cf67d14a.host.control-plane.oxide.internal - name: _crucible._tcp.851c5a9c-c055-4dd0-aaf7-5739134e85b1 (records: 1) - SRV port 32345 851c5a9c-c055-4dd0-aaf7-5739134e85b1.host.control-plane.oxide.internal - name: _crucible._tcp.93bd73ae-fd4f-4e2c-a239-4ebdebb78533 (records: 1) - SRV port 32345 93bd73ae-fd4f-4e2c-a239-4ebdebb78533.host.control-plane.oxide.internal - name: _crucible._tcp.9777ed35-6781-4129-8204-9bd58176d6cc (records: 1) - SRV port 32345 9777ed35-6781-4129-8204-9bd58176d6cc.host.control-plane.oxide.internal - name: _crucible._tcp.a59aa5b4-f6dd-4a52-8cb1-0f65f6c92d63 (records: 1) - SRV port 32345 a59aa5b4-f6dd-4a52-8cb1-0f65f6c92d63.host.control-plane.oxide.internal - name: _crucible._tcp.f7977095-53d3-45a0-ae60-94172e514b2c (records: 1) - SRV port 32345 f7977095-53d3-45a0-ae60-94172e514b2c.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 3dcfe737-743a-4cdc-a9a4-664dc9ca7b9f.host.control-plane.oxide.internal - SRV port 5353 4d162d2a-bb09-47a3-85cf-c956f25e0522.host.control-plane.oxide.internal - SRV port 5353 a74bae55-d8ee-4dc1-819a-fe5251a5fdd9.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 45d8d427-ca4d-4298-9bee-ad9741b90cb8.host.control-plane.oxide.internal - SRV port 123 620951c9-7ab4-4fea-a6f2-e8e73f1ffe01.host.control-plane.oxide.internal - SRV port 123 926701e8-01d7-4857-a898-720aab013bc7.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 8f14fca6-eadd-41ad-82c0-a896f779c17f.host.control-plane.oxide.internal - SRV port 5353 b44bc8cb-a1a7-499c-bc87-8c65cbde4a80.host.control-plane.oxide.internal - SRV port 5353 c6193e9d-6fcb-4b89-bf0a-65ebd8f5010e.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 68b1e69a-aede-48fe-b6b1-51966ca07074.host.control-plane.oxide.internal - SRV port 12221 a855bf91-f935-4d03-9f70-5620d31425bb.host.control-plane.oxide.internal - SRV port 12221 cc6cb457-ce9b-4885-b4af-b1fdb9ce3344.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 45b67899-01e0-43fd-8b92-46db32cd2189.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 3) - SRV port 12348 bb0ec23a-f97c-4b6a-a5bc-864b1ebc9236.sled.control-plane.oxide.internal - SRV port 12348 bba6ea73-6c9c-4ab5-8bb4-1dd145071407.sled.control-plane.oxide.internal - SRV 
port 12348 cc00b21a-5685-480a-ab5e-d2e29cf369df.sled.control-plane.oxide.internal - name: a59aa5b4-f6dd-4a52-8cb1-0f65f6c92d63.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: a74bae55-d8ee-4dc1-819a-fe5251a5fdd9.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: a855bf91-f935-4d03-9f70-5620d31425bb.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: b44bc8cb-a1a7-499c-bc87-8c65cbde4a80.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: bb0ec23a-f97c-4b6a-a5bc-864b1ebc9236.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: bba6ea73-6c9c-4ab5-8bb4-1dd145071407.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: c6193e9d-6fcb-4b89-bf0a-65ebd8f5010e.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: cc00b21a-5685-480a-ab5e-d2e29cf369df.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: cc6cb457-ce9b-4885-b4af-b1fdb9ce3344.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: ed1b15aa-7373-45bc-b2a2-50f49b5b4ada.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: f7977095-53d3-45a0-ae60-94172e514b2c.host (records: 1) - AAAA fd00:1122:3344:102::25 - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 51 (records: 65) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout index 01fb9438c11..e599ae2bdf7 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout @@ -405,32 +405,11 @@ to: blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 00320471-945d-413c-85e7-03e091a70b3c.sled (records: 1) - AAAA fd00:1122:3344:108::1 - name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 9a867dc9-d505-427f-9eff-cdb1d4d9bd73.sled (records: 1) - AAAA fd00:1122:3344:106::1 - name: _repo-depot._tcp (records: 7) - SRV port 12348 00320471-945d-413c-85e7-03e091a70b3c.sled.control-plane.oxide.internal - SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - SRV port 12348 9a867dc9-d505-427f-9eff-cdb1d4d9bd73.sled.control-plane.oxide.internal - SRV port 12348 aff6c093-197d-42c5-ad80-9f10ba051a34.sled.control-plane.oxide.internal - SRV port 12348 b82ede02-399c-48c6-a1de-411df4fa49a7.sled.control-plane.oxide.internal - SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal - name: aff6c093-197d-42c5-ad80-9f10ba051a34.sled (records: 1) - AAAA fd00:1122:3344:104::1 - name: b82ede02-399c-48c6-a1de-411df4fa49a7.sled (records: 1) - AAAA fd00:1122:3344:105::1 - name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) - AAAA fd00:1122:3344:103::1 + unchanged names: 8 (records: 14) external DNS: 
DNS zone: "oxide.example" (unchanged) - name: example-silo.sys (records: 0) + unchanged names: 1 (records: 0) @@ -692,32 +671,11 @@ to: blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 00320471-945d-413c-85e7-03e091a70b3c.sled (records: 1) - AAAA fd00:1122:3344:108::1 - name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 9a867dc9-d505-427f-9eff-cdb1d4d9bd73.sled (records: 1) - AAAA fd00:1122:3344:106::1 - name: _repo-depot._tcp (records: 7) - SRV port 12348 00320471-945d-413c-85e7-03e091a70b3c.sled.control-plane.oxide.internal - SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - SRV port 12348 9a867dc9-d505-427f-9eff-cdb1d4d9bd73.sled.control-plane.oxide.internal - SRV port 12348 aff6c093-197d-42c5-ad80-9f10ba051a34.sled.control-plane.oxide.internal - SRV port 12348 b82ede02-399c-48c6-a1de-411df4fa49a7.sled.control-plane.oxide.internal - SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal - name: aff6c093-197d-42c5-ad80-9f10ba051a34.sled (records: 1) - AAAA fd00:1122:3344:104::1 - name: b82ede02-399c-48c6-a1de-411df4fa49a7.sled (records: 1) - AAAA fd00:1122:3344:105::1 - name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) - AAAA fd00:1122:3344:103::1 + unchanged names: 8 (records: 14) external DNS: DNS zone: "oxide.example" (unchanged) - name: example-silo.sys (records: 0) + unchanged names: 1 (records: 0) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-set-zone-images-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-set-zone-images-stdout index f704a5c0af3..14619df2366 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-set-zone-images-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-set-zone-images-stdout @@ -337,103 +337,11 @@ to: blueprint f714e6ea-e85a-4d7d-93c2-a018744fe176 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 1f19f188-e062-489c-b318-070819d86d5a.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: 2f114b3c-adc9-4189-918e-f99984976121.host (records: 1) - AAAA fd00:1122:3344:101::28 - name: 2fa580a0-db67-4581-9c53-24fd1f42540e.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 3d853da1-51a7-4a9b-9254-ba480e6037f2.host (records: 1) - AAAA fd00:1122:3344:101::29 - name: 417f0724-65fa-4819-9e3b-02beb942f2cd.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 46cb864e-d722-4805-8725-5ec3c7f4e7f0.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 7cbd5731-4f6a-4d13-9248-2ec7483d3c37.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 80d4a70a-db63-412d-81d6-7f17c18895dd.host (records: 1) - AAAA fd00:1122:3344:101::2a - name: 868d5b02-7792-4fc0-b6a9-654afcae9ea0.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 871381e9-f76c-4f63-af49-4cedcc5ff52d.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: 87505ee8-022c-45fd-8a38-55ffd685bca6.host (records: 1) - AAAA fd00:1122:3344:101::2d - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 7cbd5731-4f6a-4d13-9248-2ec7483d3c37.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 
7cbd5731-4f6a-4d13-9248-2ec7483d3c37.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 7cbd5731-4f6a-4d13-9248-2ec7483d3c37.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 3d853da1-51a7-4a9b-9254-ba480e6037f2.host.control-plane.oxide.internal - SRV port 17000 80d4a70a-db63-412d-81d6-7f17c18895dd.host.control-plane.oxide.internal - SRV port 17000 daca619d-d809-47d0-9957-d7ba9de2922e.host.control-plane.oxide.internal - name: _crucible._tcp.87505ee8-022c-45fd-8a38-55ffd685bca6 (records: 1) - SRV port 32345 87505ee8-022c-45fd-8a38-55ffd685bca6.host.control-plane.oxide.internal - name: _crucible._tcp.b8766bf4-a8cb-4851-b7a1-199b8eb5da11 (records: 1) - SRV port 32345 b8766bf4-a8cb-4851-b7a1-199b8eb5da11.host.control-plane.oxide.internal - name: _crucible._tcp.d4c24690-28b6-4926-b003-48f01a7a8bdd (records: 1) - SRV port 32345 d4c24690-28b6-4926-b003-48f01a7a8bdd.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 2f114b3c-adc9-4189-918e-f99984976121.host.control-plane.oxide.internal - SRV port 5353 a399d697-11cd-47cc-ac25-60ac6a07083d.host.control-plane.oxide.internal - SRV port 5353 b6dcd340-c890-44a7-a6e7-3023f7182e1c.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 1) - SRV port 123 46cb864e-d722-4805-8725-5ec3c7f4e7f0.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 1f19f188-e062-489c-b318-070819d86d5a.host.control-plane.oxide.internal - SRV port 5353 2fa580a0-db67-4581-9c53-24fd1f42540e.host.control-plane.oxide.internal - SRV port 5353 fc6956b2-56a8-4337-a93a-4d5a5d36f123.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 417f0724-65fa-4819-9e3b-02beb942f2cd.host.control-plane.oxide.internal - SRV port 12221 871381e9-f76c-4f63-af49-4cedcc5ff52d.host.control-plane.oxide.internal - SRV port 12221 a24a9100-c1f6-4478-9412-4b7a6649bd7c.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 7cbd5731-4f6a-4d13-9248-2ec7483d3c37.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 1) - SRV port 12348 868d5b02-7792-4fc0-b6a9-654afcae9ea0.sled.control-plane.oxide.internal - name: a24a9100-c1f6-4478-9412-4b7a6649bd7c.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: a399d697-11cd-47cc-ac25-60ac6a07083d.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: b6dcd340-c890-44a7-a6e7-3023f7182e1c.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: b8766bf4-a8cb-4851-b7a1-199b8eb5da11.host (records: 1) - AAAA fd00:1122:3344:101::2e - name: d4c24690-28b6-4926-b003-48f01a7a8bdd.host (records: 1) - AAAA fd00:1122:3344:101::2c - name: daca619d-d809-47d0-9957-d7ba9de2922e.host (records: 1) - AAAA fd00:1122:3344:101::2b - name: fc6956b2-56a8-4337-a93a-4d5a5d36f123.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 35 (records: 45) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.3 - A 192.0.2.4 - A 192.0.2.2 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) @@ -659,103 +567,11 @@ to: blueprint 
d9c572a1-a68c-4945-b1ec-5389bd588fe9 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 1f19f188-e062-489c-b318-070819d86d5a.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: 2f114b3c-adc9-4189-918e-f99984976121.host (records: 1) - AAAA fd00:1122:3344:101::28 - name: 2fa580a0-db67-4581-9c53-24fd1f42540e.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 3d853da1-51a7-4a9b-9254-ba480e6037f2.host (records: 1) - AAAA fd00:1122:3344:101::29 - name: 417f0724-65fa-4819-9e3b-02beb942f2cd.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 46cb864e-d722-4805-8725-5ec3c7f4e7f0.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 7cbd5731-4f6a-4d13-9248-2ec7483d3c37.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 80d4a70a-db63-412d-81d6-7f17c18895dd.host (records: 1) - AAAA fd00:1122:3344:101::2a - name: 868d5b02-7792-4fc0-b6a9-654afcae9ea0.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 871381e9-f76c-4f63-af49-4cedcc5ff52d.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: 87505ee8-022c-45fd-8a38-55ffd685bca6.host (records: 1) - AAAA fd00:1122:3344:101::2d - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 7cbd5731-4f6a-4d13-9248-2ec7483d3c37.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 7cbd5731-4f6a-4d13-9248-2ec7483d3c37.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 7cbd5731-4f6a-4d13-9248-2ec7483d3c37.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 3d853da1-51a7-4a9b-9254-ba480e6037f2.host.control-plane.oxide.internal - SRV port 17000 80d4a70a-db63-412d-81d6-7f17c18895dd.host.control-plane.oxide.internal - SRV port 17000 daca619d-d809-47d0-9957-d7ba9de2922e.host.control-plane.oxide.internal - name: _crucible._tcp.87505ee8-022c-45fd-8a38-55ffd685bca6 (records: 1) - SRV port 32345 87505ee8-022c-45fd-8a38-55ffd685bca6.host.control-plane.oxide.internal - name: _crucible._tcp.b8766bf4-a8cb-4851-b7a1-199b8eb5da11 (records: 1) - SRV port 32345 b8766bf4-a8cb-4851-b7a1-199b8eb5da11.host.control-plane.oxide.internal - name: _crucible._tcp.d4c24690-28b6-4926-b003-48f01a7a8bdd (records: 1) - SRV port 32345 d4c24690-28b6-4926-b003-48f01a7a8bdd.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 2f114b3c-adc9-4189-918e-f99984976121.host.control-plane.oxide.internal - SRV port 5353 a399d697-11cd-47cc-ac25-60ac6a07083d.host.control-plane.oxide.internal - SRV port 5353 b6dcd340-c890-44a7-a6e7-3023f7182e1c.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 1) - SRV port 123 46cb864e-d722-4805-8725-5ec3c7f4e7f0.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 1f19f188-e062-489c-b318-070819d86d5a.host.control-plane.oxide.internal - SRV port 5353 2fa580a0-db67-4581-9c53-24fd1f42540e.host.control-plane.oxide.internal - SRV port 5353 fc6956b2-56a8-4337-a93a-4d5a5d36f123.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 417f0724-65fa-4819-9e3b-02beb942f2cd.host.control-plane.oxide.internal - SRV port 12221 871381e9-f76c-4f63-af49-4cedcc5ff52d.host.control-plane.oxide.internal - SRV port 12221 a24a9100-c1f6-4478-9412-4b7a6649bd7c.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 
7cbd5731-4f6a-4d13-9248-2ec7483d3c37.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 1) - SRV port 12348 868d5b02-7792-4fc0-b6a9-654afcae9ea0.sled.control-plane.oxide.internal - name: a24a9100-c1f6-4478-9412-4b7a6649bd7c.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: a399d697-11cd-47cc-ac25-60ac6a07083d.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: b6dcd340-c890-44a7-a6e7-3023f7182e1c.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: b8766bf4-a8cb-4851-b7a1-199b8eb5da11.host (records: 1) - AAAA fd00:1122:3344:101::2e - name: d4c24690-28b6-4926-b003-48f01a7a8bdd.host (records: 1) - AAAA fd00:1122:3344:101::2c - name: daca619d-d809-47d0-9957-d7ba9de2922e.host (records: 1) - AAAA fd00:1122:3344:101::2b - name: fc6956b2-56a8-4337-a93a-4d5a5d36f123.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 35 (records: 45) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.3 - A 192.0.2.4 - A 192.0.2.2 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout index 40014409995..dd49f0c9f0b 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout @@ -252,139 +252,11 @@ to: blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) - AAAA fd00:1122:3344:103::26 - name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) - AAAA fd00:1122:3344:103::24 - name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) - AAAA fd00:1122:3344:103::27 - name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) - AAAA fd00:1122:3344:102::28 - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) - AAAA 
fd00:1122:3344:1::1 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) - SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal - name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) - SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal - name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) - SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal - name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) - SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal - name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) - SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal - name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) - SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal - name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) - SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal - name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) - SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal - name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) - SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal - SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal - SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) 
- SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 3) - SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal - name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) - AAAA fd00:1122:3344:102::25 - name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) - AAAA fd00:1122:3344:102::26 - name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) - AAAA fd00:1122:3344:102::27 - name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) - AAAA fd00:1122:3344:103::25 - name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 51 (records: 65) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) @@ -432,139 +304,11 @@ to: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) - AAAA fd00:1122:3344:103::26 - name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) - AAAA fd00:1122:3344:103::24 - name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) - AAAA fd00:1122:3344:103::27 - name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) - AAAA fd00:1122:3344:102::28 - name: 
98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) - SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal - name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) - SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal - name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) - SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal - name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) - SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal - name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) - SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal - name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) - SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal - name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) - SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal - name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) - SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal - name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) - SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal - SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 
3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal - SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 3) - SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal - name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) - AAAA fd00:1122:3344:102::25 - name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) - AAAA fd00:1122:3344:102::26 - name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) - AAAA fd00:1122:3344:102::27 - name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) - AAAA fd00:1122:3344:103::25 - name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 51 (records: 65) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) @@ -631,139 +375,11 @@ to: blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) - AAAA fd00:1122:3344:103::26 - name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) - AAAA fd00:1122:3344:103::24 - name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) - AAAA fd00:1122:3344:103::27 - name: 
803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) - AAAA fd00:1122:3344:102::28 - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) - SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal - name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) - SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal - name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) - SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal - name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) - SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal - name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) - SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal - name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) - SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal - name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) - SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal - name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) - SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal - name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) - SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal - SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal 
- name: _nexus._tcp (records: 3) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal - SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 3) - SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal - name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) - AAAA fd00:1122:3344:102::25 - name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) - AAAA fd00:1122:3344:102::26 - name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) - AAAA fd00:1122:3344:102::27 - name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) - AAAA fd00:1122:3344:103::25 - name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 51 (records: 65) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) @@ -830,139 +446,11 @@ to: blueprint df06bb57-ad42-4431-9206-abff322896c7 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) - AAAA fd00:1122:3344:103::26 - name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) - AAAA 
fd00:1122:3344:103::24 - name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) - AAAA fd00:1122:3344:103::27 - name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) - AAAA fd00:1122:3344:102::28 - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) - SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal - name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) - SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal - name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) - SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal - name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) - SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal - name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) - SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal - name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) - SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal - name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) - SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal - name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) - SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal - name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) - SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal - SRV port 5353 
99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal
-     SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal
-   [remaining SRV and AAAA records for the unchanged internal DNS names elided; the listing is identical in every blueprint diff in this file]
+   unchanged names: 51 (records: 65)
 external DNS:
   DNS zone: "oxide.example" (unchanged)
-   name: @ (records: 3)
-     NS ns1.oxide.example
-     NS ns2.oxide.example
-     NS ns3.oxide.example
-   name: example-silo.sys (records: 3)
-     A 192.0.2.2
-     A 192.0.2.3
-     A 192.0.2.4
-   name: ns1 (records: 1)
-     A 198.51.100.1
-   name: ns2 (records: 1)
-     A 198.51.100.2
-   name: ns3 (records: 1)
-     A 198.51.100.3
+   unchanged names: 5 (records: 9)
@@ -1091,139 +579,11 @@ to: blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba
 internal DNS:
   DNS zone: "control-plane.oxide.internal" (unchanged)
-   [per-name listing of the 51 unchanged internal DNS names (AAAA and SRV records) elided]
+   unchanged names: 51 (records: 65)
 external DNS:
   DNS zone: "oxide.example" (unchanged)
-   [per-name listing of the 5 unchanged external DNS names (NS and A records) elided]
+   unchanged names: 5 (records: 9)
@@ -1270,139 +630,11 @@ to: blueprint 9034c710-3e57-45f3-99e5-4316145e87ac
 internal DNS:
   DNS zone: "control-plane.oxide.internal" (unchanged)
-   [per-name listing of the 51 unchanged internal DNS names (AAAA and SRV records) elided]
+   unchanged names: 51 (records: 65)
 external DNS:
   DNS zone: "oxide.example" (unchanged)
-   [per-name listing of the 5 unchanged external DNS names (NS and A records) elided]
+   unchanged names: 5 (records: 9)
@@ -1459,139 +691,11 @@ to: blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976
 internal DNS:
   DNS zone: "control-plane.oxide.internal" (unchanged)
-   [per-name listing of the 51 unchanged internal DNS names (AAAA and SRV records) elided]
+   unchanged names: 51 (records: 65)
 external DNS:
   DNS zone: "oxide.example" (unchanged)
-   [per-name listing of the 5 unchanged external DNS names (NS and A records) elided]
+   unchanged names: 5 (records: 9)
@@ -1647,139 +751,11 @@ to: blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736
 internal DNS:
   DNS zone: "control-plane.oxide.internal" (unchanged)
-   [per-name listing of the 51 unchanged internal DNS names (AAAA and SRV records) elided]
+   unchanged names: 51 (records: 65)
 external DNS:
   DNS zone: "oxide.example" (unchanged)
-   [per-name listing of the 5 unchanged external DNS names (NS and A records) elided]
+   unchanged names: 5 (records: 9)
@@ -1846,139 +822,11 @@ to: blueprint 626487fa-7139-45ec-8416-902271fc730b
 internal DNS:
   DNS zone: "control-plane.oxide.internal" (unchanged)
-   [per-name listing of the 51 unchanged internal DNS names (AAAA and SRV records) elided]
+   unchanged names: 51 (records: 65)
 external DNS:
   DNS zone: "oxide.example" (unchanged)
-   [per-name listing of the 5 unchanged external DNS names (NS and A records) elided]
+   unchanged names: 5 (records: 9)
@@ -2047,139 +895,11 @@ to: blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b
 internal DNS:
   DNS zone: "control-plane.oxide.internal" (unchanged)
-   [per-name listing of the 51 unchanged internal DNS names (AAAA and SRV records) elided]
+   unchanged names: 51 (records: 65)
 external DNS:
   DNS zone: "oxide.example" (unchanged)
-   [per-name listing of the 5 unchanged external DNS names (NS and A records) elided]
+   unchanged names: 5 (records: 9)
@@ -2243,139 +963,11 @@ to: blueprint afb09faf-a586-4483-9289-04d4f1d8ba23
 internal DNS:
   DNS zone: "control-plane.oxide.internal" (unchanged)
-   [per-name listing of the 51 unchanged internal DNS names (AAAA and SRV records) elided]
+   unchanged names: 51 (records: 65)
 external DNS:
   DNS zone: "oxide.example" (unchanged)
-   [per-name listing of the 5 unchanged external DNS names (NS and A records) elided]
+   unchanged names: 5 (records: 9)
@@ -2443,139 +1035,11 @@ to: blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700
 internal DNS:
   DNS zone: "control-plane.oxide.internal" (unchanged)
-   [per-name listing of the 51 unchanged internal DNS names (AAAA and SRV records) elided]
+   unchanged names: 51 (records: 65)
 external DNS:
   DNS zone: "oxide.example" (unchanged)
-   [per-name listing of the 5 unchanged external DNS names (NS and A records) elided]
+   unchanged names: 5 (records: 9)
@@ -2639,139 +1103,11 @@ to: blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1
 internal DNS:
   DNS zone: "control-plane.oxide.internal" (unchanged)
-   [per-name listing of the 51 unchanged internal DNS names (AAAA and SRV records) elided]
+   unchanged names: 51 (records: 65)
 external DNS:
   DNS zone: "oxide.example" (unchanged)
-   [per-name listing of the 5 unchanged external DNS names (NS and A records) elided]
+   unchanged names: 5 (records: 9)
@@ -2838,139 +1174,11 @@ to: blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300
 internal DNS:
   DNS zone: "control-plane.oxide.internal" (unchanged)
-   [per-name listing of the 51 unchanged internal DNS names (AAAA and SRV records) elided]
+   unchanged names: 51 (records: 65)
 external
DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) @@ -3099,139 +1307,11 @@ to: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) - AAAA fd00:1122:3344:103::26 - name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) - AAAA fd00:1122:3344:103::24 - name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) - AAAA fd00:1122:3344:103::27 - name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) - AAAA fd00:1122:3344:102::28 - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) - SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal - name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) - SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal - name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) - SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal - name: 
_crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) - SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal - name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) - SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal - name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) - SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal - name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) - SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal - name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) - SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal - name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) - SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal - SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal - SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 3) - SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal - name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) - AAAA fd00:1122:3344:102::25 - name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) - AAAA fd00:1122:3344:102::26 - name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) - AAAA fd00:1122:3344:102::27 - name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) - AAAA fd00:1122:3344:103::25 - name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: ns1 (records: 1) - AAAA 
fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 51 (records: 65) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) @@ -3299,139 +1379,11 @@ to: blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) - AAAA fd00:1122:3344:103::26 - name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) - AAAA fd00:1122:3344:103::24 - name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) - AAAA fd00:1122:3344:103::27 - name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) - AAAA fd00:1122:3344:102::28 - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) - SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal - name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) - SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal - name: 
_crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) - SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal - name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) - SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal - name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) - SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal - name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) - SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal - name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) - SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal - name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) - SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal - name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) - SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal - SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal - SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 3) - SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal - name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) - AAAA fd00:1122:3344:102::25 - name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) - AAAA fd00:1122:3344:102::26 - name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) - AAAA fd00:1122:3344:102::27 - name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) - 
AAAA fd00:1122:3344:103::25 - name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 51 (records: 65) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) @@ -3501,139 +1453,11 @@ to: blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) - AAAA fd00:1122:3344:103::26 - name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) - AAAA fd00:1122:3344:103::24 - name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) - AAAA fd00:1122:3344:103::27 - name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) - AAAA fd00:1122:3344:102::28 - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) - SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal - name: 
_crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) - SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal - name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) - SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal - name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) - SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal - name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) - SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal - name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) - SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal - name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) - SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal - name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) - SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal - name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) - SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal - SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal - SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 3) - SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal - name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) - AAAA fd00:1122:3344:102::25 - name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) - AAAA fd00:1122:3344:102::26 - name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) - AAAA fd00:1122:3344:102::27 - name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) - AAAA fd00:1122:3344:3::1 
- name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) - AAAA fd00:1122:3344:103::25 - name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 51 (records: 65) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) @@ -3696,139 +1520,11 @@ to: blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) - AAAA fd00:1122:3344:103::26 - name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) - AAAA fd00:1122:3344:103::24 - name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) - AAAA fd00:1122:3344:103::27 - name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) - AAAA fd00:1122:3344:102::28 - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - name: 
_crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) - SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal - name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) - SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal - name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) - SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal - name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) - SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal - name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) - SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal - name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) - SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal - name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) - SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal - name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) - SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal - name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) - SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal - SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal - SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 3) - SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal - name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) - AAAA fd00:1122:3344:102::25 - name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) - AAAA fd00:1122:3344:102::26 - name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: 
e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) - AAAA fd00:1122:3344:102::27 - name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) - AAAA fd00:1122:3344:103::25 - name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 51 (records: 65) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) @@ -3895,139 +1591,11 @@ to: blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) - AAAA fd00:1122:3344:103::26 - name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) - AAAA fd00:1122:3344:103::24 - name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) - AAAA fd00:1122:3344:103::27 - name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) - AAAA fd00:1122:3344:102::28 - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 
ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) - SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal - name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) - SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal - name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) - SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal - name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) - SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal - name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) - SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal - name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) - SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal - name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) - SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal - name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) - SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal - name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) - SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal - SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal - SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 3) - SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal - name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) - AAAA fd00:1122:3344:102::25 - name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) - AAAA fd00:1122:3344:102::26 - name: 
d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) - AAAA fd00:1122:3344:102::27 - name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) - AAAA fd00:1122:3344:103::25 - name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 51 (records: 65) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) @@ -4096,139 +1664,11 @@ to: blueprint b82656b0-a9be-433d-83d0-e2bdf371777a internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) - AAAA fd00:1122:3344:103::26 - name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) - AAAA fd00:1122:3344:103::24 - name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) - AAAA fd00:1122:3344:103::27 - name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) - AAAA fd00:1122:3344:102::28 - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 
353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) - SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal - name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) - SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal - name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) - SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal - name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) - SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal - name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) - SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal - name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) - SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal - name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) - SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal - name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) - SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal - name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) - SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal - SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal - SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 3) - SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal - name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) - AAAA fd00:1122:3344:102::25 - 
name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) - AAAA fd00:1122:3344:102::26 - name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) - AAAA fd00:1122:3344:102::27 - name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) - AAAA fd00:1122:3344:103::25 - name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 51 (records: 65) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) @@ -4294,139 +1734,11 @@ to: blueprint 31c84831-be52-4630-bc3f-128d72cd8f22 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) - AAAA fd00:1122:3344:103::26 - name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) - AAAA fd00:1122:3344:103::24 - name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) - AAAA fd00:1122:3344:103::27 - name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) - AAAA fd00:1122:3344:102::28 - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 
9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) - SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal - name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) - SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal - name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) - SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal - name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) - SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal - name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) - SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal - name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) - SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal - name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) - SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal - name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) - SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal - name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) - SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal - SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal - SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 3) - SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - SRV port 12348 
d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal - name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) - AAAA fd00:1122:3344:102::25 - name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) - AAAA fd00:1122:3344:102::26 - name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) - AAAA fd00:1122:3344:102::27 - name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) - AAAA fd00:1122:3344:103::25 - name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 51 (records: 65) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) @@ -4490,139 +1802,11 @@ to: blueprint 778e3f3a-58b1-4a5e-acff-d23c5d7124c2 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) - AAAA fd00:1122:3344:103::26 - name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) - AAAA fd00:1122:3344:103::24 - name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) - AAAA fd00:1122:3344:103::27 - name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) - AAAA fd00:1122:3344:102::28 - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: 
_clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) - SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal - name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) - SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal - name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) - SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal - name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) - SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal - name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) - SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal - name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) - SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal - name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) - SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal - name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) - SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal - name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) - SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal - SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal - SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 3) - SRV port 12348 
2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal - name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) - AAAA fd00:1122:3344:102::25 - name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) - AAAA fd00:1122:3344:102::26 - name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) - AAAA fd00:1122:3344:102::27 - name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) - AAAA fd00:1122:3344:103::25 - name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 51 (records: 65) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) @@ -4746,139 +1930,11 @@ to: blueprint 386a7ec3-7c2e-43cf-8f00-999e91e1d5e6 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) - AAAA fd00:1122:3344:103::26 - name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) - AAAA fd00:1122:3344:103::24 - name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) - AAAA fd00:1122:3344:103::27 - name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) - AAAA fd00:1122:3344:102::28 - name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) - AAAA 
fd00:1122:3344:1::1 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) - SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal - name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) - SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal - name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) - SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal - name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) - SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal - name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) - SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal - name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) - SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal - name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) - SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal - name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) - SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal - name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) - SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal - SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal - SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) 
- SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 3) - SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal - name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) - AAAA fd00:1122:3344:102::25 - name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) - AAAA fd00:1122:3344:102::26 - name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) - AAAA fd00:1122:3344:102::27 - name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) - AAAA fd00:1122:3344:103::25 - name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 51 (records: 65) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) @@ -5009,139 +2065,11 @@ to: blueprint e54a0836-53e1-4948-a3af-0b77165289b5 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) - AAAA fd00:1122:3344:101::27 - name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) - AAAA fd00:1122:3344:101::22 - name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) - AAAA fd00:1122:3344:102::1 - name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) - AAAA fd00:1122:3344:102::23 - name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) - AAAA fd00:1122:3344:103::22 - name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) - AAAA fd00:1122:3344:2::1 - name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) - AAAA fd00:1122:3344:102::22 - name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) - AAAA fd00:1122:3344:101::25 - name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) - AAAA fd00:1122:3344:102::21 - name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) - AAAA fd00:1122:3344:101::21 - name: 694bd14f-cb24-4be4-bb19-876e79cda2c8.host (records: 1) - AAAA fd00:1122:3344:103::26 - name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) - AAAA fd00:1122:3344:102::24 - name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) - AAAA fd00:1122:3344:103::24 - name: 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host (records: 1) - AAAA fd00:1122:3344:103::27 - name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) - AAAA fd00:1122:3344:101::23 - name: 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host (records: 1) - AAAA fd00:1122:3344:102::28 - name: 
98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) - AAAA fd00:1122:3344:101::1 - name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) - AAAA fd00:1122:3344:1::1 - name: @ (records: 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal - NS ns3.control-plane.oxide.internal - name: _clickhouse-admin-single-server._tcp (records: 1) - SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse-native._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _clickhouse._tcp (records: 1) - SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _crucible-pantry._tcp (records: 3) - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - name: _crucible._tcp.058fd5f9-60a8-4e11-9302-15172782e17d (records: 1) - SRV port 32345 058fd5f9-60a8-4e11-9302-15172782e17d.host.control-plane.oxide.internal - name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) - SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal - name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) - SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal - name: _crucible._tcp.7c252b64-c5af-4ec1-989e-9a03f3b0f111 (records: 1) - SRV port 32345 7c252b64-c5af-4ec1-989e-9a03f3b0f111.host.control-plane.oxide.internal - name: _crucible._tcp.86a22a56-0168-453d-9df1-cb2a7c64b5d3 (records: 1) - SRV port 32345 86a22a56-0168-453d-9df1-cb2a7c64b5d3.host.control-plane.oxide.internal - name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) - SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal - name: _crucible._tcp.dfac80b4-a887-430a-ae87-a4e065dba787 (records: 1) - SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal - name: _crucible._tcp.e2fdefe7-95b2-4fd2-ae37-56929a06d58c (records: 1) - SRV port 32345 e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host.control-plane.oxide.internal - name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) - SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal - name: _external-dns._tcp (records: 3) - SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - name: _internal-ntp._tcp (records: 3) - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal - name: _nameservice._tcp (records: 3) - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal - SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 
3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal - SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal - name: _oximeter-reader._tcp (records: 1) - SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal - name: _repo-depot._tcp (records: 3) - SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal - SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal - SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal - name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) - AAAA fd00:1122:3344:102::25 - name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) - AAAA fd00:1122:3344:101::24 - name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) - AAAA fd00:1122:3344:102::26 - name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) - AAAA fd00:1122:3344:103::1 - name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) - AAAA fd00:1122:3344:101::26 - name: e2fdefe7-95b2-4fd2-ae37-56929a06d58c.host (records: 1) - AAAA fd00:1122:3344:102::27 - name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) - AAAA fd00:1122:3344:103::21 - name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) - AAAA fd00:1122:3344:103::25 - name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) - AAAA fd00:1122:3344:103::23 - name: ns1 (records: 1) - AAAA fd00:1122:3344:1::1 - name: ns2 (records: 1) - AAAA fd00:1122:3344:2::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 + unchanged names: 51 (records: 65) external DNS: DNS zone: "oxide.example" (unchanged) - name: @ (records: 3) - NS ns1.oxide.example - NS ns2.oxide.example - NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 - name: ns1 (records: 1) - A 198.51.100.1 - name: ns2 (records: 1) - A 198.51.100.2 - name: ns3 (records: 1) - A 198.51.100.3 + unchanged names: 5 (records: 9) diff --git a/internal-dns/types/src/diff.rs b/internal-dns/types/src/diff.rs index f262330c3cc..b45081f3e67 100644 --- a/internal-dns/types/src/diff.rs +++ b/internal-dns/types/src/diff.rs @@ -160,6 +160,8 @@ impl std::fmt::Display for DnsDiff<'_> { Ok(()) }; + let mut num_names_unchanged = 0; + let mut num_records_unchanged = 0; for name_diff in self.iter_names() { match name_diff { NameDiff::Added(name, records) => { @@ -180,14 +182,9 @@ impl std::fmt::Display for DnsDiff<'_> { )?; print_records(f, "-", records)?; } - NameDiff::Unchanged(name, records) => { - writeln!( - f, - " name: {:50} (records: {})", - name, - records.len() - )?; - print_records(f, " ", records)?; + NameDiff::Unchanged(_name, records) => { + num_names_unchanged += 1; + num_records_unchanged += records.len(); } NameDiff::Changed(name, records1, records2) => { writeln!( @@ -202,6 +199,13 @@ impl std::fmt::Display for DnsDiff<'_> { } } } + if num_names_unchanged > 0 { + writeln!( + f, + " unchanged names: {num_names_unchanged} \ + (records: {num_records_unchanged})" + )?; + } Ok(()) } diff --git a/internal-dns/types/tests/output/diff_example_empty.out b/internal-dns/types/tests/output/diff_example_empty.out index 1e3ba76bc92..3016b049505 100644 --- a/internal-dns/types/tests/output/diff_example_empty.out +++ b/internal-dns/types/tests/output/diff_example_empty.out @@ -1,5 +1,2 @@ DNS zone: "dummy" (unchanged) - name: ex1 (records: 1) - A 127.0.0.1 - name: ex2 
(records: 1) - A 192.168.1.3 + unchanged names: 2 (records: 2) From c2862a0cbf82ac8ce77e6bcf72694dc3d928d52a Mon Sep 17 00:00:00 2001 From: James MacMahon Date: Thu, 28 Aug 2025 17:14:38 -0400 Subject: [PATCH 13/38] Store the silo admin group name (#8850) The silo admin group is a special group that is automatically created during silo creation where members are granted the silo admin role. This name however is not stored, but this is currently ok: any existing silo won't have the ability to delete groups, meaning that the automatically created admin group is there to stay and a group with a duplicate name cannot be created. Very soon there will be silos with provision types that _can_ delete groups, meaning this name has to be known in order for Nexus to create the appropriate policy when a group with a matching name is created again. It was a mistake not to store this parameter with the silo, so rectify that with this PR. --- nexus/db-model/src/schema_versions.rs | 3 ++- nexus/db-model/src/silo.rs | 18 ++++++++++++++++++ nexus/db-schema/src/schema.rs | 2 ++ nexus/types/src/external_api/views.rs | 4 ++++ openapi/nexus.json | 5 +++++ schema/crdb/dbinit.sql | 6 ++++-- .../crdb/store-silo-admin-group-name/up01.sql | 3 +++ 7 files changed, 38 insertions(+), 3 deletions(-) create mode 100644 schema/crdb/store-silo-admin-group-name/up01.sql diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs index fd8fc89b3bb..80df59b44f4 100644 --- a/nexus/db-model/src/schema_versions.rs +++ b/nexus/db-model/src/schema_versions.rs @@ -16,7 +16,7 @@ use std::{collections::BTreeMap, sync::LazyLock}; /// /// This must be updated when you change the database schema. Refer to /// schema/crdb/README.adoc in the root of this repository for details. -pub const SCHEMA_VERSION: Version = Version::new(183, 0, 0); +pub const SCHEMA_VERSION: Version = Version::new(184, 0, 0); /// List of all past database schema versions, in *reverse* order /// @@ -28,6 +28,7 @@ static KNOWN_VERSIONS: LazyLock> = LazyLock::new(|| { // | leaving the first copy as an example for the next person. // v // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"), + KnownVersion::new(184, "store-silo-admin-group-name"), KnownVersion::new(183, "add-ip-version-to-pools"), KnownVersion::new(182, "add-tuf-artifact-board"), KnownVersion::new(181, "rename-nat-table"), diff --git a/nexus/db-model/src/silo.rs b/nexus/db-model/src/silo.rs index 63573eb4f76..2060ef4f2fa 100644 --- a/nexus/db-model/src/silo.rs +++ b/nexus/db-model/src/silo.rs @@ -100,6 +100,22 @@ pub struct Silo { /// child resource generation number, per RFD 192 pub rcgen: Generation, + + /// Store a group name that will be + /// + /// 1) automatically created (depending on the provision type of this silo) + /// at silo create time. + /// 2) assigned a policy granting members the silo admin role + /// + /// Prior to this column existing, for api_only and jit provision types, + /// Nexus would create this group, and create a policy where users of the + /// group would have the silo admin role. It wouldn't store this information + /// though as groups cannot be deleted with those provision types. + /// + /// For provision types that can both create and delete groups, it's + /// important to store this name so that when groups are created the same + /// automatic policy can be created as well. 
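The doc comment above is the heart of this change: the group name is remembered so that, for provision types that can delete and later recreate groups, Nexus can tell when a newly created group should receive the automatic silo-admin policy again. Below is a minimal sketch of that check; the function and variable names are hypothetical (the Nexus code path that would consume this column is not part of this patch), but the comparison it performs is exactly what storing `admin_group_name` enables.

```rust
// Hypothetical illustration only: neither `group_needs_admin_policy` nor its
// call sites exist in this patch. It shows the decision that storing
// `admin_group_name` on the silo makes possible.
fn group_needs_admin_policy(
    stored_admin_group_name: Option<&str>,
    new_group_name: &str,
) -> bool {
    // The automatic silo-admin policy applies only when the recreated group's
    // name matches the name recorded at silo creation time.
    stored_admin_group_name == Some(new_group_name)
}

fn main() {
    // Silo created with an admin group named "admins".
    assert!(group_needs_admin_policy(Some("admins"), "admins"));
    assert!(!group_needs_admin_policy(Some("admins"), "engineers"));
    // Pre-existing silos have NULL in the new column (None here), so no group
    // recreated in them is silently granted the admin policy.
    assert!(!group_needs_admin_policy(None, "admins"));
}
```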
+ pub admin_group_name: Option, } /// Form of mapped fleet roles used when serializing to the database @@ -180,6 +196,7 @@ impl Silo { .into(), rcgen: Generation::new(), mapped_fleet_roles, + admin_group_name: params.admin_group_name, }) } @@ -225,6 +242,7 @@ impl TryFrom for views::Silo { discoverable: silo.discoverable, identity_mode, mapped_fleet_roles, + admin_group_name: silo.admin_group_name, }) } } diff --git a/nexus/db-schema/src/schema.rs b/nexus/db-schema/src/schema.rs index 7d4daff9d2d..28a168d6f76 100644 --- a/nexus/db-schema/src/schema.rs +++ b/nexus/db-schema/src/schema.rs @@ -751,6 +751,8 @@ table! { mapped_fleet_roles -> Jsonb, rcgen -> Int8, + + admin_group_name -> Nullable, } } diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index b406c9299f6..7f10955f96f 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -58,6 +58,10 @@ pub struct Silo { /// unless there's a corresponding entry in this map. pub mapped_fleet_roles: BTreeMap>, + + /// Optionally, silos can have a group name that is automatically granted + /// the silo admin role. + pub admin_group_name: Option, } /// A collection of resource counts used to describe capacity and utilization diff --git a/openapi/nexus.json b/openapi/nexus.json index 1a5ef03fbfe..8933823f9e4 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -23566,6 +23566,11 @@ "description": "View of a Silo\n\nA Silo is the highest level unit of isolation.", "type": "object", "properties": { + "admin_group_name": { + "nullable": true, + "description": "Optionally, silos can have a group name that is automatically granted the silo admin role.", + "type": "string" + }, "description": { "description": "human-readable free-form text about a resource", "type": "string" diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 4ef07d9dbdf..e6e2949c08b 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -875,7 +875,9 @@ CREATE TABLE IF NOT EXISTS omicron.public.silo ( mapped_fleet_roles JSONB NOT NULL, /* child resource generation number, per RFD 192 */ - rcgen INT NOT NULL + rcgen INT NOT NULL, + + admin_group_name TEXT ); CREATE UNIQUE INDEX IF NOT EXISTS lookup_silo_by_name ON omicron.public.silo ( @@ -6562,7 +6564,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - (TRUE, NOW(), NOW(), '183.0.0', NULL) + (TRUE, NOW(), NOW(), '184.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/schema/crdb/store-silo-admin-group-name/up01.sql b/schema/crdb/store-silo-admin-group-name/up01.sql new file mode 100644 index 00000000000..4b663529876 --- /dev/null +++ b/schema/crdb/store-silo-admin-group-name/up01.sql @@ -0,0 +1,3 @@ +ALTER TABLE omicron.public.silo +ADD COLUMN IF NOT EXISTS +admin_group_name TEXT DEFAULT NULL From 33ad127e7ea82d07e3c9ebb9f4fdb45dc50ffe71 Mon Sep 17 00:00:00 2001 From: James MacMahon Date: Thu, 28 Aug 2025 18:17:33 -0400 Subject: [PATCH 14/38] Use typed UUIDs for silo user and group (#8803) This commit changes SiloUser and SiloGroup to use typed UUIDs. The biggest reason that this couldn't happen without a bunch of work was the `lookup_resource` macro: for a resource like SshKey, it looked like ``` lookup_resource! 
{ name = "SshKey", ancestors = [ "Silo", "SiloUser" ], lookup_by_name = true, soft_deletes = true, primary_key_columns = [ { column_name = "id", rust_type = Uuid } ] } ``` One of the methods that the `lookup_resource` macro generates will create a lookup for ancestors of the resource, and this was one using the type returned from the corresponding authz resource's `id` method: ``` quote! { .filter(dsl::#parent_id.eq(#parent_authz_name.id())) }, ``` Changing SiloUser to use a typed UUID in the authz resource: ``` authz_resource! { name = "SiloUser", parent = "Silo", primary_key = { uuid_kind = SiloUserKind }, <-- here roles_allowed = false, polar_snippet = Custom, } ``` and changing the `SiloUser` db model to use `DbTypedUuid` meant that a call to `to_db_typed_uuid` was required. The lookup_resource macro has no type information from the string "SiloUser", so this PR adds a check: if the ancestor string is suffixed with a '*', then the lookup_resource macro should assume that the `parent_id` is a typed UUID, and generate the call to `to_db_typed_uuid`. Most of the work after that was mechanical, changing Uuid to their typed equivalent, changing method argument types, etc etc. Some other related things made it into this PR: - UserBuiltIn now also uses a typed UUID as well, distinguishing them from silo users - Actor no longer has the `actor_id` method, instead requiring call sites to check which variant of Actor is being used - AuthenticatedActor stores the full Actor instead of only the actor id, leading to typed comparisons in its oso::PolarClass impl - User and Group path params are now typed --- Cargo.lock | 2 + clients/oxide-client/Cargo.toml | 1 + nexus/auth/src/authn/external/mod.rs | 6 +- .../auth/src/authn/external/session_cookie.rs | 14 +-- nexus/auth/src/authn/external/spoof.rs | 16 ++-- nexus/auth/src/authn/mod.rs | 42 +++++---- nexus/auth/src/authz/actor.rs | 44 +++++---- nexus/auth/src/authz/api_resources.rs | 6 +- nexus/auth/src/authz/roles.rs | 10 +- nexus/auth/src/context.rs | 24 +++-- nexus/db-fixed-data/Cargo.toml | 1 + nexus/db-fixed-data/src/lib.rs | 12 +++ nexus/db-fixed-data/src/role_assignment.rs | 13 +-- nexus/db-fixed-data/src/silo_user.rs | 12 +-- nexus/db-fixed-data/src/user_builtin.rs | 22 +++-- nexus/db-lookup/src/lookup.rs | 43 +++++---- nexus/db-macros/src/lookup.rs | 64 ++++++++++--- nexus/db-model/src/audit_log.rs | 33 +++++-- nexus/db-model/src/console_session.rs | 27 ++++-- nexus/db-model/src/device_auth.rs | 14 ++- nexus/db-model/src/role_assignment.rs | 35 +++++++ nexus/db-model/src/silo_group.rs | 23 ++++- nexus/db-model/src/silo_user.rs | 8 +- nexus/db-model/src/silo_user_password_hash.rs | 19 +++- nexus/db-model/src/ssh_key.rs | 22 ++++- nexus/db-model/src/user_builtin.rs | 5 +- .../src/db/datastore/console_session.rs | 7 +- .../src/db/datastore/device_auth.rs | 31 ++++++- .../src/db/datastore/identity_provider.rs | 46 +++++----- nexus/db-queries/src/db/datastore/mod.rs | 30 +++--- nexus/db-queries/src/db/datastore/rack.rs | 15 ++- nexus/db-queries/src/db/datastore/silo.rs | 48 +++++----- .../db-queries/src/db/datastore/silo_group.rs | 35 +++++-- .../db-queries/src/db/datastore/silo_user.rs | 56 +++++++++--- nexus/db-queries/src/db/datastore/ssh_key.rs | 13 ++- nexus/db-queries/src/policy_test/mod.rs | 18 ++-- .../src/policy_test/resource_builder.rs | 17 ++-- nexus/db-queries/src/policy_test/resources.rs | 6 +- nexus/src/app/device_auth.rs | 5 +- nexus/src/app/iam.rs | 10 +- nexus/src/app/instance.rs | 2 +- nexus/src/app/sagas/instance_create.rs | 3 
+- nexus/src/app/session.rs | 5 +- nexus/src/app/silo.rs | 30 +++--- nexus/src/app/ssh_key.rs | 8 +- nexus/src/context.rs | 3 +- nexus/src/external_api/http_entrypoints.rs | 91 +++++++++++++++---- nexus/test-utils/src/http_testing.rs | 3 +- nexus/test-utils/src/resource_helpers.rs | 11 +-- nexus/tests/integration_tests/authn_http.rs | 31 ++++--- nexus/tests/integration_tests/authz.rs | 5 +- nexus/tests/integration_tests/certificates.rs | 72 +++++++++------ nexus/tests/integration_tests/device_auth.rs | 5 +- .../integration_tests/role_assignments.rs | 9 +- nexus/tests/integration_tests/saml.rs | 5 +- nexus/tests/integration_tests/silo_users.rs | 2 +- nexus/tests/integration_tests/silos.rs | 17 ++-- .../tests/integration_tests/users_builtin.rs | 19 ++-- nexus/types/src/external_api/params.rs | 22 +++-- nexus/types/src/external_api/shared.rs | 27 ++++++ nexus/types/src/external_api/views.rs | 30 ++++-- uuid-kinds/src/lib.rs | 3 + 62 files changed, 839 insertions(+), 419 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9c6883e8aa4..8114da6b563 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6358,6 +6358,7 @@ dependencies = [ "nexus-types", "omicron-common", "omicron-rpaths", + "omicron-uuid-kinds", "omicron-workspace-hack", "pq-sys", "strum 0.27.2", @@ -8925,6 +8926,7 @@ dependencies = [ "hickory-resolver 0.25.2", "http", "hyper", + "omicron-uuid-kinds", "omicron-workspace-hack", "progenitor 0.10.0", "progenitor-client 0.10.0", diff --git a/clients/oxide-client/Cargo.toml b/clients/oxide-client/Cargo.toml index 8133a978023..0b85129ed1e 100644 --- a/clients/oxide-client/Cargo.toml +++ b/clients/oxide-client/Cargo.toml @@ -26,3 +26,4 @@ thiserror.workspace = true tokio = { workspace = true, features = [ "net" ] } uuid.workspace = true omicron-workspace-hack.workspace = true +omicron-uuid-kinds.workspace = true diff --git a/nexus/auth/src/authn/external/mod.rs b/nexus/auth/src/authn/external/mod.rs index c1ec0b50a96..f420b690673 100644 --- a/nexus/auth/src/authn/external/mod.rs +++ b/nexus/auth/src/authn/external/mod.rs @@ -10,6 +10,7 @@ use crate::authn; use crate::probes; use async_trait::async_trait; use authn::Reason; +use omicron_uuid_kinds::SiloUserUuid; use slog::trace; use std::borrow::Borrow; use uuid::Uuid; @@ -153,7 +154,10 @@ pub enum SchemeResult { /// A context that can look up a Silo user's Silo. 
#[async_trait] pub trait SiloUserSilo { - async fn silo_user_silo(&self, silo_user_id: Uuid) -> Result; + async fn silo_user_silo( + &self, + silo_user_id: SiloUserUuid, + ) -> Result; } #[cfg(test)] diff --git a/nexus/auth/src/authn/external/session_cookie.rs b/nexus/auth/src/authn/external/session_cookie.rs index 94918776927..d4b3b560983 100644 --- a/nexus/auth/src/authn/external/session_cookie.rs +++ b/nexus/auth/src/authn/external/session_cookie.rs @@ -14,6 +14,7 @@ use dropshot::HttpError; use http::HeaderValue; use nexus_types::authn::cookies::parse_cookies; use omicron_uuid_kinds::ConsoleSessionUuid; +use omicron_uuid_kinds::SiloUserUuid; use slog::debug; use uuid::Uuid; @@ -22,7 +23,7 @@ use uuid::Uuid; pub trait Session { fn id(&self) -> ConsoleSessionUuid; - fn silo_user_id(&self) -> Uuid; + fn silo_user_id(&self) -> SiloUserUuid; fn silo_id(&self) -> Uuid; fn time_last_used(&self) -> DateTime; fn time_created(&self) -> DateTime; @@ -202,6 +203,7 @@ mod test { use chrono::{DateTime, Duration, Utc}; use http; use omicron_uuid_kinds::ConsoleSessionUuid; + use omicron_uuid_kinds::SiloUserUuid; use slog; use std::sync::Mutex; use uuid::Uuid; @@ -216,7 +218,7 @@ mod test { struct FakeSession { id: ConsoleSessionUuid, token: String, - silo_user_id: Uuid, + silo_user_id: SiloUserUuid, silo_id: Uuid, time_created: DateTime, time_last_used: DateTime, @@ -226,7 +228,7 @@ mod test { fn id(&self) -> ConsoleSessionUuid { self.id } - fn silo_user_id(&self) -> Uuid { + fn silo_user_id(&self) -> SiloUserUuid { self.silo_user_id } fn silo_id(&self) -> Uuid { @@ -331,7 +333,7 @@ mod test { sessions: Mutex::new(vec![FakeSession { id: ConsoleSessionUuid::new_v4(), token: "abc".to_string(), - silo_user_id: Uuid::new_v4(), + silo_user_id: SiloUserUuid::new_v4(), silo_id: Uuid::new_v4(), time_last_used: Utc::now() - Duration::hours(2), time_created: Utc::now() - Duration::hours(2), @@ -357,7 +359,7 @@ mod test { sessions: Mutex::new(vec![FakeSession { id: ConsoleSessionUuid::new_v4(), token: "abc".to_string(), - silo_user_id: Uuid::new_v4(), + silo_user_id: SiloUserUuid::new_v4(), silo_id: Uuid::new_v4(), time_last_used: Utc::now(), time_created: Utc::now() - Duration::hours(20), @@ -384,7 +386,7 @@ mod test { sessions: Mutex::new(vec![FakeSession { id: ConsoleSessionUuid::new_v4(), token: "abc".to_string(), - silo_user_id: Uuid::new_v4(), + silo_user_id: SiloUserUuid::new_v4(), silo_id: Uuid::new_v4(), time_last_used, time_created: Utc::now(), diff --git a/nexus/auth/src/authn/external/spoof.rs b/nexus/auth/src/authn/external/spoof.rs index 7aba530e63c..8e68691c266 100644 --- a/nexus/auth/src/authn/external/spoof.rs +++ b/nexus/auth/src/authn/external/spoof.rs @@ -18,8 +18,9 @@ use anyhow::anyhow; use async_trait::async_trait; use headers::HeaderMapExt; use headers::authorization::{Authorization, Bearer}; +use omicron_uuid_kinds::SiloUserUuid; use slog::debug; -use uuid::Uuid; +use std::str::FromStr; // This scheme is intended for demos, development, and testing until we have a // more automatable identity provider that can be used for those purposes. 
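The signature changes in this file and the surrounding ones all follow from the same idea: a silo user's id becomes its own type rather than a bare `Uuid`, so it can no longer be confused with a built-in user's id. The sketch below is a simplified, self-contained stand-in for that pattern, assuming only the `uuid` crate; the real wrappers come from `omicron_uuid_kinds` (the `TypedUuid`/`SiloUserKind` machinery visible elsewhere in this patch), so the hand-written newtype here is illustrative, not the crate's API. It mirrors what `authn_spoof_parse_id` does in the hunk that follows: parse a header string directly into a `SiloUserUuid`.

```rust
// Simplified stand-in for omicron_uuid_kinds::SiloUserUuid; the real type is a
// kind-parameterized TypedUuid, not this hand-written newtype.
use std::fmt;
use std::str::FromStr;
use uuid::Uuid;

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct SiloUserUuid(Uuid);

impl SiloUserUuid {
    fn new_v4() -> Self {
        SiloUserUuid(Uuid::new_v4())
    }
}

impl FromStr for SiloUserUuid {
    type Err = uuid::Error;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Same shape as authn_spoof_parse_id after this change: parse the
        // string and wrap it in the silo-user-specific type.
        Uuid::from_str(s).map(SiloUserUuid)
    }
}

impl fmt::Display for SiloUserUuid {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}

fn main() {
    let header = "37b56e4f-8c60-453b-a37e-99be6efe8a89";
    let silo_user_id: SiloUserUuid = header.parse().expect("valid uuid");
    // Round-trips through Display, so make_header_value-style helpers can keep
    // formatting the id as a plain uuid string.
    assert_eq!(silo_user_id.to_string(), header);
    // A bare Uuid (for example, a built-in user id) no longer type-checks
    // where a SiloUserUuid is required, which is the point of the migration.
    let _other = SiloUserUuid::new_v4();
}
```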
@@ -118,7 +119,7 @@ where fn authn_spoof_parse_id( raw_value: Option<&Authorization>, -) -> Result, Reason> { +) -> Result, Reason> { let token = match raw_value { None => return Ok(None), Some(bearer) => bearer.token(), @@ -142,7 +143,7 @@ fn authn_spoof_parse_id( }); } - Uuid::parse_str(str_value) + SiloUserUuid::from_str(str_value) .context("parsing header value as UUID") .map(|silo_user_id| Some(silo_user_id)) .map_err(|source| Reason::BadFormat { source }) @@ -150,7 +151,7 @@ fn authn_spoof_parse_id( /// Returns a value of the `Authorization` header for this actor that will be /// accepted using this scheme -pub fn make_header_value(id: Uuid) -> Authorization { +pub fn make_header_value(id: T) -> Authorization { make_header_value_str(&id.to_string()).unwrap() } @@ -193,6 +194,7 @@ mod test { use headers::HeaderMapExt; use headers::authorization::Bearer; use headers::authorization::Credentials; + use omicron_uuid_kinds::SiloUserUuid; use uuid::Uuid; #[test] @@ -243,14 +245,14 @@ mod test { #[test] fn test_spoof_header_valid() { let test_uuid_str = "37b56e4f-8c60-453b-a37e-99be6efe8a89"; - let test_uuid = test_uuid_str.parse::().unwrap(); + let test_uuid = test_uuid_str.parse::().unwrap(); let test_header = make_header_value(test_uuid); // Success case: the client provided a valid uuid in the header. let success_case = authn_spoof_parse_id(Some(&test_header)); match success_case { - Ok(Some(actor_id)) => { - assert_eq!(actor_id, test_uuid); + Ok(Some(silo_user_id)) => { + assert_eq!(silo_user_id, test_uuid); } _ => { assert!(false); diff --git a/nexus/auth/src/authn/mod.rs b/nexus/auth/src/authn/mod.rs index c79ea352ef7..a2ef8e968ce 100644 --- a/nexus/auth/src/authn/mod.rs +++ b/nexus/auth/src/authn/mod.rs @@ -44,6 +44,8 @@ use nexus_types::external_api::shared::FleetRole; use nexus_types::external_api::shared::SiloRole; use nexus_types::identity::Asset; use omicron_common::api::external::LookupType; +use omicron_uuid_kinds::BuiltInUserUuid; +use omicron_uuid_kinds::SiloUserUuid; use serde::Deserialize; use serde::Serialize; use std::collections::BTreeMap; @@ -197,7 +199,7 @@ impl Context { Context::context_for_builtin_user(USER_SERVICE_BALANCER.id) } - fn context_for_builtin_user(user_builtin_id: Uuid) -> Context { + fn context_for_builtin_user(user_builtin_id: BuiltInUserUuid) -> Context { Context { kind: Kind::Authenticated( Details { actor: Actor::UserBuiltin { user_builtin_id } }, @@ -239,7 +241,7 @@ impl Context { /// Returns an authenticated context for the specific Silo user. Not marked /// as #[cfg(test)] so that this is available in integration tests. pub fn for_test_user( - silo_user_id: Uuid, + silo_user_id: SiloUserUuid, silo_id: Uuid, silo_authn_policy: SiloAuthnPolicy, ) -> Context { @@ -311,35 +313,35 @@ mod test { // The privileges are (or will be) verified in authz tests. 
let authn = Context::privileged_test_user(); let actor = authn.actor().unwrap(); - assert_eq!(actor.actor_id(), USER_TEST_PRIVILEGED.id()); + assert_eq!(actor.silo_user_id(), Some(USER_TEST_PRIVILEGED.id())); let authn = Context::unprivileged_test_user(); let actor = authn.actor().unwrap(); - assert_eq!(actor.actor_id(), USER_TEST_UNPRIVILEGED.id()); + assert_eq!(actor.silo_user_id(), Some(USER_TEST_UNPRIVILEGED.id())); let authn = Context::internal_read(); let actor = authn.actor().unwrap(); - assert_eq!(actor.actor_id(), USER_INTERNAL_READ.id); + assert_eq!(actor.built_in_user_id(), Some(USER_INTERNAL_READ.id)); let authn = Context::external_authn(); let actor = authn.actor().unwrap(); - assert_eq!(actor.actor_id(), USER_EXTERNAL_AUTHN.id); + assert_eq!(actor.built_in_user_id(), Some(USER_EXTERNAL_AUTHN.id)); let authn = Context::internal_db_init(); let actor = authn.actor().unwrap(); - assert_eq!(actor.actor_id(), USER_DB_INIT.id); + assert_eq!(actor.built_in_user_id(), Some(USER_DB_INIT.id)); let authn = Context::internal_service_balancer(); let actor = authn.actor().unwrap(); - assert_eq!(actor.actor_id(), USER_SERVICE_BALANCER.id); + assert_eq!(actor.built_in_user_id(), Some(USER_SERVICE_BALANCER.id)); let authn = Context::internal_saga_recovery(); let actor = authn.actor().unwrap(); - assert_eq!(actor.actor_id(), USER_SAGA_RECOVERY.id); + assert_eq!(actor.built_in_user_id(), Some(USER_SAGA_RECOVERY.id)); let authn = Context::internal_api(); let actor = authn.actor().unwrap(); - assert_eq!(actor.actor_id(), USER_INTERNAL_API.id); + assert_eq!(actor.built_in_user_id(), Some(USER_INTERNAL_API.id)); } } @@ -366,18 +368,11 @@ pub struct Details { /// Who is performing an operation #[derive(Clone, Copy, Deserialize, Eq, PartialEq, Serialize)] pub enum Actor { - UserBuiltin { user_builtin_id: Uuid }, - SiloUser { silo_user_id: Uuid, silo_id: Uuid }, + UserBuiltin { user_builtin_id: BuiltInUserUuid }, + SiloUser { silo_user_id: SiloUserUuid, silo_id: Uuid }, } impl Actor { - pub fn actor_id(&self) -> Uuid { - match self { - Actor::UserBuiltin { user_builtin_id, .. } => *user_builtin_id, - Actor::SiloUser { silo_user_id, .. } => *silo_user_id, - } - } - pub fn silo_id(&self) -> Option { match self { Actor::UserBuiltin { .. } => None, @@ -385,12 +380,19 @@ impl Actor { } } - pub fn silo_user_id(&self) -> Option { + pub fn silo_user_id(&self) -> Option { match self { Actor::UserBuiltin { .. } => None, Actor::SiloUser { silo_user_id, .. } => Some(*silo_user_id), } } + + pub fn built_in_user_id(&self) -> Option { + match self { + Actor::UserBuiltin { user_builtin_id } => Some(*user_builtin_id), + Actor::SiloUser { .. 
} => None, + } + } } impl From<&Actor> for nexus_db_model::IdentityType { diff --git a/nexus/auth/src/authz/actor.rs b/nexus/auth/src/authz/actor.rs index f1ce2695ac5..26f7458b3b8 100644 --- a/nexus/auth/src/authz/actor.rs +++ b/nexus/auth/src/authz/actor.rs @@ -38,8 +38,7 @@ impl oso::PolarClass for AnyActor { }) .add_attribute_getter("authn_actor", |a: &AnyActor| { a.actor.map(|actor| AuthenticatedActor { - actor_id: actor.actor_id(), - silo_id: actor.silo_id(), + actor, roles: a.roles.clone(), silo_policy: a.silo_policy.clone(), }) @@ -50,8 +49,7 @@ impl oso::PolarClass for AnyActor { /// Represents an authenticated [`authn::Context`] for Polar #[derive(Clone, Debug)] pub struct AuthenticatedActor { - actor_id: Uuid, - silo_id: Option, + actor: authn::Actor, roles: RoleSet, silo_policy: Option, } @@ -94,7 +92,7 @@ impl AuthenticatedActor { impl PartialEq for AuthenticatedActor { fn eq(&self, other: &Self) -> bool { - self.actor_id == other.actor_id + self.actor == other.actor } } @@ -106,8 +104,9 @@ impl oso::PolarClass for AuthenticatedActor { .with_equality_check() .add_constant( AuthenticatedActor { - actor_id: authn::USER_DB_INIT.id, - silo_id: None, + actor: authn::Actor::UserBuiltin { + user_builtin_id: authn::USER_DB_INIT.id, + }, roles: RoleSet::new(), silo_policy: None, }, @@ -115,21 +114,26 @@ impl oso::PolarClass for AuthenticatedActor { ) .add_constant( AuthenticatedActor { - actor_id: authn::USER_INTERNAL_API.id, - silo_id: None, + actor: authn::Actor::UserBuiltin { + user_builtin_id: authn::USER_INTERNAL_API.id, + }, roles: RoleSet::new(), silo_policy: None, }, "USER_INTERNAL_API", ) .add_attribute_getter("silo", |a: &AuthenticatedActor| { - a.silo_id.map(|silo_id| { - super::Silo::new( - super::FLEET, - silo_id, - LookupType::ById(silo_id), - ) - }) + match a.actor { + authn::Actor::SiloUser { silo_id, .. } => { + Some(super::Silo::new( + super::FLEET, + silo_id, + LookupType::ById(silo_id), + )) + } + + authn::Actor::UserBuiltin { .. } => None, + } }) .add_method( "confers_fleet_role", @@ -139,7 +143,13 @@ impl oso::PolarClass for AuthenticatedActor { ) .add_method( "equals_silo_user", - |a: &AuthenticatedActor, u: SiloUser| a.actor_id == u.id(), + |a: &AuthenticatedActor, u: SiloUser| match a.actor { + authn::Actor::SiloUser { silo_user_id, .. } => { + silo_user_id == u.id() + } + + authn::Actor::UserBuiltin { .. } => false, + }, ) } } diff --git a/nexus/auth/src/authz/api_resources.rs b/nexus/auth/src/authz/api_resources.rs index b384d9f6be2..575b1459f6d 100644 --- a/nexus/auth/src/authz/api_resources.rs +++ b/nexus/auth/src/authz/api_resources.rs @@ -1228,7 +1228,7 @@ authz_resource! { authz_resource! { name = "UserBuiltin", parent = "Fleet", - primary_key = Uuid, + primary_key = { uuid_kind = BuiltInUserKind }, roles_allowed = false, polar_snippet = FleetChild, } @@ -1256,7 +1256,7 @@ impl ApiResourceWithRolesType for Silo { authz_resource! { name = "SiloUser", parent = "Silo", - primary_key = Uuid, + primary_key = { uuid_kind = SiloUserKind }, roles_allowed = false, polar_snippet = Custom, } @@ -1264,7 +1264,7 @@ authz_resource! { authz_resource! 
{ name = "SiloGroup", parent = "Silo", - primary_key = Uuid, + primary_key = { uuid_kind = SiloGroupKind }, roles_allowed = false, polar_snippet = Custom, } diff --git a/nexus/auth/src/authz/roles.rs b/nexus/auth/src/authz/roles.rs index 0716e05bc71..114da7e3baf 100644 --- a/nexus/auth/src/authz/roles.rs +++ b/nexus/auth/src/authz/roles.rs @@ -39,6 +39,7 @@ use crate::authn; use crate::context::OpContext; use omicron_common::api::external::Error; use omicron_common::api::external::ResourceType; +use omicron_uuid_kinds::GenericUuid; use slog::trace; use std::collections::BTreeSet; use uuid::Uuid; @@ -160,7 +161,14 @@ async fn load_directly_attached_roles( .role_asgn_list_for( opctx, actor.into(), - actor.actor_id(), + match &actor { + authn::Actor::SiloUser { silo_user_id, .. } => { + silo_user_id.into_untyped_uuid() + } + authn::Actor::UserBuiltin { user_builtin_id, .. } => { + user_builtin_id.into_untyped_uuid() + } + }, resource_type, resource_id, ) diff --git a/nexus/auth/src/context.rs b/nexus/auth/src/context.rs index 53930a47b00..8f666cbb0e2 100644 --- a/nexus/auth/src/context.rs +++ b/nexus/auth/src/context.rs @@ -13,6 +13,7 @@ use crate::storage::Storage; use chrono::{DateTime, Utc}; use omicron_common::api::external::Error; use omicron_uuid_kinds::ConsoleSessionUuid; +use omicron_uuid_kinds::SiloUserUuid; use slog::debug; use slog::o; use slog::trace; @@ -127,14 +128,24 @@ impl OpContext { let mut metadata = BTreeMap::new(); let log = if let Some(actor) = authn.actor() { - let actor_id = actor.actor_id(); metadata .insert(String::from("authenticated"), String::from("true")); metadata.insert(String::from("actor"), format!("{:?}", actor)); - log.new( - o!("authenticated" => true, "actor_id" => actor_id.to_string()), - ) + match &actor { + authn::Actor::SiloUser { silo_user_id, silo_id } => { + log.new(o!( + "authenticated" => true, + "silo_user_id" => silo_user_id.to_string(), + "silo_id" => silo_id.to_string(), + )) + } + + authn::Actor::UserBuiltin { user_builtin_id } => log.new(o!( + "authenticated" => true, + "user_builtin_id" => user_builtin_id.to_string(), + )), + } } else { metadata .insert(String::from("authenticated"), String::from("false")); @@ -373,9 +384,8 @@ impl Session for ConsoleSessionWithSiloId { fn id(&self) -> ConsoleSessionUuid { self.console_session.id() } - - fn silo_user_id(&self) -> Uuid { - self.console_session.silo_user_id + fn silo_user_id(&self) -> SiloUserUuid { + self.console_session.silo_user_id() } fn silo_id(&self) -> Uuid { self.silo_id diff --git a/nexus/db-fixed-data/Cargo.toml b/nexus/db-fixed-data/Cargo.toml index a6ac298452c..c9d9c9c8517 100644 --- a/nexus/db-fixed-data/Cargo.toml +++ b/nexus/db-fixed-data/Cargo.toml @@ -21,3 +21,4 @@ nexus-db-model.workspace = true nexus-types.workspace = true omicron-common.workspace = true omicron-workspace-hack.workspace = true +omicron-uuid-kinds.workspace = true diff --git a/nexus/db-fixed-data/src/lib.rs b/nexus/db-fixed-data/src/lib.rs index 4a42b696968..018050ab78c 100644 --- a/nexus/db-fixed-data/src/lib.rs +++ b/nexus/db-fixed-data/src/lib.rs @@ -66,6 +66,18 @@ fn assert_valid_uuid(id: &uuid::Uuid) { }; } +#[cfg(test)] +use omicron_uuid_kinds::GenericUuid; +#[cfg(test)] +use omicron_uuid_kinds::TypedUuid; +#[cfg(test)] +use omicron_uuid_kinds::TypedUuidKind; + +#[cfg(test)] +fn assert_valid_typed_uuid(id: &TypedUuid) { + assert_valid_uuid(&id.into_untyped_uuid()); +} + #[cfg(test)] mod test { use super::FLEET_ID; diff --git a/nexus/db-fixed-data/src/role_assignment.rs 
b/nexus/db-fixed-data/src/role_assignment.rs index 9b676adcf98..aae06d4b7ae 100644 --- a/nexus/db-fixed-data/src/role_assignment.rs +++ b/nexus/db-fixed-data/src/role_assignment.rs @@ -5,7 +5,6 @@ use super::FLEET_ID; use super::user_builtin; -use nexus_db_model::IdentityType; use nexus_db_model::RoleAssignment; use omicron_common::api::external::ResourceType; use std::sync::LazyLock; @@ -17,8 +16,7 @@ pub static BUILTIN_ROLE_ASSIGNMENTS: LazyLock> = // This is a pretty elevated privilege. // TODO-security We should scope this down (or, really, figure out a // better internal authn/authz story). - RoleAssignment::new( - IdentityType::UserBuiltin, + RoleAssignment::new_for_builtin_user( user_builtin::USER_INTERNAL_API.id, ResourceType::Fleet, *FLEET_ID, @@ -30,8 +28,7 @@ pub static BUILTIN_ROLE_ASSIGNMENTS: LazyLock> = // This is necessary as services exist as resources implied by // "FLEET" - if they ever become more fine-grained, this scope // could also become smaller. - RoleAssignment::new( - IdentityType::UserBuiltin, + RoleAssignment::new_for_builtin_user( user_builtin::USER_SERVICE_BALANCER.id, ResourceType::Fleet, *FLEET_ID, @@ -41,8 +38,7 @@ pub static BUILTIN_ROLE_ASSIGNMENTS: LazyLock> = // Fleet. This will grant them the ability to read various control // plane data (like the list of sleds), which is in turn used to // talk to sleds or allocate resources. - RoleAssignment::new( - IdentityType::UserBuiltin, + RoleAssignment::new_for_builtin_user( user_builtin::USER_INTERNAL_READ.id, ResourceType::Fleet, *FLEET_ID, @@ -51,8 +47,7 @@ pub static BUILTIN_ROLE_ASSIGNMENTS: LazyLock> = // The "external-authenticator" user gets the // "external-authenticator" role on the sole fleet. This grants // them the ability to create sessions. - RoleAssignment::new( - IdentityType::UserBuiltin, + RoleAssignment::new_for_builtin_user( user_builtin::USER_EXTERNAL_AUTHN.id, ResourceType::Fleet, *FLEET_ID, diff --git a/nexus/db-fixed-data/src/silo_user.rs b/nexus/db-fixed-data/src/silo_user.rs index 13e5680a19c..38252826f37 100644 --- a/nexus/db-fixed-data/src/silo_user.rs +++ b/nexus/db-fixed-data/src/silo_user.rs @@ -29,15 +29,13 @@ pub static ROLE_ASSIGNMENTS_PRIVILEGED: LazyLock> = vec![ // The "test-privileged" user gets the "admin" role on the sole // Fleet as well as the default Silo. - model::RoleAssignment::new( - model::IdentityType::SiloUser, + model::RoleAssignment::new_for_silo_user( USER_TEST_PRIVILEGED.id(), ResourceType::Fleet, *crate::FLEET_ID, "admin", ), - model::RoleAssignment::new( - model::IdentityType::SiloUser, + model::RoleAssignment::new_for_silo_user( USER_TEST_PRIVILEGED.id(), ResourceType::Silo, DEFAULT_SILO_ID, @@ -62,14 +60,14 @@ pub static USER_TEST_UNPRIVILEGED: LazyLock = #[cfg(test)] mod test { - use super::super::assert_valid_uuid; + use super::super::assert_valid_typed_uuid; use super::USER_TEST_PRIVILEGED; use super::USER_TEST_UNPRIVILEGED; use nexus_types::identity::Asset; #[test] fn test_silo_user_ids_are_valid() { - assert_valid_uuid(&USER_TEST_PRIVILEGED.id()); - assert_valid_uuid(&USER_TEST_UNPRIVILEGED.id()); + assert_valid_typed_uuid(&USER_TEST_PRIVILEGED.id()); + assert_valid_typed_uuid(&USER_TEST_UNPRIVILEGED.id()); } } diff --git a/nexus/db-fixed-data/src/user_builtin.rs b/nexus/db-fixed-data/src/user_builtin.rs index 08236af5db3..1194fe23a53 100644 --- a/nexus/db-fixed-data/src/user_builtin.rs +++ b/nexus/db-fixed-data/src/user_builtin.rs @@ -4,11 +4,11 @@ //! 
Built-in users use omicron_common::api; +use omicron_uuid_kinds::BuiltInUserUuid; use std::sync::LazyLock; -use uuid::Uuid; pub struct UserBuiltinConfig { - pub id: Uuid, + pub id: BuiltInUserUuid, pub name: api::external::Name, pub description: &'static str, } @@ -20,7 +20,9 @@ impl UserBuiltinConfig { description: &'static str, ) -> UserBuiltinConfig { UserBuiltinConfig { - id: id.parse().expect("invalid uuid for builtin user id"), + id: id + .parse() + .expect("invalid built-in user uuid for builtin user id"), name: name.parse().expect("invalid name for builtin user name"), description, } @@ -94,7 +96,7 @@ pub static USER_EXTERNAL_AUTHN: LazyLock = #[cfg(test)] mod test { - use super::super::assert_valid_uuid; + use super::super::assert_valid_typed_uuid; use super::USER_DB_INIT; use super::USER_EXTERNAL_AUTHN; use super::USER_INTERNAL_API; @@ -104,11 +106,11 @@ mod test { #[test] fn test_builtin_user_ids_are_valid() { - assert_valid_uuid(&USER_SERVICE_BALANCER.id); - assert_valid_uuid(&USER_DB_INIT.id); - assert_valid_uuid(&USER_INTERNAL_API.id); - assert_valid_uuid(&USER_EXTERNAL_AUTHN.id); - assert_valid_uuid(&USER_INTERNAL_READ.id); - assert_valid_uuid(&USER_SAGA_RECOVERY.id); + assert_valid_typed_uuid(&USER_SERVICE_BALANCER.id); + assert_valid_typed_uuid(&USER_DB_INIT.id); + assert_valid_typed_uuid(&USER_INTERNAL_API.id); + assert_valid_typed_uuid(&USER_EXTERNAL_AUTHN.id); + assert_valid_typed_uuid(&USER_INTERNAL_READ.id); + assert_valid_typed_uuid(&USER_SAGA_RECOVERY.id); } } diff --git a/nexus/db-lookup/src/lookup.rs b/nexus/db-lookup/src/lookup.rs index b326491ca0a..772f67e60a6 100644 --- a/nexus/db-lookup/src/lookup.rs +++ b/nexus/db-lookup/src/lookup.rs @@ -17,6 +17,7 @@ use async_bb8_diesel::AsyncRunQueryDsl; use db_macros::lookup_resource; use diesel::{ExpressionMethods, QueryDsl, SelectableHelper}; use ipnetwork::IpNetwork; +use nexus_auth::authn; use nexus_auth::authz; use nexus_auth::context::OpContext; use nexus_db_errors::{ErrorHandler, public_error_from_diesel}; @@ -26,17 +27,7 @@ use nexus_types::identity::Resource; use omicron_common::api::external::Error; use omicron_common::api::external::InternalContext; use omicron_common::api::external::{LookupResult, LookupType, ResourceType}; -use omicron_uuid_kinds::AccessTokenKind; -use omicron_uuid_kinds::AlertReceiverUuid; -use omicron_uuid_kinds::AlertUuid; -use omicron_uuid_kinds::ConsoleSessionUuid; -use omicron_uuid_kinds::PhysicalDiskUuid; -use omicron_uuid_kinds::SupportBundleUuid; -use omicron_uuid_kinds::TufArtifactKind; -use omicron_uuid_kinds::TufRepoKind; -use omicron_uuid_kinds::TufTrustRootUuid; -use omicron_uuid_kinds::TypedUuid; -use omicron_uuid_kinds::WebhookSecretUuid; +use omicron_uuid_kinds::*; use slog::{error, trace}; use uuid::Uuid; @@ -257,12 +248,28 @@ impl<'a> LookupPath<'a> { } /// Select a resource of type SiloUser, identified by its id - pub fn silo_user_id(self, id: Uuid) -> SiloUser<'a> { + pub fn silo_user_id(self, id: SiloUserUuid) -> SiloUser<'a> { SiloUser::PrimaryKey(Root { lookup_root: self }, id) } + /// Select a resource of type SiloUser that matches an authenticated Actor + pub fn silo_user_actor( + self, + actor: &'a authn::Actor, + ) -> Result, Error> { + match actor { + authn::Actor::SiloUser { silo_user_id, .. } => Ok( + SiloUser::PrimaryKey(Root { lookup_root: self }, *silo_user_id), + ), + + authn::Actor::UserBuiltin { .. 
} => Err( + Error::non_resourcetype_not_found("could not find silo user"), + ), + } + } + /// Select a resource of type SiloGroup, identified by its id - pub fn silo_group_id(self, id: Uuid) -> SiloGroup<'a> { + pub fn silo_group_id(self, id: SiloGroupUuid) -> SiloGroup<'a> { SiloGroup::PrimaryKey(Root { lookup_root: self }, id) } @@ -368,8 +375,8 @@ impl<'a> LookupPath<'a> { TufArtifact::PrimaryKey(Root { lookup_root: self }, id) } - /// Select a resource of type UserBuiltin, identified by its `name` - pub fn user_builtin_id<'b>(self, id: Uuid) -> UserBuiltin<'b> + /// Select a resource of type UserBuiltin, identified by its `id` + pub fn user_builtin_id<'b>(self, id: BuiltInUserUuid) -> UserBuiltin<'b> where 'a: 'b, { @@ -532,7 +539,7 @@ lookup_resource! { ancestors = [ "Silo" ], lookup_by_name = false, soft_deletes = true, - primary_key_columns = [ { column_name = "id", rust_type = Uuid } ], + primary_key_columns = [ { column_name = "id", uuid_kind = SiloUserKind } ], visible_outside_silo = true } @@ -541,7 +548,7 @@ lookup_resource! { ancestors = [ "Silo" ], lookup_by_name = false, soft_deletes = true, - primary_key_columns = [ { column_name = "id", rust_type = Uuid } ] + primary_key_columns = [ { column_name = "id", uuid_kind = SiloGroupKind } ] } lookup_resource! { @@ -841,7 +848,7 @@ lookup_resource! { ancestors = [], lookup_by_name = true, soft_deletes = false, - primary_key_columns = [ { column_name = "id", rust_type = Uuid } ] + primary_key_columns = [ { column_name = "id", uuid_kind = BuiltInUserKind } ] } lookup_resource! { diff --git a/nexus/db-macros/src/lookup.rs b/nexus/db-macros/src/lookup.rs index 120a81949f2..def92875e18 100644 --- a/nexus/db-macros/src/lookup.rs +++ b/nexus/db-macros/src/lookup.rs @@ -33,6 +33,9 @@ pub struct Input { /// ordered list of resources that are ancestors of this resource, starting /// with the top of the hierarchy /// (e.g., for an Instance, this would be `[ "Silo", "Project" ]` + /// + /// if the resource name has a * as the last character, the primary key is a + /// typed uuid. 
ancestors: Vec<String>, /// whether lookup by name is supported (usually within the parent collection) lookup_by_name: bool, @@ -138,22 +141,22 @@ impl Config { fn for_input(input: Input) -> syn::Result<Config> { let resource = Resource::for_name(&input.name); + let ancestors: Vec<Resource> = + input.ancestors.iter().map(|s| Resource::for_name(s)).collect(); + let mut path_types: Vec<_> = - input.ancestors.iter().map(|a| format_ident!("{}", a)).collect(); + ancestors.iter().map(|r| format_ident!("{}", r.name)).collect(); path_types.push(resource.name.clone()); - let mut path_authz_names: Vec<_> = input - .ancestors + let mut path_authz_names: Vec<_> = ancestors .iter() - .map(|a| { - format_ident!("authz_{}", heck::AsSnakeCase(&a).to_string()) - }) + .map(|r| format_ident!("{}", r.authz_name)) .collect(); path_authz_names.push(resource.authz_name.clone()); - let parent = input.ancestors.last().map(|s| Resource::for_name(s)); + let parent = ancestors.last().cloned(); let silo_restricted = !input.visible_outside_silo - && input.ancestors.iter().any(|s| s == "Silo"); + && ancestors.iter().any(|r| r.name == "Silo"); let primary_key_columns: Vec<_> = input .primary_key_columns @@ -176,6 +179,7 @@ impl Config { /// Information about a resource (either the one we're generating or an /// ancestor in its path) +#[derive(Clone)] struct Resource { /// PascalCase resource name itself (e.g., `Project`) /// @@ -185,14 +189,24 @@ struct Resource { name_as_snake: String, /// identifier for an authz object for this resource (e.g., `authz_project`) authz_name: syn::Ident, + /// whether the primary key uses a typed uuid + primary_key_is_typed_uuid: bool, } impl Resource { fn for_name(name: &str) -> Resource { + // In order to simplify the lookup_resource macro invocation, match on the + // resource name here to determine if the associated database column is + // a typed id or not + let primary_key_is_typed_uuid = match name { + "SiloUser" => true, + _ => false, + }; + let name_as_snake = heck::AsSnakeCase(&name).to_string(); let name = format_ident!("{}", name); let authz_name = format_ident!("authz_{}", name_as_snake); - Resource { name, authz_name, name_as_snake } + Resource { name, authz_name, name_as_snake, primary_key_is_typed_uuid } } } @@ -784,13 +798,33 @@ fn generate_database_functions(config: &Config) -> TokenStream { ( quote! { #parent_authz_name: &authz::#parent_resource_name, }, quote! { #parent_authz_name, }, - quote! { - let (#(#ancestors_authz_names,)* _) = - #parent_resource_name::lookup_by_id_no_authz( - opctx, datastore, &db_row.#parent_id.into() - ).await?; + // If the parent's id is a typed uuid, there will be a method + // converting the db typed uuid into an "external" typed uuid. Use + // that for `lookup_by_id_no_authz`. + if p.primary_key_is_typed_uuid { + quote! { + let (#(#ancestors_authz_names,)* _) = + #parent_resource_name::lookup_by_id_no_authz( + opctx, datastore, &db_row.#parent_id() + ).await?; + } + } else { + quote! { + let (#(#ancestors_authz_names,)* _) = + #parent_resource_name::lookup_by_id_no_authz( + opctx, datastore, &db_row.#parent_id.into() + ).await?; + } + }, + // If the parent's id is a typed uuid, then the `to_db_typed_uuid` + // method is required to convert to the db typed uuid + if p.primary_key_is_typed_uuid { + quote! { .filter(dsl::#parent_id.eq( + ::nexus_db_model::to_db_typed_uuid(#parent_authz_name.id()) + )) } + } else { + quote! { .filter(dsl::#parent_id.eq(#parent_authz_name.id())) } }, - quote! { .filter(dsl::#parent_id.eq(#parent_authz_name.id())) }, quote!
{ #parent_authz_name }, ) } else { diff --git a/nexus/db-model/src/audit_log.rs b/nexus/db-model/src/audit_log.rs index 52a53429f16..6541f64516e 100644 --- a/nexus/db-model/src/audit_log.rs +++ b/nexus/db-model/src/audit_log.rs @@ -13,14 +13,17 @@ use ipnetwork::IpNetwork; use nexus_db_schema::schema::{audit_log, audit_log_complete}; use nexus_types::external_api::views; use omicron_common::api::external::Error; +use omicron_uuid_kinds::BuiltInUserUuid; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SiloUserUuid; use serde::{Deserialize, Serialize}; use uuid::Uuid; /// Actor information for audit log initialization. Inspired by `authn::Actor` #[derive(Clone, Debug)] pub enum AuditLogActor { - UserBuiltin { user_builtin_id: Uuid }, - SiloUser { silo_user_id: Uuid, silo_id: Uuid }, + UserBuiltin { user_builtin_id: BuiltInUserUuid }, + SiloUser { silo_user_id: SiloUserUuid, silo_id: Uuid }, Unauthenticated, } @@ -126,12 +129,16 @@ impl From for AuditLogEntryInit { } = params; let (actor_id, actor_silo_id, actor_kind) = match actor { - AuditLogActor::UserBuiltin { user_builtin_id } => { - (Some(user_builtin_id), None, AuditLogActorKind::UserBuiltin) - } - AuditLogActor::SiloUser { silo_user_id, silo_id } => { - (Some(silo_user_id), Some(silo_id), AuditLogActorKind::SiloUser) - } + AuditLogActor::UserBuiltin { user_builtin_id } => ( + Some(user_builtin_id.into_untyped_uuid()), + None, + AuditLogActorKind::UserBuiltin, + ), + AuditLogActor::SiloUser { silo_user_id, silo_id } => ( + Some(silo_user_id.into_untyped_uuid()), + Some(silo_id), + AuditLogActorKind::SiloUser, + ), AuditLogActor::Unauthenticated => { (None, None, AuditLogActorKind::Unauthenticated) } @@ -274,7 +281,11 @@ impl TryFrom for views::AuditLogEntry { "UserBuiltin actor missing actor_id", ) })?; - views::AuditLogEntryActor::UserBuiltin { user_builtin_id } + views::AuditLogEntryActor::UserBuiltin { + user_builtin_id: BuiltInUserUuid::from_untyped_uuid( + user_builtin_id, + ), + } } AuditLogActorKind::SiloUser => { let silo_user_id = entry.actor_id.ok_or_else(|| { @@ -286,7 +297,9 @@ impl TryFrom for views::AuditLogEntry { ) })?; views::AuditLogEntryActor::SiloUser { - silo_user_id, + silo_user_id: SiloUserUuid::from_untyped_uuid( + silo_user_id, + ), silo_id, } } diff --git a/nexus/db-model/src/console_session.rs b/nexus/db-model/src/console_session.rs index 6332cb26638..db0ef14d7de 100644 --- a/nexus/db-model/src/console_session.rs +++ b/nexus/db-model/src/console_session.rs @@ -5,9 +5,11 @@ use chrono::{DateTime, Utc}; use nexus_db_schema::schema::console_session; use nexus_types::external_api::views; +use omicron_uuid_kinds::SiloUserKind; +use omicron_uuid_kinds::SiloUserUuid; use omicron_uuid_kinds::{ConsoleSessionKind, ConsoleSessionUuid, GenericUuid}; -use uuid::Uuid; +use crate::to_db_typed_uuid; use crate::typed_uuid::DbTypedUuid; // TODO: `struct SessionToken(String)` for session token @@ -19,24 +21,37 @@ pub struct ConsoleSession { pub token: String, pub time_created: DateTime, pub time_last_used: DateTime, - pub silo_user_id: Uuid, + silo_user_id: DbTypedUuid, } impl ConsoleSession { - pub fn new(token: String, silo_user_id: Uuid) -> Self { + pub fn new(token: String, silo_user_id: SiloUserUuid) -> Self { let now = Utc::now(); + Self::new_with_times(token, silo_user_id, now, now) + } + + pub fn new_with_times( + token: String, + silo_user_id: SiloUserUuid, + time_created: DateTime, + time_last_used: DateTime, + ) -> Self { Self { id: ConsoleSessionUuid::new_v4().into(), token, - silo_user_id, - 
time_last_used: now, - time_created: now, + silo_user_id: to_db_typed_uuid(silo_user_id), + time_created, + time_last_used, } } pub fn id(&self) -> ConsoleSessionUuid { self.id.0 } + + pub fn silo_user_id(&self) -> SiloUserUuid { + self.silo_user_id.into() + } } impl From for views::ConsoleSession { diff --git a/nexus/db-model/src/device_auth.rs b/nexus/db-model/src/device_auth.rs index 05db1d8afdb..7cca0adb0ae 100644 --- a/nexus/db-model/src/device_auth.rs +++ b/nexus/db-model/src/device_auth.rs @@ -11,7 +11,9 @@ use nexus_db_schema::schema::{device_access_token, device_auth_request}; use chrono::{DateTime, Duration, Utc}; use nexus_types::external_api::views; -use omicron_uuid_kinds::{AccessTokenKind, GenericUuid, TypedUuid}; +use omicron_uuid_kinds::{ + AccessTokenKind, GenericUuid, SiloUserKind, SiloUserUuid, TypedUuid, +}; use rand::{Rng, RngCore, SeedableRng, rngs::StdRng}; use std::num::NonZeroU32; use uuid::Uuid; @@ -135,7 +137,7 @@ pub struct DeviceAccessToken { pub token: String, pub client_id: Uuid, pub device_code: String, - pub silo_user_id: Uuid, + silo_user_id: DbTypedUuid, pub time_requested: DateTime, pub time_created: DateTime, pub time_expires: Option>, @@ -146,7 +148,7 @@ impl DeviceAccessToken { client_id: Uuid, device_code: String, time_requested: DateTime, - silo_user_id: Uuid, + silo_user_id: SiloUserUuid, time_expires: Option>, ) -> Self { let now = Utc::now(); @@ -158,7 +160,7 @@ impl DeviceAccessToken { token: generate_token(), client_id, device_code, - silo_user_id, + silo_user_id: silo_user_id.into(), time_requested, time_created: now, time_expires, @@ -173,6 +175,10 @@ impl DeviceAccessToken { self.time_expires = Some(time); self } + + pub fn silo_user_id(&self) -> SiloUserUuid { + self.silo_user_id.into() + } } impl From for views::DeviceAccessTokenGrant { diff --git a/nexus/db-model/src/role_assignment.rs b/nexus/db-model/src/role_assignment.rs index b733f2e8021..8407e3769a7 100644 --- a/nexus/db-model/src/role_assignment.rs +++ b/nexus/db-model/src/role_assignment.rs @@ -7,6 +7,9 @@ use anyhow::anyhow; use nexus_db_schema::schema::role_assignment; use nexus_types::external_api::shared; use omicron_common::api::external::Error; +use omicron_uuid_kinds::BuiltInUserUuid; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SiloUserUuid; use serde::{Deserialize, Serialize}; use uuid::Uuid; @@ -81,6 +84,38 @@ impl RoleAssignment { role_name: String::from(role_name), } } + + /// Creates a new database RoleAssignment object for a silo user + pub fn new_for_silo_user( + user_id: SiloUserUuid, + resource_type: omicron_common::api::external::ResourceType, + resource_id: Uuid, + role_name: &str, + ) -> Self { + Self::new( + IdentityType::SiloUser, + user_id.into_untyped_uuid(), + resource_type, + resource_id, + role_name, + ) + } + + /// Creates a new database RoleAssignment object for a built-in user + pub fn new_for_builtin_user( + user_id: BuiltInUserUuid, + resource_type: omicron_common::api::external::ResourceType, + resource_id: Uuid, + role_name: &str, + ) -> Self { + Self::new( + IdentityType::UserBuiltin, + user_id.into_untyped_uuid(), + resource_type, + resource_id, + role_name, + ) + } } impl TryFrom diff --git a/nexus/db-model/src/silo_group.rs b/nexus/db-model/src/silo_group.rs index 1f0ec68552d..384b74edf53 100644 --- a/nexus/db-model/src/silo_group.rs +++ b/nexus/db-model/src/silo_group.rs @@ -2,15 +2,22 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+use crate::DbTypedUuid; +use crate::to_db_typed_uuid; use db_macros::Asset; use nexus_db_schema::schema::{silo_group, silo_group_membership}; use nexus_types::external_api::views; use nexus_types::identity::Asset; +use omicron_uuid_kinds::SiloGroupKind; +use omicron_uuid_kinds::SiloGroupUuid; +use omicron_uuid_kinds::SiloUserKind; +use omicron_uuid_kinds::SiloUserUuid; use uuid::Uuid; /// Describes a silo group within the database. #[derive(Asset, Queryable, Insertable, Debug, Selectable)] #[diesel(table_name = silo_group)] +#[asset(uuid_kind = SiloGroupKind)] pub struct SiloGroup { #[diesel(embed)] identity: SiloGroupIdentity, @@ -22,7 +29,7 @@ pub struct SiloGroup { } impl SiloGroup { - pub fn new(id: Uuid, silo_id: Uuid, external_id: String) -> Self { + pub fn new(id: SiloGroupUuid, silo_id: Uuid, external_id: String) -> Self { Self { identity: SiloGroupIdentity::new(id), silo_id, external_id } } } @@ -31,13 +38,19 @@ impl SiloGroup { #[derive(Queryable, Insertable, Debug, Selectable)] #[diesel(table_name = silo_group_membership)] pub struct SiloGroupMembership { - pub silo_group_id: Uuid, - pub silo_user_id: Uuid, + pub silo_group_id: DbTypedUuid, + pub silo_user_id: DbTypedUuid, } impl SiloGroupMembership { - pub fn new(silo_group_id: Uuid, silo_user_id: Uuid) -> Self { - Self { silo_group_id, silo_user_id } + pub fn new( + silo_group_id: SiloGroupUuid, + silo_user_id: SiloUserUuid, + ) -> Self { + Self { + silo_group_id: to_db_typed_uuid(silo_group_id), + silo_user_id: to_db_typed_uuid(silo_user_id), + } } } diff --git a/nexus/db-model/src/silo_user.rs b/nexus/db-model/src/silo_user.rs index b00ec9a3f53..bcf31aeff48 100644 --- a/nexus/db-model/src/silo_user.rs +++ b/nexus/db-model/src/silo_user.rs @@ -6,11 +6,13 @@ use db_macros::Asset; use nexus_db_schema::schema::silo_user; use nexus_types::external_api::views; use nexus_types::identity::Asset; +use omicron_uuid_kinds::SiloUserUuid; use uuid::Uuid; /// Describes a silo user within the database. #[derive(Asset, Queryable, Insertable, Debug, Selectable)] #[diesel(table_name = silo_user)] +#[asset(uuid_kind = SiloUserKind)] pub struct SiloUser { #[diesel(embed)] identity: SiloUserIdentity, @@ -23,7 +25,11 @@ pub struct SiloUser { } impl SiloUser { - pub fn new(silo_id: Uuid, user_id: Uuid, external_id: String) -> Self { + pub fn new( + silo_id: Uuid, + user_id: SiloUserUuid, + external_id: String, + ) -> Self { Self { identity: SiloUserIdentity::new(user_id), time_deleted: None, diff --git a/nexus/db-model/src/silo_user_password_hash.rs b/nexus/db-model/src/silo_user_password_hash.rs index 70edad3deeb..0d6b65fc632 100644 --- a/nexus/db-model/src/silo_user_password_hash.rs +++ b/nexus/db-model/src/silo_user_password_hash.rs @@ -2,14 +2,17 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. +use crate::DbTypedUuid; +use crate::to_db_typed_uuid; use diesel::backend::Backend; use diesel::deserialize::{self, FromSql}; use diesel::serialize::{self, ToSql}; use diesel::sql_types; use nexus_db_schema::schema::silo_user_password_hash; +use omicron_uuid_kinds::SiloUserKind; +use omicron_uuid_kinds::SiloUserUuid; use parse_display::Display; use ref_cast::RefCast; -use uuid::Uuid; /// Newtype wrapper around [`omicron_passwords::PasswordHashString`]. 
#[derive( @@ -58,14 +61,22 @@ where #[derive(Queryable, Insertable, Debug, Selectable)] #[diesel(table_name = silo_user_password_hash)] pub struct SiloUserPasswordHash { - pub silo_user_id: Uuid, + silo_user_id: DbTypedUuid<SiloUserKind>, pub hash: PasswordHashString, pub time_created: chrono::DateTime, } impl SiloUserPasswordHash { - pub fn new(silo_user_id: Uuid, hash: PasswordHashString) -> Self { - Self { silo_user_id, hash, time_created: chrono::Utc::now() } + pub fn new(silo_user_id: SiloUserUuid, hash: PasswordHashString) -> Self { + Self { + silo_user_id: to_db_typed_uuid(silo_user_id), + hash, + time_created: chrono::Utc::now(), + } + } + + pub fn silo_user_id(&self) -> SiloUserUuid { + self.silo_user_id.into() + } } diff --git a/nexus/db-model/src/ssh_key.rs b/nexus/db-model/src/ssh_key.rs index 44c228898e7..869d21fb8ec 100644 --- a/nexus/db-model/src/ssh_key.rs +++ b/nexus/db-model/src/ssh_key.rs @@ -8,8 +8,13 @@ use nexus_db_schema::schema::ssh_key; use nexus_types::external_api::params; use nexus_types::external_api::views; use nexus_types::identity::Resource; +use omicron_uuid_kinds::SiloUserKind; +use omicron_uuid_kinds::SiloUserUuid; use uuid::Uuid; +use crate::DbTypedUuid; +use crate::to_db_typed_uuid; + /// Describes a user's public SSH key within the database. #[derive(Clone, Debug, Insertable, Queryable, Resource, Selectable)] #[diesel(table_name = ssh_key)] @@ -17,33 +22,40 @@ pub struct SshKey { #[diesel(embed)] identity: SshKeyIdentity, - pub silo_user_id: Uuid, + silo_user_id: DbTypedUuid<SiloUserKind>, pub public_key: String, } impl SshKey { - pub fn new(silo_user_id: Uuid, params: params::SshKeyCreate) -> Self { + pub fn new( + silo_user_id: SiloUserUuid, + params: params::SshKeyCreate, + ) -> Self { Self::new_with_id(Uuid::new_v4(), silo_user_id, params) } pub fn new_with_id( id: Uuid, - silo_user_id: Uuid, + silo_user_id: SiloUserUuid, params: params::SshKeyCreate, ) -> Self { Self { identity: SshKeyIdentity::new(id, params.identity), - silo_user_id, + silo_user_id: to_db_typed_uuid(silo_user_id), public_key: params.public_key, } } + + pub fn silo_user_id(&self) -> SiloUserUuid { + self.silo_user_id.into() + } } impl From<SshKey> for views::SshKey { fn from(ssh_key: SshKey) -> Self { Self { identity: ssh_key.identity(), - silo_user_id: ssh_key.silo_user_id, + silo_user_id: ssh_key.silo_user_id(), public_key: ssh_key.public_key, } } diff --git a/nexus/db-model/src/user_builtin.rs b/nexus/db-model/src/user_builtin.rs index f9c386f4dac..fc1e266719b 100644 --- a/nexus/db-model/src/user_builtin.rs +++ b/nexus/db-model/src/user_builtin.rs @@ -7,11 +7,12 @@ use nexus_db_schema::schema::user_builtin; use nexus_types::external_api::params; use nexus_types::external_api::views; use nexus_types::identity::Resource; -use uuid::Uuid; +use omicron_uuid_kinds::BuiltInUserUuid; /// Describes a built-in user, as stored in the database #[derive(Queryable, Insertable, Debug, Resource, Selectable)] #[diesel(table_name = user_builtin)] +#[resource(uuid_kind = BuiltInUserKind)] pub struct UserBuiltin { #[diesel(embed)] pub identity: UserBuiltinIdentity, @@ -19,7 +20,7 @@ impl UserBuiltin { /// Creates a new database UserBuiltin object.
- pub fn new(id: Uuid, params: params::UserBuiltinCreate) -> Self { + pub fn new(id: BuiltInUserUuid, params: params::UserBuiltinCreate) -> Self { Self { identity: UserBuiltinIdentity::new(id, params.identity) } } } diff --git a/nexus/db-queries/src/db/datastore/console_session.rs b/nexus/db-queries/src/db/datastore/console_session.rs index 24598ab2140..99cd9d93b24 100644 --- a/nexus/db-queries/src/db/datastore/console_session.rs +++ b/nexus/db-queries/src/db/datastore/console_session.rs @@ -9,6 +9,7 @@ use crate::authn; use crate::authz; use crate::context::OpContext; use crate::db::model::ConsoleSession; +use crate::db::model::to_db_typed_uuid; use crate::db::pagination::paginated; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::TimeDelta; @@ -125,7 +126,7 @@ impl DataStore { })?; let (.., db_silo_user) = LookupPath::new(opctx, self) - .silo_user_id(console_session.silo_user_id) + .silo_user_id(console_session.silo_user_id()) .fetch() .await .map_err(|e| { @@ -186,7 +187,7 @@ impl DataStore { use nexus_db_schema::schema::console_session::dsl; paginated(dsl::console_session, dsl::id, &pagparams) - .filter(dsl::silo_user_id.eq(user_id)) + .filter(dsl::silo_user_id.eq(to_db_typed_uuid(user_id))) // session is not expired according to abs timeout .filter(dsl::time_created.ge(now - abs_ttl)) // session is also not expired according to idle timeout @@ -211,7 +212,7 @@ impl DataStore { use nexus_db_schema::schema::console_session; diesel::delete(console_session::table) - .filter(console_session::silo_user_id.eq(user_id)) + .filter(console_session::silo_user_id.eq(to_db_typed_uuid(user_id))) .execute_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) diff --git a/nexus/db-queries/src/db/datastore/device_auth.rs b/nexus/db-queries/src/db/datastore/device_auth.rs index 120f2891dc3..c49042c7f81 100644 --- a/nexus/db-queries/src/db/datastore/device_auth.rs +++ b/nexus/db-queries/src/db/datastore/device_auth.rs @@ -9,6 +9,7 @@ use crate::authz; use crate::context::OpContext; use crate::db::model::DeviceAccessToken; use crate::db::model::DeviceAuthRequest; +use crate::db::model::to_db_typed_uuid; use crate::db::pagination::paginated; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; @@ -92,7 +93,7 @@ impl DataStore { authz_user: &authz::SiloUser, access_token: DeviceAccessToken, ) -> CreateResult { - assert_eq!(authz_user.id(), access_token.silo_user_id); + assert_eq!(authz_user.id(), access_token.silo_user_id()); opctx.authorize(authz::Action::Delete, authz_request).await?; opctx.authorize(authz::Action::CreateChild, authz_user).await?; @@ -198,9 +199,18 @@ impl DataStore { .actor_required() .internal_context("listing current user's tokens")?; + let silo_user_id = match actor.silo_user_id() { + Some(silo_user_id) => silo_user_id, + None => { + return Err(Error::non_resourcetype_not_found( + "could not find silo user", + ))?; + } + }; + use nexus_db_schema::schema::device_access_token::dsl; paginated(dsl::device_access_token, dsl::id, &pagparams) - .filter(dsl::silo_user_id.eq(actor.actor_id())) + .filter(dsl::silo_user_id.eq(to_db_typed_uuid(silo_user_id))) // we don't have time_deleted on tokens. unfortunately this is not // indexed well. maybe it can be! 
.filter( @@ -227,7 +237,7 @@ impl DataStore { use nexus_db_schema::schema::device_access_token::dsl; paginated(dsl::device_access_token, dsl::id, &pagparams) - .filter(dsl::silo_user_id.eq(silo_user_id)) + .filter(dsl::silo_user_id.eq(to_db_typed_uuid(silo_user_id))) // we don't have time_deleted on tokens. unfortunately this is not // indexed well. maybe it can be! .filter( @@ -251,9 +261,18 @@ impl DataStore { .actor_required() .internal_context("deleting current user's token")?; + let silo_user_id = match actor.silo_user_id() { + Some(silo_user_id) => silo_user_id, + None => { + return Err(Error::non_resourcetype_not_found( + "could not find silo user", + ))?; + } + }; + use nexus_db_schema::schema::device_access_token::dsl; let num_deleted = diesel::delete(dsl::device_access_token) - .filter(dsl::silo_user_id.eq(actor.actor_id())) + .filter(dsl::silo_user_id.eq(to_db_typed_uuid(silo_user_id))) .filter(dsl::id.eq(token_id)) .execute_async(&*self.pool_connection_authorized(opctx).await?) .await @@ -279,11 +298,13 @@ impl DataStore { // target user's own silo in particular opctx.authorize(authz::Action::Modify, authz_token_list).await?; + let silo_user_id = authz_token_list.silo_user().id(); + use nexus_db_schema::schema::device_access_token; diesel::delete(device_access_token::table) .filter( device_access_token::silo_user_id - .eq(authz_token_list.silo_user().id()), + .eq(to_db_typed_uuid(silo_user_id)), ) .execute_async(&*self.pool_connection_authorized(opctx).await?) .await diff --git a/nexus/db-queries/src/db/datastore/identity_provider.rs b/nexus/db-queries/src/db/datastore/identity_provider.rs index cb5268f22f2..c87371eafb4 100644 --- a/nexus/db-queries/src/db/datastore/identity_provider.rs +++ b/nexus/db-queries/src/db/datastore/identity_provider.rs @@ -104,37 +104,41 @@ impl DataStore { &self, opctx: &OpContext, authz_idp_list: &authz::SiloIdentityProviderList, - provider: db::model::SamlIdentityProvider, + saml_provider: db::model::SamlIdentityProvider, ) -> CreateResult { opctx.authorize(authz::Action::CreateChild, authz_idp_list).await?; - assert_eq!(provider.silo_id, authz_idp_list.silo().id()); + assert_eq!(saml_provider.silo_id, authz_idp_list.silo().id()); - let name = provider.identity().name.to_string(); + let name = saml_provider.identity().name.to_string(); let conn = self.pool_connection_authorized(opctx).await?; + // Identity providers have two records, one generic, and one + // specialized. Create the generic one from the specialized one here. 
+ let provider = db::model::IdentityProvider { + identity: db::model::IdentityProviderIdentity { + id: saml_provider.identity.id, + name: saml_provider.identity.name.clone(), + description: saml_provider.identity.description.clone(), + time_created: saml_provider.identity.time_created, + time_modified: saml_provider.identity.time_modified, + time_deleted: saml_provider.identity.time_deleted, + }, + silo_id: saml_provider.silo_id, + provider_type: db::model::IdentityProviderType::Saml, + }; + self.transaction_retry_wrapper("saml_identity_provider_create") .transaction(&conn, |conn| { + let saml_provider = saml_provider.clone(); let provider = provider.clone(); + async move { // insert silo identity provider record with type Saml - use nexus_db_schema::schema::identity_provider::dsl as idp_dsl; + use nexus_db_schema::schema::identity_provider::dsl as + idp_dsl; + diesel::insert_into(idp_dsl::identity_provider) - .values(db::model::IdentityProvider { - identity: db::model::IdentityProviderIdentity { - id: provider.identity.id, - name: provider.identity.name.clone(), - description: provider - .identity - .description - .clone(), - time_created: provider.identity.time_created, - time_modified: provider.identity.time_modified, - time_deleted: provider.identity.time_deleted, - }, - silo_id: provider.silo_id, - provider_type: - db::model::IdentityProviderType::Saml, - }) + .values(provider) .execute_async(&conn) .await?; @@ -142,7 +146,7 @@ impl DataStore { use nexus_db_schema::schema::saml_identity_provider::dsl; let result = diesel::insert_into(dsl::saml_identity_provider) - .values(provider) + .values(saml_provider) .returning( db::model::SamlIdentityProvider::as_returning(), ) diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index 317680cbf7f..76862d92769 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -505,12 +505,13 @@ mod test { ByteCount, Error, IdentityMetadataCreateParams, LookupType, Name, }; use omicron_test_utils::dev; + use omicron_uuid_kinds::CollectionUuid; use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::PhysicalDiskUuid; + use omicron_uuid_kinds::SiloUserUuid; use omicron_uuid_kinds::SledUuid; use omicron_uuid_kinds::VolumeUuid; - use omicron_uuid_kinds::{CollectionUuid, TypedUuid}; use std::collections::HashMap; use std::collections::HashSet; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddrV6}; @@ -595,15 +596,16 @@ mod test { ); let token = "a_token".to_string(); - let silo_user_id = Uuid::new_v4(); + let silo_user_id = SiloUserUuid::new_v4(); - let session = ConsoleSession { - id: TypedUuid::new_v4().into(), - token: token.clone(), - time_created: Utc::now() - Duration::minutes(5), - time_last_used: Utc::now() - Duration::minutes(5), + let both_times = Utc::now() - Duration::minutes(5); + + let session = ConsoleSession::new_with_times( + token.clone(), silo_user_id, - }; + both_times, + both_times, + ); let _ = datastore .session_create(&authn_opctx, session.clone()) @@ -629,7 +631,7 @@ mod test { .unwrap(); let (.., db_silo_user) = LookupPath::new(&opctx, datastore) - .silo_user_id(session.silo_user_id) + .silo_user_id(session.silo_user_id()) .fetch() .await .unwrap(); @@ -640,7 +642,7 @@ mod test { .session_lookup_by_token(&authn_opctx, token.clone()) .await .unwrap(); - assert_eq!(session.silo_user_id, fetched.silo_user_id); + assert_eq!(session.silo_user_id(), fetched.silo_user_id()); assert_eq!(session.id, 
fetched.id); // also try looking it up by ID @@ -649,7 +651,7 @@ mod test { .fetch() .await .unwrap(); - assert_eq!(session.silo_user_id, fetched.silo_user_id); + assert_eq!(session.silo_user_id(), fetched.silo_user_id()); assert_eq!(session.token, fetched.token); // trying to insert the same one again fails @@ -1754,7 +1756,7 @@ mod test { DEFAULT_SILO_ID, LookupType::ById(DEFAULT_SILO_ID), ); - let silo_user_id = Uuid::new_v4(); + let silo_user_id = SiloUserUuid::new_v4(); datastore .silo_user_create( &authz_silo, @@ -1791,7 +1793,7 @@ mod test { .ssh_key_create(&opctx, &authz_user, ssh_key.clone()) .await .unwrap(); - assert_eq!(created.silo_user_id, ssh_key.silo_user_id); + assert_eq!(created.silo_user_id(), ssh_key.silo_user_id()); assert_eq!(created.public_key, ssh_key.public_key); // Lookup the key we just created. @@ -1804,7 +1806,7 @@ mod test { .unwrap(); assert_eq!(authz_silo.id(), DEFAULT_SILO_ID); assert_eq!(authz_silo_user.id(), silo_user_id); - assert_eq!(found.silo_user_id, ssh_key.silo_user_id); + assert_eq!(found.silo_user_id(), ssh_key.silo_user_id()); assert_eq!(found.public_key, ssh_key.public_key); // Trying to insert the same one again fails. diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 70cf81c1bb6..a2fcb2c6c82 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -54,7 +54,6 @@ use nexus_types::deployment::OmicronZoneExternalIp; use nexus_types::deployment::blueprint_zone_type; use nexus_types::external_api::params as external_params; use nexus_types::external_api::shared; -use nexus_types::external_api::shared::IdentityType; use nexus_types::external_api::shared::IpRange; use nexus_types::external_api::shared::SiloRole; use nexus_types::identity::Resource; @@ -70,6 +69,7 @@ use omicron_common::api::external::UpdateResult; use omicron_common::api::external::UserId; use omicron_common::bail_unless; use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SiloUserUuid; use omicron_uuid_kinds::SledUuid; use slog_error_chain::InlineErrorChain; use std::sync::{Arc, OnceLock}; @@ -452,7 +452,7 @@ impl DataStore { info!(log, "Created recovery silo"); // Create the first user in the initial Recovery Silo - let silo_user_id = Uuid::new_v4(); + let silo_user_id = SiloUserUuid::new_v4(); let silo_user = SiloUser::new( db_silo.id(), silo_user_id, @@ -495,11 +495,10 @@ impl DataStore { let (q1, q2) = Self::role_assignment_replace_visible_queries( opctx, &authz_silo, - &[shared::RoleAssignment { - identity_type: IdentityType::SiloUser, - identity_id: silo_user_id, - role_name: SiloRole::Admin, - }], + &[shared::RoleAssignment::for_silo_user( + silo_user_id, + SiloRole::Admin, + )], ) .await .map_err(RackInitError::RoleAssignment)?; @@ -1239,7 +1238,7 @@ mod test { let authz_silo_user = authz::SiloUser::new( authz_silo, silo_users[0].id(), - LookupType::ById(silo_users[0].id()), + LookupType::by_id(silo_users[0].id()), ); let hash = datastore .silo_user_password_hash_fetch(&opctx, &authz_silo_user) diff --git a/nexus/db-queries/src/db/datastore/silo.rs b/nexus/db-queries/src/db/datastore/silo.rs index 327848a3c4c..a1babdd62f3 100644 --- a/nexus/db-queries/src/db/datastore/silo.rs +++ b/nexus/db-queries/src/db/datastore/silo.rs @@ -43,6 +43,7 @@ use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; use omicron_common::api::external::http_pagination::PaginatedBy; +use 
omicron_uuid_kinds::SiloGroupUuid; use ref_cast::RefCast; use uuid::Uuid; @@ -177,7 +178,7 @@ impl DataStore { dns_update: DnsVersionUpdateBuilder, ) -> Result> { let silo_id = Uuid::new_v4(); - let silo_group_id = Uuid::new_v4(); + let silo_group_id = SiloGroupUuid::new_v4(); let silo_create_query = Self::silo_create_query( opctx, @@ -208,30 +209,31 @@ impl DataStore { None }; - let silo_admin_group_role_assignment_queries = - if new_silo_params.admin_group_name.is_some() { - // Grant silo admin role for members of the admin group. - let policy = shared::Policy { - role_assignments: vec![shared::RoleAssignment { - identity_type: shared::IdentityType::SiloGroup, - identity_id: silo_group_id, - role_name: SiloRole::Admin, - }], - }; - - let silo_admin_group_role_assignment_queries = - DataStore::role_assignment_replace_visible_queries( - opctx, - &authz_silo, - &policy.role_assignments, - ) - .await?; - - Some(silo_admin_group_role_assignment_queries) - } else { - None + let silo_admin_group_role_assignment_queries = if new_silo_params + .admin_group_name + .is_some() + { + // Grant silo admin role for members of the admin group. + let policy = shared::Policy { + role_assignments: vec![shared::RoleAssignment::for_silo_group( + silo_group_id, + SiloRole::Admin, + )], }; + let silo_admin_group_role_assignment_queries = + DataStore::role_assignment_replace_visible_queries( + opctx, + &authz_silo, + &policy.role_assignments, + ) + .await?; + + Some(silo_admin_group_role_assignment_queries) + } else { + None + }; + // This method uses nested transactions, which are not supported // with retryable transactions. let silo = self diff --git a/nexus/db-queries/src/db/datastore/silo_group.rs b/nexus/db-queries/src/db/datastore/silo_group.rs index e6cf139bb23..eb8f2438d35 100644 --- a/nexus/db-queries/src/db/datastore/silo_group.rs +++ b/nexus/db-queries/src/db/datastore/silo_group.rs @@ -12,6 +12,7 @@ use crate::db::IncompleteOnConflictExt; use crate::db::datastore::RunnableQueryNoReturn; use crate::db::model::SiloGroup; use crate::db::model::SiloGroupMembership; +use crate::db::model::to_db_typed_uuid; use crate::db::pagination::paginated; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; @@ -27,6 +28,8 @@ use omicron_common::api::external::InternalContext; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::UpdateResult; +use omicron_uuid_kinds::SiloGroupUuid; +use omicron_uuid_kinds::SiloUserUuid; use uuid::Uuid; impl DataStore { @@ -90,13 +93,13 @@ impl DataStore { &self, opctx: &OpContext, authz_silo: &authz::Silo, - silo_user_id: Uuid, + silo_user_id: SiloUserUuid, ) -> ListResultVec { opctx.authorize(authz::Action::ListChildren, authz_silo).await?; use nexus_db_schema::schema::silo_group_membership::dsl; dsl::silo_group_membership - .filter(dsl::silo_user_id.eq(silo_user_id)) + .filter(dsl::silo_user_id.eq(to_db_typed_uuid(silo_user_id))) .select(SiloGroupMembership::as_returning()) .get_results_async(&*self.pool_connection_authorized(opctx).await?) 
.await @@ -116,12 +119,21 @@ impl DataStore { .actor_required() .internal_context("fetching current user's group memberships")?; + let silo_user_id = match actor.silo_user_id() { + Some(silo_user_id) => silo_user_id, + None => { + return Err(Error::non_resourcetype_not_found( + "could not find silo user", + ))?; + } + }; + use nexus_db_schema::schema::{ silo_group as sg, silo_group_membership as sgm, }; paginated(sg::dsl::silo_group, sg::id, pagparams) .inner_join(sgm::table.on(sgm::silo_group_id.eq(sg::id))) - .filter(sgm::silo_user_id.eq(actor.actor_id())) + .filter(sgm::silo_user_id.eq(to_db_typed_uuid(silo_user_id))) .filter(sg::time_deleted.is_null()) .select(SiloGroup::as_returning()) .get_results_async(&*self.pool_connection_authorized(opctx).await?) @@ -142,7 +154,7 @@ impl DataStore { &self, opctx: &OpContext, authz_silo_user: &authz::SiloUser, - silo_group_ids: Vec, + silo_group_ids: Vec, ) -> UpdateResult<()> { opctx.authorize(authz::Action::Modify, authz_silo_user).await?; @@ -157,7 +169,10 @@ impl DataStore { // Delete existing memberships for user let silo_user_id = authz_silo_user.id(); diesel::delete(dsl::silo_group_membership) - .filter(dsl::silo_user_id.eq(silo_user_id)) + .filter( + dsl::silo_user_id + .eq(to_db_typed_uuid(silo_user_id)), + ) .execute_async(&conn) .await?; @@ -167,8 +182,8 @@ impl DataStore { > = silo_group_ids .iter() .map(|group_id| db::model::SiloGroupMembership { - silo_group_id: *group_id, - silo_user_id, + silo_group_id: to_db_typed_uuid(*group_id), + silo_user_id: to_db_typed_uuid(silo_user_id), }) .collect(); @@ -194,7 +209,7 @@ impl DataStore { #[derive(Debug, thiserror::Error)] enum SiloDeleteError { #[error("group {0} still has memberships")] - GroupStillHasMemberships(Uuid), + GroupStillHasMemberships(SiloGroupUuid), } type TxnError = TransactionError; @@ -211,7 +226,7 @@ impl DataStore { silo_group_membership::dsl::silo_group_membership .filter( silo_group_membership::dsl::silo_group_id - .eq(group_id), + .eq(to_db_typed_uuid(group_id)), ) .select(SiloGroupMembership::as_returning()) .limit(1) @@ -227,7 +242,7 @@ impl DataStore { // Delete silo group use nexus_db_schema::schema::silo_group::dsl; diesel::update(dsl::silo_group) - .filter(dsl::id.eq(group_id)) + .filter(dsl::id.eq(to_db_typed_uuid(group_id))) .filter(dsl::time_deleted.is_null()) .set(dsl::time_deleted.eq(Utc::now())) .execute_async(&conn) diff --git a/nexus/db-queries/src/db/datastore/silo_user.rs b/nexus/db-queries/src/db/datastore/silo_user.rs index 8b19c1886f8..0eb374af0bb 100644 --- a/nexus/db-queries/src/db/datastore/silo_user.rs +++ b/nexus/db-queries/src/db/datastore/silo_user.rs @@ -16,6 +16,7 @@ use crate::db::model::SiloUserPasswordHash; use crate::db::model::SiloUserPasswordUpdate; use crate::db::model::UserBuiltin; use crate::db::model::UserProvisionType; +use crate::db::model::to_db_typed_uuid; use crate::db::pagination::paginated; use crate::db::update_and_check::UpdateAndCheck; use async_bb8_diesel::AsyncRunQueryDsl; @@ -36,6 +37,7 @@ use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; use omicron_common::bail_unless; +use omicron_uuid_kinds::GenericUuid; use uuid::Uuid; impl DataStore { @@ -71,7 +73,7 @@ impl DataStore { let authz_silo_user = authz::SiloUser::new( authz_silo.clone(), silo_user_id, - LookupType::ById(silo_user_id), + LookupType::by_id(silo_user_id), ); (authz_silo_user, db_silo_user) }) @@ -98,10 +100,14 @@ impl DataStore { { use 
nexus_db_schema::schema::silo_user::dsl; diesel::update(dsl::silo_user) - .filter(dsl::id.eq(authz_silo_user_id)) + .filter( + dsl::id.eq(to_db_typed_uuid(authz_silo_user_id)), + ) .filter(dsl::time_deleted.is_null()) .set(dsl::time_deleted.eq(Utc::now())) - .check_if_exists::(authz_silo_user_id) + .check_if_exists::( + authz_silo_user_id.into_untyped_uuid(), + ) .execute_and_check(&conn) .await?; } @@ -110,7 +116,10 @@ impl DataStore { { use nexus_db_schema::schema::console_session::dsl; diesel::delete(dsl::console_session) - .filter(dsl::silo_user_id.eq(authz_silo_user_id)) + .filter( + dsl::silo_user_id + .eq(to_db_typed_uuid(authz_silo_user_id)), + ) .execute_async(&conn) .await?; } @@ -119,7 +128,10 @@ impl DataStore { { use nexus_db_schema::schema::device_access_token::dsl; diesel::delete(dsl::device_access_token) - .filter(dsl::silo_user_id.eq(authz_silo_user_id)) + .filter( + dsl::silo_user_id + .eq(to_db_typed_uuid(authz_silo_user_id)), + ) .execute_async(&conn) .await?; } @@ -128,7 +140,10 @@ impl DataStore { { use nexus_db_schema::schema::silo_group_membership::dsl; diesel::delete(dsl::silo_group_membership) - .filter(dsl::silo_user_id.eq(authz_silo_user_id)) + .filter( + dsl::silo_user_id + .eq(to_db_typed_uuid(authz_silo_user_id)), + ) .execute_async(&conn) .await?; } @@ -137,7 +152,10 @@ impl DataStore { { use nexus_db_schema::schema::ssh_key::dsl; diesel::update(dsl::ssh_key) - .filter(dsl::silo_user_id.eq(authz_silo_user_id)) + .filter( + dsl::silo_user_id + .eq(to_db_typed_uuid(authz_silo_user_id)), + ) .filter(dsl::time_deleted.is_null()) .set(dsl::time_deleted.eq(Utc::now())) .execute_async(&conn) @@ -233,11 +251,14 @@ impl DataStore { paginated(user::table, user::id, pagparams) .filter(user::silo_id.eq(authz_silo_user_list.silo().id())) .filter(user::time_deleted.is_null()) - .inner_join(user_to_group::table.on( - user_to_group::silo_user_id.eq(user::id).and( - user_to_group::silo_group_id.eq(authz_silo_group.id()), - ), - )) + .inner_join( + user_to_group::table.on(user_to_group::silo_user_id + .eq(user::id) + .and( + user_to_group::silo_group_id + .eq(to_db_typed_uuid(authz_silo_group.id())), + )), + ) .select(SiloUser::as_select()) .load_async::( &*self.pool_connection_authorized(opctx).await?, @@ -267,7 +288,7 @@ impl DataStore { bail_unless!(db_silo_user.id() == authz_silo_user.id()); if let Some(db_silo_user_password_hash) = &db_silo_user_password_hash { bail_unless!( - db_silo_user_password_hash.silo_user_id == db_silo_user.id() + db_silo_user_password_hash.silo_user_id() == db_silo_user.id() ); } @@ -291,7 +312,10 @@ impl DataStore { })?; } else { diesel::delete(dsl::silo_user_password_hash) - .filter(dsl::silo_user_id.eq(authz_silo_user.id())) + .filter( + dsl::silo_user_id + .eq(to_db_typed_uuid(authz_silo_user.id())), + ) .execute_async(&*self.pool_connection_authorized(opctx).await?) 
.await .map_err(|e| { @@ -324,7 +348,9 @@ impl DataStore { use nexus_db_schema::schema::silo_user_password_hash::dsl; Ok(dsl::silo_user_password_hash - .filter(dsl::silo_user_id.eq(authz_silo_user.id())) + .filter( + dsl::silo_user_id.eq(to_db_typed_uuid(authz_silo_user.id())), + ) .select(SiloUserPasswordHash::as_select()) .load_async::( &*self.pool_connection_authorized(opctx).await?, diff --git a/nexus/db-queries/src/db/datastore/ssh_key.rs b/nexus/db-queries/src/db/datastore/ssh_key.rs index 37e2f939ff6..0eb9c988880 100644 --- a/nexus/db-queries/src/db/datastore/ssh_key.rs +++ b/nexus/db-queries/src/db/datastore/ssh_key.rs @@ -11,6 +11,7 @@ use crate::db; use crate::db::identity::Resource; use crate::db::model::Name; use crate::db::model::SshKey; +use crate::db::model::to_db_typed_uuid; use crate::db::pagination::paginated; use crate::db::update_and_check::UpdateAndCheck; use async_bb8_diesel::AsyncRunQueryDsl; @@ -56,7 +57,7 @@ impl DataStore { use nexus_db_schema::schema::ssh_key::dsl; let result: Vec<(Uuid, Name)> = dsl::ssh_key .filter(dsl::id.eq_any(ids).or(dsl::name.eq_any(names))) - .filter(dsl::silo_user_id.eq(authz_user.id())) + .filter(dsl::silo_user_id.eq(to_db_typed_uuid(authz_user.id()))) .filter(dsl::time_deleted.is_null()) .select((dsl::id, dsl::name)) .get_results_async(&*self.pool_connection_authorized(opctx).await?) @@ -107,7 +108,7 @@ impl DataStore { use nexus_db_schema::schema::ssh_key::dsl; dsl::ssh_key .filter(dsl::id.eq_any(keys.to_owned())) - .filter(dsl::silo_user_id.eq(authz_user.id())) + .filter(dsl::silo_user_id.eq(to_db_typed_uuid(authz_user.id()))) .filter(dsl::time_deleted.is_null()) .select(SshKey::as_select()) .get_results_async(&*self.pool_connection_authorized(opctx).await?) @@ -132,7 +133,9 @@ impl DataStore { None => { use nexus_db_schema::schema::ssh_key::dsl; dsl::ssh_key - .filter(dsl::silo_user_id.eq(authz_user.id())) + .filter( + dsl::silo_user_id.eq(to_db_typed_uuid(authz_user.id())), + ) .filter(dsl::time_deleted.is_null()) .select(dsl::id) .get_results_async( @@ -235,7 +238,7 @@ impl DataStore { &pagparams.map_name(|n| Name::ref_cast(n)), ), } - .filter(dsl::silo_user_id.eq(authz_user.id())) + .filter(dsl::silo_user_id.eq(to_db_typed_uuid(authz_user.id()))) .filter(dsl::time_deleted.is_null()) .select(SshKey::as_select()) .load_async(&*self.pool_connection_authorized(opctx).await?) 
@@ -250,7 +253,7 @@ impl DataStore { authz_user: &authz::SiloUser, ssh_key: SshKey, ) -> CreateResult { - assert_eq!(authz_user.id(), ssh_key.silo_user_id); + assert_eq!(authz_user.id(), ssh_key.silo_user_id()); opctx.authorize(authz::Action::CreateChild, authz_user).await?; let name = ssh_key.name().to_string(); diff --git a/nexus/db-queries/src/policy_test/mod.rs b/nexus/db-queries/src/policy_test/mod.rs index 0b445606315..93524c7da43 100644 --- a/nexus/db-queries/src/policy_test/mod.rs +++ b/nexus/db-queries/src/policy_test/mod.rs @@ -77,11 +77,10 @@ async fn test_iam_roles_behavior() { .role_assignment_replace_visible( &opctx, &main_silo, - &[shared::RoleAssignment { - identity_type: shared::IdentityType::SiloUser, - identity_id: USER_TEST_PRIVILEGED.id(), - role_name: SiloRole::Admin, - }], + &[shared::RoleAssignment::for_silo_user( + USER_TEST_PRIVILEGED.id(), + SiloRole::Admin, + )], ) .await .unwrap(); @@ -343,11 +342,10 @@ async fn test_conferred_roles() { .role_assignment_replace_visible( &opctx, &main_silo, - &[shared::RoleAssignment { - identity_type: shared::IdentityType::SiloUser, - identity_id: USER_TEST_PRIVILEGED.id(), - role_name: SiloRole::Admin, - }], + &[shared::RoleAssignment::for_silo_user( + USER_TEST_PRIVILEGED.id(), + SiloRole::Admin, + )], ) .await .unwrap(); diff --git a/nexus/db-queries/src/policy_test/resource_builder.rs b/nexus/db-queries/src/policy_test/resource_builder.rs index d2ba98ef412..f648810ff2f 100644 --- a/nexus/db-queries/src/policy_test/resource_builder.rs +++ b/nexus/db-queries/src/policy_test/resource_builder.rs @@ -18,6 +18,7 @@ use nexus_db_model::DatabaseString; use nexus_types::external_api::shared; use omicron_common::api::external::Error; use omicron_common::api::external::LookupType; +use omicron_uuid_kinds::SiloUserUuid; use std::sync::Arc; use strum::IntoEnumIterator; use uuid::Uuid; @@ -39,7 +40,7 @@ pub struct ResourceBuilder<'a> { /// list of resources created so far resources: Vec>, /// list of users created so far - users: Vec<(String, Uuid)>, + users: Vec<(String, SiloUserUuid)>, } impl<'a> ResourceBuilder<'a> { @@ -102,7 +103,7 @@ impl<'a> ResourceBuilder<'a> { for role in T::AllowedRoles::iter() { let role_name = role.to_database_string(); let username = format!("{}-{}", resource_name, role_name); - let user_id = Uuid::new_v4(); + let user_id = SiloUserUuid::new_v4(); println!("creating user: {}", &username); self.users.push((username.clone(), user_id)); @@ -125,11 +126,9 @@ impl<'a> ResourceBuilder<'a> { let new_role_assignments = old_role_assignments .into_iter() .map(|r| r.try_into().unwrap()) - .chain(std::iter::once(shared::RoleAssignment { - identity_type: shared::IdentityType::SiloUser, - identity_id: user_id, - role_name: role, - })) + .chain(std::iter::once(shared::RoleAssignment::for_silo_user( + user_id, role, + ))) .collect::>(); datastore .role_assignment_replace_visible( @@ -152,7 +151,7 @@ impl<'a> ResourceBuilder<'a> { /// were created with specific roles on those resources pub struct ResourceSet { resources: Vec>, - users: Vec<(String, Uuid)>, + users: Vec<(String, SiloUserUuid)>, } impl ResourceSet { @@ -167,7 +166,7 @@ impl ResourceSet { /// Iterate the users that were created as `(username, user_id)` pairs pub fn users( &self, - ) -> impl std::iter::Iterator + '_ { + ) -> impl std::iter::Iterator + '_ { self.users.iter() } } diff --git a/nexus/db-queries/src/policy_test/resources.rs b/nexus/db-queries/src/policy_test/resources.rs index 0de038e0991..221a6f9b0f3 100644 --- 
a/nexus/db-queries/src/policy_test/resources.rs +++ b/nexus/db-queries/src/policy_test/resources.rs @@ -11,6 +11,8 @@ use omicron_common::api::external::LookupType; use omicron_uuid_kinds::AccessTokenKind; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::PhysicalDiskUuid; +use omicron_uuid_kinds::SiloGroupUuid; +use omicron_uuid_kinds::SiloUserUuid; use omicron_uuid_kinds::SupportBundleUuid; use omicron_uuid_kinds::TypedUuid; use oso::PolarClass; @@ -258,7 +260,7 @@ async fn make_silo( )); builder.new_resource(authz::SiloUserList::new(silo.clone())); - let silo_user_id = Uuid::new_v4(); + let silo_user_id = SiloUserUuid::new_v4(); let silo_user = authz::SiloUser::new( silo.clone(), silo_user_id, @@ -271,7 +273,7 @@ async fn make_silo( ssh_key_id, LookupType::ByName(format!("{}-user-ssh-key", silo_name)), )); - let silo_group_id = Uuid::new_v4(); + let silo_group_id = SiloGroupUuid::new_v4(); builder.new_resource(authz::SiloGroup::new( silo.clone(), silo_group_id, diff --git a/nexus/src/app/device_auth.rs b/nexus/src/app/device_auth.rs index 668e363572c..0a97c21cc07 100644 --- a/nexus/src/app/device_auth.rs +++ b/nexus/src/app/device_auth.rs @@ -51,6 +51,7 @@ use nexus_db_queries::authn::{Actor, Reason}; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::model::{DeviceAccessToken, DeviceAuthRequest}; +use omicron_uuid_kinds::SiloUserUuid; use anyhow::anyhow; use nexus_types::external_api::params; @@ -99,7 +100,7 @@ impl super::Nexus { &self, opctx: &OpContext, user_code: String, - silo_user_id: Uuid, + silo_user_id: SiloUserUuid, ) -> CreateResult { let (.., authz_request, db_request) = LookupPath::new(opctx, &self.db_datastore) @@ -208,7 +209,7 @@ impl super::Nexus { e => Reason::UnknownError { source: e }, })?; - let silo_user_id = db_access_token.silo_user_id; + let silo_user_id = db_access_token.silo_user_id(); let (.., db_silo_user) = LookupPath::new(opctx, &self.db_datastore) .silo_user_id(silo_user_id) .fetch() diff --git a/nexus/src/app/iam.rs b/nexus/src/app/iam.rs index d95cd5ec1a3..d29c86fcb0b 100644 --- a/nexus/src/app/iam.rs +++ b/nexus/src/app/iam.rs @@ -20,6 +20,9 @@ use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::NameOrId; use omicron_common::api::external::UpdateResult; +use omicron_uuid_kinds::BuiltInUserUuid; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SiloGroupUuid; use ref_cast::RefCast; use uuid::Uuid; @@ -84,7 +87,7 @@ impl super::Nexus { &self, opctx: &OpContext, pagparams: &DataPageParams<'_, Uuid>, - group_id: &Uuid, + group_id: &SiloGroupUuid, ) -> ListResultVec { let authz_silo = opctx .authn @@ -118,7 +121,7 @@ impl super::Nexus { .actor_required() .internal_context("loading current user")?; let (.., db_silo_user) = LookupPath::new(opctx, &self.db_datastore) - .silo_user_id(actor.actor_id()) + .silo_user_actor(&actor)? 
.fetch() .await?; Ok(db_silo_user) @@ -166,7 +169,8 @@ impl super::Nexus { let lookup_path = LookupPath::new(opctx, &self.db_datastore); let user = match user_selector { params::UserBuiltinSelector { user: NameOrId::Id(id) } => { - lookup_path.user_builtin_id(*id) + lookup_path + .user_builtin_id(BuiltInUserUuid::from_untyped_uuid(*id)) } params::UserBuiltinSelector { user: NameOrId::Name(name) } => { lookup_path.user_builtin_name(Name::ref_cast(name)) diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index c2dbf6269da..62a3c015a04 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -273,7 +273,7 @@ async fn normalize_ssh_keys( .actor_required() .internal_context("loading current user's ssh keys for new Instance")?; let (.., authz_user) = LookupPath::new(opctx, datastore) - .silo_user_id(actor.actor_id()) + .silo_user_actor(&actor)? .lookup_for(authz::Action::ListChildren) .await?; diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index 953867ad19e..5e6fe7755b1 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -391,7 +391,8 @@ async fn sic_associate_ssh_keys( .map_err(ActionError::action_failed)?; let (.., authz_user) = LookupPath::new(&opctx, datastore) - .silo_user_id(actor.actor_id()) + .silo_user_actor(&actor) + .map_err(ActionError::action_failed)? .lookup_for(authz::Action::ListChildren) .await .map_err(ActionError::action_failed)?; diff --git a/nexus/src/app/session.rs b/nexus/src/app/session.rs index d5eb2ab3b7c..53a6204b536 100644 --- a/nexus/src/app/session.rs +++ b/nexus/src/app/session.rs @@ -20,6 +20,7 @@ use omicron_common::api::external::LookupType; use omicron_common::api::external::UpdateResult; use omicron_uuid_kinds::ConsoleSessionUuid; use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SiloUserUuid; use rand::{RngCore, SeedableRng, rngs::StdRng}; use uuid::Uuid; @@ -56,7 +57,7 @@ impl super::Nexus { self.db_datastore.session_lookup_by_token(&opctx, token).await?; let (.., db_silo_user) = LookupPath::new(opctx, &self.db_datastore) - .silo_user_id(db_session.silo_user_id) + .silo_user_id(db_session.silo_user_id()) .fetch() .await?; @@ -91,7 +92,7 @@ impl super::Nexus { pub(crate) async fn lookup_silo_for_authn( &self, opctx: &OpContext, - silo_user_id: Uuid, + silo_user_id: SiloUserUuid, ) -> Result { let (.., db_silo_user) = LookupPath::new(opctx, &self.db_datastore) .silo_user_id(silo_user_id) diff --git a/nexus/src/app/silo.rs b/nexus/src/app/silo.rs index 2ae6393cfd9..411ae7b613d 100644 --- a/nexus/src/app/silo.rs +++ b/nexus/src/app/silo.rs @@ -31,6 +31,8 @@ use omicron_common::api::external::{DataPageParams, ResourceType}; use omicron_common::api::external::{DeleteResult, NameOrId}; use omicron_common::api::external::{Error, InternalContext}; use omicron_common::bail_unless; +use omicron_uuid_kinds::SiloGroupUuid; +use omicron_uuid_kinds::SiloUserUuid; use std::net::IpAddr; use std::str::FromStr; use uuid::Uuid; @@ -266,7 +268,7 @@ impl super::Nexus { &self, opctx: &OpContext, authz_silo: &authz::Silo, - silo_user_id: Uuid, + silo_user_id: SiloUserUuid, action: authz::Action, ) -> LookupResult<(authz::SiloUser, db::model::SiloUser)> { let (_, authz_silo_user, db_silo_user) = @@ -300,7 +302,7 @@ impl super::Nexus { &self, opctx: &OpContext, silo_lookup: &lookup::Silo<'_>, - silo_user_id: Uuid, + silo_user_id: SiloUserUuid, ) -> LookupResult { let (authz_silo,) = silo_lookup.lookup_for(authz::Action::Read).await?; let (_, 
db_silo_user) = self @@ -318,7 +320,7 @@ impl super::Nexus { pub(crate) async fn current_silo_user_logout( &self, opctx: &OpContext, - silo_user_id: Uuid, + silo_user_id: SiloUserUuid, ) -> UpdateResult<()> { let (_, authz_silo_user, _) = LookupPath::new(opctx, self.datastore()) .silo_user_id(silo_user_id) @@ -354,7 +356,7 @@ impl super::Nexus { pub(crate) async fn current_silo_user_lookup( &self, opctx: &OpContext, - silo_user_id: Uuid, + silo_user_id: SiloUserUuid, ) -> LookupResult<(authz::SiloUser, db::model::SiloUser)> { let (_, authz_silo_user, db_silo_user) = LookupPath::new(opctx, self.datastore()) @@ -369,7 +371,7 @@ impl super::Nexus { pub(crate) async fn silo_user_token_list( &self, opctx: &OpContext, - silo_user_id: Uuid, + silo_user_id: SiloUserUuid, pagparams: &DataPageParams<'_, Uuid>, ) -> ListResultVec { let (_, authz_silo_user, _db_silo_user) = @@ -389,7 +391,7 @@ impl super::Nexus { pub(crate) async fn silo_user_session_list( &self, opctx: &OpContext, - silo_user_id: Uuid, + silo_user_id: SiloUserUuid, pagparams: &DataPageParams<'_, Uuid>, // TODO: https://github.com/oxidecomputer/omicron/issues/8625 idle_ttl: TimeDelta, @@ -452,7 +454,7 @@ impl super::Nexus { .await?; let silo_user = db::model::SiloUser::new( authz_silo.id(), - Uuid::new_v4(), + SiloUserUuid::new_v4(), new_user_params.external_id.as_ref().to_owned(), ); // TODO These two steps should happen in a transaction. @@ -461,7 +463,7 @@ impl super::Nexus { let authz_silo_user = authz::SiloUser::new( authz_silo.clone(), db_silo_user.id(), - LookupType::ById(db_silo_user.id()), + LookupType::by_id(db_silo_user.id()), ); self.silo_user_password_set_internal( opctx, @@ -479,7 +481,7 @@ impl super::Nexus { &self, opctx: &OpContext, silo_lookup: &lookup::Silo<'_>, - silo_user_id: Uuid, + silo_user_id: SiloUserUuid, ) -> DeleteResult { let (authz_silo, _) = self.local_idp_fetch_silo(silo_lookup).await?; @@ -537,7 +539,7 @@ impl super::Nexus { db::model::UserProvisionType::Jit => { let silo_user = db::model::SiloUser::new( authz_silo.id(), - Uuid::new_v4(), + SiloUserUuid::new_v4(), authenticated_subject.external_id.clone(), ); @@ -552,7 +554,7 @@ impl super::Nexus { // IdP sent us. Also, if the silo user provision type is Jit, create // silo groups if new groups from the IdP are seen. 
- let mut silo_user_group_ids: Vec = + let mut silo_user_group_ids: Vec = Vec::with_capacity(authenticated_subject.groups.len()); for group in &authenticated_subject.groups { @@ -609,7 +611,7 @@ impl super::Nexus { &self, opctx: &OpContext, silo_lookup: &lookup::Silo<'_>, - silo_user_id: Uuid, + silo_user_id: SiloUserUuid, password_value: params::UserPassword, ) -> UpdateResult<()> { let (authz_silo, db_silo) = @@ -781,7 +783,7 @@ impl super::Nexus { opctx, authz_silo, db::model::SiloGroup::new( - Uuid::new_v4(), + SiloGroupUuid::new_v4(), authz_silo.id(), external_id.clone(), ), @@ -1031,7 +1033,7 @@ impl super::Nexus { pub fn silo_group_lookup<'a>( &'a self, opctx: &'a OpContext, - group_id: &'a Uuid, + group_id: &'a SiloGroupUuid, ) -> lookup::SiloGroup<'a> { LookupPath::new(opctx, &self.db_datastore).silo_group_id(*group_id) } diff --git a/nexus/src/app/ssh_key.rs b/nexus/src/app/ssh_key.rs index a0b77da5713..c4e6fb06136 100644 --- a/nexus/src/app/ssh_key.rs +++ b/nexus/src/app/ssh_key.rs @@ -12,8 +12,8 @@ use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::NameOrId; use omicron_common::api::external::http_pagination::PaginatedBy; +use omicron_uuid_kinds::SiloUserUuid; use ref_cast::RefCast; -use uuid::Uuid; impl super::Nexus { // SSH Keys @@ -46,7 +46,7 @@ impl super::Nexus { pub(crate) async fn ssh_key_create( &self, opctx: &OpContext, - silo_user_id: Uuid, + silo_user_id: SiloUserUuid, params: params::SshKeyCreate, ) -> CreateResult { let ssh_key = db::model::SshKey::new(silo_user_id, params); @@ -61,7 +61,7 @@ impl super::Nexus { pub(crate) async fn ssh_keys_list( &self, opctx: &OpContext, - silo_user_id: Uuid, + silo_user_id: SiloUserUuid, page_params: &PaginatedBy<'_>, ) -> ListResultVec { let (.., authz_user) = LookupPath::new(opctx, self.datastore()) @@ -88,7 +88,7 @@ impl super::Nexus { pub(crate) async fn ssh_key_delete( &self, opctx: &OpContext, - silo_user_id: Uuid, + silo_user_id: SiloUserUuid, ssh_key_lookup: &lookup::SshKey<'_>, ) -> DeleteResult { let (.., authz_silo_user, authz_ssh_key) = diff --git a/nexus/src/context.rs b/nexus/src/context.rs index d170e7e79d0..91f0db6c31e 100644 --- a/nexus/src/context.rs +++ b/nexus/src/context.rs @@ -21,6 +21,7 @@ use nexus_db_queries::{authn, authz, db}; use omicron_common::address::{AZ_PREFIX, Ipv6Subnet}; use omicron_uuid_kinds::ConsoleSessionUuid; use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SiloUserUuid; use oximeter::types::ProducerRegistry; use oximeter_instruments::http::{HttpService, LatencyTracker}; use slog::Logger; @@ -442,7 +443,7 @@ impl authn::external::AuthenticatorContext for ServerContext { impl authn::external::SiloUserSilo for ServerContext { async fn silo_user_silo( &self, - silo_user_id: Uuid, + silo_user_id: SiloUserUuid, ) -> Result { let opctx = self.nexus.opctx_external_authn(); self.nexus.lookup_silo_for_authn(opctx, silo_user_id).await diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index be75d2a3e90..c0768d1a8d5 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -662,7 +662,7 @@ impl NexusExternalApi for NexusExternalApiImpl { Ok(HttpResponseOk(ScanById::results_page( &query, users, - &|_, user: &User| user.id, + &|_, user: &User| user.id.into_untyped_uuid(), )?)) }; apictx @@ -6904,7 +6904,7 @@ impl NexusExternalApi for NexusExternalApiImpl { Ok(HttpResponseOk(ScanById::results_page( &query, 
users.into_iter().map(|i| i.into()).collect(), - &|_, user: &User| user.id, + &|_, user: &User| user.id.into_untyped_uuid(), )?)) }; apictx @@ -7051,7 +7051,7 @@ impl NexusExternalApi for NexusExternalApiImpl { Ok(HttpResponseOk(ScanById::results_page( &query, groups, - &|_, group: &Group| group.id, + &|_, group: &Group| group.id.into_untyped_uuid(), )?)) }; apictx @@ -7205,7 +7205,7 @@ impl NexusExternalApi for NexusExternalApiImpl { Ok(HttpResponseOk(ScanById::results_page( &query, groups, - &|_, group: &views::Group| group.id, + &|_, group: &views::Group| group.id.into_untyped_uuid(), )?)) }; apictx @@ -7232,12 +7232,23 @@ impl NexusExternalApi for NexusExternalApiImpl { .authn .actor_required() .internal_context("listing current user's ssh keys")?; + + let silo_user_id = match actor.silo_user_id() { + Some(silo_user_id) => silo_user_id, + None => { + return Err(Error::non_resourcetype_not_found( + "could not find silo user", + ))?; + } + }; + let ssh_keys = nexus - .ssh_keys_list(&opctx, actor.actor_id(), &paginated_by) + .ssh_keys_list(&opctx, silo_user_id, &paginated_by) .await? .into_iter() .map(SshKey::from) .collect::>(); + Ok(HttpResponseOk(ScanByNameOrId::results_page( &query, ssh_keys, @@ -7264,9 +7275,20 @@ impl NexusExternalApi for NexusExternalApiImpl { .authn .actor_required() .internal_context("creating ssh key for current user")?; + + let silo_user_id = match actor.silo_user_id() { + Some(silo_user_id) => silo_user_id, + None => { + return Err(Error::non_resourcetype_not_found( + "could not find silo user", + ))?; + } + }; + let ssh_key = nexus - .ssh_key_create(&opctx, actor.actor_id(), new_key.into_inner()) + .ssh_key_create(&opctx, silo_user_id, new_key.into_inner()) .await?; + Ok(HttpResponseCreated(ssh_key.into())) }; apictx @@ -7290,15 +7312,24 @@ impl NexusExternalApi for NexusExternalApiImpl { .authn .actor_required() .internal_context("fetching one of current user's ssh keys")?; - let ssh_key_selector = params::SshKeySelector { - silo_user_id: actor.actor_id(), - ssh_key: path.ssh_key, + + let silo_user_id = match actor.silo_user_id() { + Some(silo_user_id) => silo_user_id, + None => { + return Err(Error::non_resourcetype_not_found( + "could not find silo user", + ))?; + } }; + + let ssh_key_selector = + params::SshKeySelector { silo_user_id, ssh_key: path.ssh_key }; + let ssh_key_lookup = nexus.ssh_key_lookup(&opctx, &ssh_key_selector)?; - let (.., silo_user, _, ssh_key) = ssh_key_lookup.fetch().await?; - // Ensure the SSH key exists in the current silo - assert_eq!(silo_user.id(), actor.actor_id()); + + let (.., ssh_key) = ssh_key_lookup.fetch().await?; + Ok(HttpResponseOk(ssh_key.into())) }; apictx @@ -7322,15 +7353,24 @@ impl NexusExternalApi for NexusExternalApiImpl { .authn .actor_required() .internal_context("deleting one of current user's ssh keys")?; - let ssh_key_selector = params::SshKeySelector { - silo_user_id: actor.actor_id(), - ssh_key: path.ssh_key, + + let silo_user_id = match actor.silo_user_id() { + Some(silo_user_id) => silo_user_id, + None => { + return Err(Error::non_resourcetype_not_found( + "could not find silo user", + ))?; + } }; + + let ssh_key_selector = + params::SshKeySelector { silo_user_id, ssh_key: path.ssh_key }; + let ssh_key_lookup = nexus.ssh_key_lookup(&opctx, &ssh_key_selector)?; - nexus - .ssh_key_delete(&opctx, actor.actor_id(), &ssh_key_lookup) - .await?; + + nexus.ssh_key_delete(&opctx, silo_user_id, &ssh_key_lookup).await?; + Ok(HttpResponseDeleted()) }; apictx @@ -8279,7 +8319,9 @@ impl NexusExternalApi for 
NexusExternalApiImpl { let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + // This is an authenticated request, so we know who the user // is. In that respect it's more like a regular resource create // operation and not like the true login endpoints `login_local` @@ -8291,13 +8333,24 @@ impl NexusExternalApi for NexusExternalApiImpl { let &actor = opctx.authn.actor_required().internal_context( "creating new device auth session for current user", )?; + + let silo_user_id = match actor.silo_user_id() { + Some(silo_user_id) => silo_user_id, + None => { + return Err(Error::non_resourcetype_not_found( + "could not find silo user", + ))?; + } + }; + let _token = nexus .device_auth_request_verify( &opctx, params.user_code, - actor.actor_id(), + silo_user_id, ) .await?; + Ok(HttpResponseUpdatedNoContent()) } .await; diff --git a/nexus/test-utils/src/http_testing.rs b/nexus/test-utils/src/http_testing.rs index 9b63525a48d..ab4905267e6 100644 --- a/nexus/test-utils/src/http_testing.rs +++ b/nexus/test-utils/src/http_testing.rs @@ -15,6 +15,7 @@ use headers::authorization::Credentials; use http_body_util::BodyExt; use nexus_db_queries::authn::external::spoof; use nexus_db_queries::db::identity::Asset; +use omicron_uuid_kinds::SiloUserUuid; use serde_urlencoded; use std::convert::TryInto; use std::fmt::Debug; @@ -546,7 +547,7 @@ impl TestResponse { pub enum AuthnMode { UnprivilegedUser, PrivilegedUser, - SiloUser(uuid::Uuid), + SiloUser(SiloUserUuid), Session(String), } diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index c4849f95c55..55f3d8b8f1b 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -20,7 +20,6 @@ use nexus_types::deployment::Blueprint; use nexus_types::external_api::params; use nexus_types::external_api::shared; use nexus_types::external_api::shared::Baseboard; -use nexus_types::external_api::shared::IdentityType; use nexus_types::external_api::shared::IpRange; use nexus_types::external_api::views; use nexus_types::external_api::views::AffinityGroup; @@ -66,6 +65,7 @@ use omicron_test_utils::dev::poll::wait_for_condition; use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::PhysicalDiskUuid; +use omicron_uuid_kinds::SiloUserUuid; use omicron_uuid_kinds::SledUuid; use omicron_uuid_kinds::ZpoolUuid; use oxnet::Ipv4Net; @@ -1099,7 +1099,7 @@ pub async fn grant_iam( client: &ClientTestContext, grant_resource_url: &str, grant_role: T, - grant_user: Uuid, + grant_user: SiloUserUuid, run_as: AuthnMode, ) where T: serde::Serialize + serde::de::DeserializeOwned, @@ -1113,11 +1113,8 @@ pub async fn grant_iam( .expect("failed to fetch policy") .parsed_body() .expect("failed to parse policy"); - let new_role_assignment = shared::RoleAssignment { - identity_type: IdentityType::SiloUser, - identity_id: grant_user, - role_name: grant_role, - }; + let new_role_assignment = + shared::RoleAssignment::for_silo_user(grant_user, grant_role); let new_role_assignments = existing_policy .role_assignments .into_iter() diff --git a/nexus/tests/integration_tests/authn_http.rs b/nexus/tests/integration_tests/authn_http.rs index 59275e881dd..7a50c137f61 100644 --- a/nexus/tests/integration_tests/authn_http.rs +++ b/nexus/tests/integration_tests/authn_http.rs @@ -19,6 +19,7 @@ use dropshot::test_util::LogContext; use dropshot::test_util::TestContext; use headers::authorization::Credentials; 
use http::header::HeaderValue; +use nexus_db_queries::authn::Actor; use nexus_db_queries::authn::external::AuthenticatorContext; use nexus_db_queries::authn::external::HttpAuthnScheme; use nexus_db_queries::authn::external::SiloUserSilo; @@ -28,6 +29,7 @@ use nexus_db_queries::authn::external::spoof::HttpAuthnSpoof; use nexus_db_queries::authn::external::spoof::SPOOF_SCHEME_NAME; use nexus_types::silo::DEFAULT_SILO_ID; use omicron_uuid_kinds::ConsoleSessionUuid; +use omicron_uuid_kinds::SiloUserUuid; use std::sync::Mutex; use uuid::Uuid; @@ -62,8 +64,7 @@ async fn test_authn_spoof_allowed() { // Successful authentication let valid_uuid = "7f927c86-3371-4295-c34a-e3246a4b9c02"; - let header = - spoof::make_header_value(valid_uuid.parse().unwrap()).0.encode(); + let header = spoof::make_header_value(valid_uuid).0.encode(); assert_eq!( whoami_request(Some(header), None, &testctx).await.unwrap(), WhoamiResponse { @@ -107,7 +108,7 @@ async fn test_authn_session_cookie() { let valid_session = FakeSession { id: ConsoleSessionUuid::new_v4(), token: "valid".to_string(), - silo_user_id: Uuid::new_v4(), + silo_user_id: SiloUserUuid::new_v4(), silo_id: Uuid::new_v4(), time_last_used: Utc::now() - Duration::seconds(5), time_created: Utc::now() - Duration::seconds(5), @@ -115,7 +116,7 @@ async fn test_authn_session_cookie() { let idle_expired_session = FakeSession { id: ConsoleSessionUuid::new_v4(), token: "idle_expired".to_string(), - silo_user_id: Uuid::new_v4(), + silo_user_id: SiloUserUuid::new_v4(), silo_id: Uuid::new_v4(), time_last_used: Utc::now() - Duration::hours(2), time_created: Utc::now() - Duration::hours(3), @@ -123,7 +124,7 @@ async fn test_authn_session_cookie() { let abs_expired_session = FakeSession { id: ConsoleSessionUuid::new_v4(), token: "abs_expired".to_string(), - silo_user_id: Uuid::new_v4(), + silo_user_id: SiloUserUuid::new_v4(), silo_id: Uuid::new_v4(), time_last_used: Utc::now(), time_created: Utc::now() - Duration::hours(10), @@ -196,11 +197,9 @@ async fn test_authn_spoof_unconfigured() { let values = [ None, Some( - spoof::make_header_value( - "7f927c86-3371-4295-c34a-e3246a4b9c02".parse().unwrap(), - ) - .0 - .encode(), + spoof::make_header_value("7f927c86-3371-4295-c34a-e3246a4b9c02") + .0 + .encode(), ), Some(spoof::make_header_value_raw(b"not-a-uuid").unwrap()), Some(spoof::SPOOF_HEADER_BAD_ACTOR.0.encode()), @@ -334,7 +333,7 @@ impl AuthenticatorContext for WhoamiServerState { impl SiloUserSilo for WhoamiServerState { async fn silo_user_silo( &self, - silo_user_id: Uuid, + silo_user_id: SiloUserUuid, ) -> Result { assert_eq!( silo_user_id.to_string(), @@ -348,7 +347,7 @@ impl SiloUserSilo for WhoamiServerState { struct FakeSession { id: ConsoleSessionUuid, token: String, - silo_user_id: Uuid, + silo_user_id: SiloUserUuid, silo_id: Uuid, time_created: DateTime, time_last_used: DateTime, @@ -358,7 +357,7 @@ impl session_cookie::Session for FakeSession { fn id(&self) -> ConsoleSessionUuid { self.id } - fn silo_user_id(&self) -> Uuid { + fn silo_user_id(&self) -> SiloUserUuid { self.silo_user_id } fn silo_id(&self) -> Uuid { @@ -435,7 +434,11 @@ async fn whoami_get( ) -> Result, dropshot::HttpError> { let whoami_state = rqctx.context(); let authn = whoami_state.authn.authn_request(&rqctx).await?; - let actor = authn.actor().map(|a| a.actor_id().to_string()); + let actor = authn.actor().map(|actor| match actor { + Actor::SiloUser { silo_user_id, .. 
} => silo_user_id.to_string(), + + Actor::UserBuiltin { user_builtin_id } => user_builtin_id.to_string(), + }); let authenticated = actor.is_some(); let schemes_tried = authn.schemes_tried().iter().map(|s| s.to_string()).collect(); diff --git a/nexus/tests/integration_tests/authz.rs b/nexus/tests/integration_tests/authz.rs index 84b97969f9b..fda3f5de0e6 100644 --- a/nexus/tests/integration_tests/authz.rs +++ b/nexus/tests/integration_tests/authz.rs @@ -16,7 +16,7 @@ use omicron_common::api::external::IdentityMetadataCreateParams; use dropshot::ResultsPage; use nexus_test_utils::resource_helpers::{create_local_user, create_silo}; -use uuid::Uuid; +use omicron_uuid_kinds::SiloUserUuid; type ControlPlaneTestContext = nexus_test_utils::ControlPlaneTestContext; @@ -178,7 +178,8 @@ async fn test_list_silo_users_for_unpriv(cptestctx: &ControlPlaneTestContext) { .unwrap(); // And only show the first silo's user - let user_ids: Vec = users.items.iter().map(|x| x.id).collect(); + let user_ids: Vec = + users.items.iter().map(|x| x.id).collect(); assert_eq!(user_ids, vec![new_silo_user_id]); } diff --git a/nexus/tests/integration_tests/certificates.rs b/nexus/tests/integration_tests/certificates.rs index 79711ea1995..848a869eeca 100644 --- a/nexus/tests/integration_tests/certificates.rs +++ b/nexus/tests/integration_tests/certificates.rs @@ -26,6 +26,8 @@ use omicron_common::api::internal::nexus::Certificate as InternalCertificate; use omicron_test_utils::certificates::CertificateChain; use omicron_test_utils::dev::poll::CondCheckError; use omicron_test_utils::dev::poll::wait_for_condition; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SiloUserUuid; use oxide_client::ClientCurrentUserExt; use oxide_client::ClientSilosExt; use oxide_client::ClientSystemSilosExt; @@ -411,19 +413,23 @@ async fn test_silo_certificates() { .expect("failed to create Silo"); // Create a local user in that Silo. - let silo2_user = silo1_client - .local_idp_user_create() - .silo(silo2.silo_name.clone()) - .body( - oxide_client::types::UserCreate::builder() - .external_id("testuser-silo2") - .password(oxide_client::types::UserPassword::LoginDisallowed), - ) - .send() - .await - .expect("failed to create user") - .into_inner() - .id; + let silo2_user: SiloUserUuid = SiloUserUuid::from_untyped_uuid( + silo1_client + .local_idp_user_create() + .silo(silo2.silo_name.clone()) + .body( + oxide_client::types::UserCreate::builder() + .external_id("testuser-silo2") + .password( + oxide_client::types::UserPassword::LoginDisallowed, + ), + ) + .send() + .await + .expect("failed to create user") + .into_inner() + .id, + ); // Grant that user admin privileges on that Silo. 
let mut silo2_policy = silo1_client @@ -435,7 +441,7 @@ async fn test_silo_certificates() { .into_inner(); silo2_policy.role_assignments.push( oxide_client::types::SiloRoleRoleAssignment::builder() - .identity_id(silo2_user) + .identity_id(silo2_user.into_untyped_uuid()) .identity_type(oxide_client::types::IdentityType::SiloUser) .role_name(oxide_client::types::SiloRole::Admin) .try_into() @@ -474,19 +480,24 @@ async fn test_silo_certificates() { .send() .await .expect("failed to create Silo"); - let silo3_user = silo1_client - .local_idp_user_create() - .silo(silo3.silo_name.clone()) - .body( - oxide_client::types::UserCreate::builder() - .external_id("testuser-silo3") - .password(oxide_client::types::UserPassword::LoginDisallowed), - ) - .send() - .await - .expect("failed to create user") - .into_inner() - .id; + + let silo3_user: SiloUserUuid = SiloUserUuid::from_untyped_uuid( + silo1_client + .local_idp_user_create() + .silo(silo3.silo_name.clone()) + .body( + oxide_client::types::UserCreate::builder() + .external_id("testuser-silo3") + .password( + oxide_client::types::UserPassword::LoginDisallowed, + ), + ) + .send() + .await + .expect("failed to create user") + .into_inner() + .id, + ); // Grant that user admin privileges on that Silo. let mut silo3_policy = silo1_client @@ -498,7 +509,7 @@ async fn test_silo_certificates() { .into_inner(); silo3_policy.role_assignments.push( oxide_client::types::SiloRoleRoleAssignment::builder() - .identity_id(silo3_user) + .identity_id(silo3_user.into_untyped_uuid()) .identity_type(oxide_client::types::IdentityType::SiloUser) .role_name(oxide_client::types::SiloRole::Admin) .try_into() @@ -544,7 +555,10 @@ async fn test_silo_certificates() { || async { match silo3_client.current_user_view().send().await { Ok(result) => { - assert_eq!(result.into_inner().id, silo3_user); + assert_eq!( + SiloUserUuid::from_untyped_uuid(result.into_inner().id), + silo3_user + ); Ok(()) } Err(oxide_client::Error::CommunicationError(error)) diff --git a/nexus/tests/integration_tests/device_auth.rs b/nexus/tests/integration_tests/device_auth.rs index 78ea189a629..a500f84acbc 100644 --- a/nexus/tests/integration_tests/device_auth.rs +++ b/nexus/tests/integration_tests/device_auth.rs @@ -30,6 +30,7 @@ use nexus_types::external_api::{ DeviceAccessTokenGrant, DeviceAccessTokenType, DeviceAuthResponse, }, }; +use omicron_uuid_kinds::SiloUserUuid; use http::{StatusCode, header, method::Method}; use omicron_sled_agent::sim; @@ -848,7 +849,7 @@ async fn get_tokens_priv( async fn list_user_tokens( testctx: &ClientTestContext, - user_id: Uuid, + user_id: SiloUserUuid, ) -> Vec { NexusRequest::object_get(testctx, "/v1/me/access-tokens") .authn_as(AuthnMode::SiloUser(user_id)) @@ -859,7 +860,7 @@ async fn list_user_tokens( async fn list_user_sessions( testctx: &ClientTestContext, - user_id: Uuid, + user_id: SiloUserUuid, ) -> Vec { let url = format!("/v1/users/{}/sessions", user_id); NexusRequest::object_get(testctx, &url) diff --git a/nexus/tests/integration_tests/role_assignments.rs b/nexus/tests/integration_tests/role_assignments.rs index c11c0e1fb34..40cefc8e662 100644 --- a/nexus/tests/integration_tests/role_assignments.rs +++ b/nexus/tests/integration_tests/role_assignments.rs @@ -424,11 +424,10 @@ async fn run_test( // resource. This is a little ugly, but we don't have a way of creating // silo users yet and it's worth testing this. 
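// `RoleAssignment::for_silo_user` (see the shared.rs hunk further down in this
// patch) takes a typed `SiloUserUuid` plus a role and fills in `identity_type`
// and the untyped `identity_id` itself, which is why the hand-built struct
// literal below can be dropped.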
let mut new_policy = initial_policy.clone(); - let role_assignment = shared::RoleAssignment { - identity_type: shared::IdentityType::SiloUser, - identity_id: USER_TEST_UNPRIVILEGED.id(), - role_name: T::ROLE, - }; + let role_assignment = shared::RoleAssignment::for_silo_user( + USER_TEST_UNPRIVILEGED.id(), + T::ROLE, + ); new_policy.role_assignments.push(role_assignment.clone()); // Make sure the unprivileged user can't grant themselves access! diff --git a/nexus/tests/integration_tests/saml.rs b/nexus/tests/integration_tests/saml.rs index 4b1795a67bc..0995a193009 100644 --- a/nexus/tests/integration_tests/saml.rs +++ b/nexus/tests/integration_tests/saml.rs @@ -14,13 +14,13 @@ use nexus_types::external_api::views::{self, Silo}; use nexus_types::external_api::{params, shared}; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_nexus::TestInterfaces; +use omicron_uuid_kinds::SiloGroupUuid; use base64::Engine; use dropshot::ResultsPage; use http::StatusCode; use http::method::Method; use httptest::{Expectation, Server, matchers::*, responders::*}; -use uuid::Uuid; type ControlPlaneTestContext = nexus_test_utils::ControlPlaneTestContext; @@ -1227,7 +1227,8 @@ async fn test_post_saml_response(cptestctx: &ControlPlaneTestContext) { let silo_group_names: Vec<&str> = groups.items.iter().map(|g| g.display_name.as_str()).collect(); - let silo_group_ids: Vec = groups.items.iter().map(|g| g.id).collect(); + let silo_group_ids: Vec = + groups.items.iter().map(|g| g.id).collect(); assert_same_items(silo_group_names, vec!["SRE", "Admins"]); diff --git a/nexus/tests/integration_tests/silo_users.rs b/nexus/tests/integration_tests/silo_users.rs index 025f81ea238..d1c11f83f74 100644 --- a/nexus/tests/integration_tests/silo_users.rs +++ b/nexus/tests/integration_tests/silo_users.rs @@ -84,7 +84,7 @@ async fn test_silo_group_users(cptestctx: &ControlPlaneTestContext) { let authz_silo_user = authz::SiloUser::new( authz_silo, USER_TEST_UNPRIVILEGED.id(), - LookupType::ById(USER_TEST_UNPRIVILEGED.id()), + LookupType::by_id(USER_TEST_UNPRIVILEGED.id()), ); // Now add unprivileged user to the group, and we should see only that user diff --git a/nexus/tests/integration_tests/silos.rs b/nexus/tests/integration_tests/silos.rs index 4829e7bb168..2aa271dc8f6 100644 --- a/nexus/tests/integration_tests/silos.rs +++ b/nexus/tests/integration_tests/silos.rs @@ -32,6 +32,7 @@ use omicron_common::api::external::{ use omicron_common::api::external::{ObjectIdentity, UserId}; use omicron_test_utils::certificates::CertificateChain; use omicron_test_utils::dev::poll::{CondCheckError, wait_for_condition}; +use omicron_uuid_kinds::SiloUserUuid; use std::collections::{BTreeMap, BTreeSet, HashSet}; use std::fmt::Write; @@ -47,7 +48,6 @@ use nexus_types::external_api::shared::{FleetRole, SiloRole}; use std::convert::Infallible; use std::net::Ipv4Addr; use std::time::Duration; -use uuid::Uuid; type ControlPlaneTestContext = nexus_test_utils::ControlPlaneTestContext; @@ -1080,7 +1080,7 @@ async fn test_silo_groups_jit(cptestctx: &ControlPlaneTestContext) { for group_membership in &group_memberships { let (.., db_group) = LookupPath::new(&authn_opctx, nexus.datastore()) - .silo_group_id(group_membership.silo_group_id) + .silo_group_id(group_membership.silo_group_id.into()) .fetch() .await .unwrap(); @@ -1209,7 +1209,7 @@ async fn test_silo_groups_remove_from_one_group( for group_membership in &group_memberships { let (.., db_group) = LookupPath::new(&authn_opctx, nexus.datastore()) - 
.silo_group_id(group_membership.silo_group_id) + .silo_group_id(group_membership.silo_group_id.into()) .fetch() .await .unwrap(); @@ -1250,7 +1250,7 @@ async fn test_silo_groups_remove_from_one_group( for group_membership in &group_memberships { let (.., db_group) = LookupPath::new(&authn_opctx, nexus.datastore()) - .silo_group_id(group_membership.silo_group_id) + .silo_group_id(group_membership.silo_group_id.into()) .fetch() .await .unwrap(); @@ -1320,7 +1320,7 @@ async fn test_silo_groups_remove_from_both_groups( for group_membership in &group_memberships { let (.., db_group) = LookupPath::new(&authn_opctx, nexus.datastore()) - .silo_group_id(group_membership.silo_group_id) + .silo_group_id(group_membership.silo_group_id.into()) .fetch() .await .unwrap(); @@ -1361,7 +1361,7 @@ async fn test_silo_groups_remove_from_both_groups( for group_membership in &group_memberships { let (.., db_group) = LookupPath::new(&authn_opctx, nexus.datastore()) - .silo_group_id(group_membership.silo_group_id) + .silo_group_id(group_membership.silo_group_id.into()) .fetch() .await .unwrap(); @@ -1562,7 +1562,8 @@ async fn test_silo_user_views(cptestctx: &ControlPlaneTestContext) { silo2_expected_users.sort_by_key(|u| u.id); let users_by_id = { - let mut users_by_id: BTreeMap = BTreeMap::new(); + let mut users_by_id: BTreeMap = + BTreeMap::new(); assert_eq!(users_by_id.insert(silo1_user1_id, &silo1_user1), None); assert_eq!(users_by_id.insert(silo1_user2_id, &silo1_user2), None); assert_eq!(users_by_id.insert(silo2_user1_id, &silo2_user1), None); @@ -1719,7 +1720,7 @@ async fn create_jit_user( ) -> views::User { assert_eq!(silo.identity_mode, shared::SiloIdentityMode::SamlJit); let silo_id = silo.identity.id; - let silo_user_id = Uuid::new_v4(); + let silo_user_id = SiloUserUuid::new_v4(); let authz_silo = authz::Silo::new(authz::FLEET, silo_id, LookupType::ById(silo_id)); let silo_user = diff --git a/nexus/tests/integration_tests/users_builtin.rs b/nexus/tests/integration_tests/users_builtin.rs index 3df709c7f39..50642e10424 100644 --- a/nexus/tests/integration_tests/users_builtin.rs +++ b/nexus/tests/integration_tests/users_builtin.rs @@ -6,6 +6,7 @@ use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::views::UserBuiltin; +use omicron_uuid_kinds::GenericUuid; use std::collections::BTreeMap; type ControlPlaneTestContext = @@ -29,18 +30,24 @@ async fn test_users_builtin(cptestctx: &ControlPlaneTestContext) { .collect::>(); let u = users.remove(&authn::USER_DB_INIT.name.to_string()).unwrap(); - assert_eq!(u.identity.id, authn::USER_DB_INIT.id); + assert_eq!(u.identity.id, authn::USER_DB_INIT.id.into_untyped_uuid()); let u = users.remove(&authn::USER_SERVICE_BALANCER.name.to_string()).unwrap(); - assert_eq!(u.identity.id, authn::USER_SERVICE_BALANCER.id); + assert_eq!( + u.identity.id, + authn::USER_SERVICE_BALANCER.id.into_untyped_uuid() + ); let u = users.remove(&authn::USER_INTERNAL_API.name.to_string()).unwrap(); - assert_eq!(u.identity.id, authn::USER_INTERNAL_API.id); + assert_eq!(u.identity.id, authn::USER_INTERNAL_API.id.into_untyped_uuid()); let u = users.remove(&authn::USER_INTERNAL_READ.name.to_string()).unwrap(); - assert_eq!(u.identity.id, authn::USER_INTERNAL_READ.id); + assert_eq!(u.identity.id, authn::USER_INTERNAL_READ.id.into_untyped_uuid()); let u = users.remove(&authn::USER_EXTERNAL_AUTHN.name.to_string()).unwrap(); - assert_eq!(u.identity.id, authn::USER_EXTERNAL_AUTHN.id); + 
assert_eq!( + u.identity.id, + authn::USER_EXTERNAL_AUTHN.id.into_untyped_uuid() + ); let u = users.remove(&authn::USER_SAGA_RECOVERY.name.to_string()).unwrap(); - assert_eq!(u.identity.id, authn::USER_SAGA_RECOVERY.id); + assert_eq!(u.identity.id, authn::USER_SAGA_RECOVERY.id.into_untyped_uuid()); assert!(users.is_empty(), "found unexpected built-in users"); // TODO-coverage add test for fetching individual users, including invalid diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 3819cb484c7..cee18c8a66c 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -17,6 +17,8 @@ use omicron_common::api::external::{ RouteDestination, RouteTarget, UserId, }; use omicron_common::disk::DiskVariant; +use omicron_uuid_kinds::SiloGroupUuid; +use omicron_uuid_kinds::SiloUserUuid; use oxnet::{IpNet, Ipv4Net, Ipv6Net}; use parse_display::Display; use schemars::JsonSchema; @@ -45,11 +47,16 @@ macro_rules! path_param { macro_rules! id_path_param { ($struct:ident, $param:ident, $name:tt) => { + id_path_param!($struct, $param, $name, Uuid); + }; + + ($struct:ident, $param:ident, $name:tt, $uuid_type:ident) => { #[derive(Serialize, Deserialize, JsonSchema)] pub struct $struct { #[doc = "ID of the "] #[doc = $name] - pub $param: Uuid, + #[schemars(with = "Uuid")] + pub $param: $uuid_type, } }; } @@ -95,8 +102,8 @@ path_param!(ProbePath, probe, "probe"); path_param!(CertificatePath, certificate, "certificate"); id_path_param!(SupportBundlePath, bundle_id, "support bundle"); -id_path_param!(GroupPath, group_id, "group"); -id_path_param!(UserPath, user_id, "user"); +id_path_param!(GroupPath, group_id, "group", SiloGroupUuid); +id_path_param!(UserPath, user_id, "user", SiloUserUuid); id_path_param!(TokenPath, token_id, "token"); id_path_param!(TufTrustRootPath, trust_root_id, "trust root"); @@ -181,7 +188,8 @@ pub struct OptionalSiloSelector { #[derive(Deserialize, JsonSchema)] pub struct UserParam { /// The user's internal ID - pub user_id: Uuid, + #[schemars(with = "Uuid")] + pub user_id: SiloUserUuid, } #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] @@ -198,7 +206,8 @@ pub struct SamlIdentityProviderSelector { #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] pub struct SshKeySelector { /// ID of the silo user - pub silo_user_id: Uuid, + #[schemars(with = "Uuid")] + pub silo_user_id: SiloUserUuid, /// Name or ID of the SSH key pub ssh_key: NameOrId, } @@ -2295,7 +2304,8 @@ pub struct SnapshotCreate { #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] pub struct OptionalGroupSelector { - pub group: Option, + #[schemars(with = "Option")] + pub group: Option, } // BUILT-IN USERS diff --git a/nexus/types/src/external_api/shared.rs b/nexus/types/src/external_api/shared.rs index 58088a9e07b..927536e8181 100644 --- a/nexus/types/src/external_api/shared.rs +++ b/nexus/types/src/external_api/shared.rs @@ -12,6 +12,9 @@ use chrono::DateTime; use chrono::Utc; use omicron_common::api::external::Name; use omicron_common::api::internal::shared::NetworkInterface; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SiloGroupUuid; +use omicron_uuid_kinds::SiloUserUuid; use omicron_uuid_kinds::SupportBundleUuid; use parse_display::FromStr; use schemars::JsonSchema; @@ -88,6 +91,30 @@ pub struct RoleAssignment { pub role_name: AllowedRoles, } +impl RoleAssignment { + pub fn for_silo_user( + silo_user_id: SiloUserUuid, + role_name: AllowedRoles, + ) -> Self { + 
Self { + identity_type: IdentityType::SiloUser, + identity_id: silo_user_id.into_untyped_uuid(), + role_name, + } + } + + pub fn for_silo_group( + silo_group_id: SiloGroupUuid, + role_name: AllowedRoles, + ) -> Self { + Self { + identity_type: IdentityType::SiloGroup, + identity_id: silo_group_id.into_untyped_uuid(), + role_name, + } + } +} + #[derive( Clone, Copy, diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index 7f10955f96f..8ee481a1a64 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -19,7 +19,11 @@ use omicron_common::api::external::{ Digest, Error, FailureDomain, IdentityMetadata, InstanceState, Name, ObjectIdentity, SimpleIdentity, SimpleIdentityOrName, }; -use omicron_uuid_kinds::{AlertReceiverUuid, AlertUuid}; +use omicron_uuid_kinds::AlertReceiverUuid; +use omicron_uuid_kinds::AlertUuid; +use omicron_uuid_kinds::BuiltInUserUuid; +use omicron_uuid_kinds::SiloGroupUuid; +use omicron_uuid_kinds::SiloUserUuid; use oxnet::{Ipv4Net, Ipv6Net}; use schemars::JsonSchema; use semver::Version; @@ -944,7 +948,9 @@ impl fmt::Display for PhysicalDiskState { /// View of a User #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize, JsonSchema)] pub struct User { - pub id: Uuid, + #[schemars(with = "Uuid")] + pub id: SiloUserUuid, + /** Human-readable name that can identify the user */ pub display_name: String, @@ -979,7 +985,8 @@ pub struct CurrentUser { /// View of a Group #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize, JsonSchema)] pub struct Group { - pub id: Uuid, + #[schemars(with = "Uuid")] + pub id: SiloGroupUuid, /// Human-readable name that can identify the group pub display_name: String, @@ -1013,7 +1020,8 @@ pub struct SshKey { pub identity: IdentityMetadata, /// The user to whom this key belongs - pub silo_user_id: Uuid, + #[schemars(with = "Uuid")] + pub silo_user_id: SiloUserUuid, /// SSH public key, e.g., `"ssh-ed25519 AAAAC3NzaC..."` pub public_key: String, @@ -1643,8 +1651,18 @@ mod test { #[derive(Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq)] #[serde(tag = "kind", rename_all = "snake_case")] pub enum AuditLogEntryActor { - UserBuiltin { user_builtin_id: Uuid }, - SiloUser { silo_user_id: Uuid, silo_id: Uuid }, + UserBuiltin { + #[schemars(with = "Uuid")] + user_builtin_id: BuiltInUserUuid, + }, + + SiloUser { + #[schemars(with = "Uuid")] + silo_user_id: SiloUserUuid, + + silo_id: Uuid, + }, + Unauthenticated, } diff --git a/uuid-kinds/src/lib.rs b/uuid-kinds/src/lib.rs index 00e2793a982..c2bbc054ce2 100644 --- a/uuid-kinds/src/lib.rs +++ b/uuid-kinds/src/lib.rs @@ -57,6 +57,7 @@ impl_typed_uuid_kind! { AlertReceiver => "alert_receiver", AntiAffinityGroup => "anti_affinity_group", Blueprint => "blueprint", + BuiltInUser => "built_in_user", Collection => "collection", ConsoleSession => "console_session", Dataset => "dataset", @@ -84,6 +85,8 @@ impl_typed_uuid_kind! { RackReset => "rack_reset", ReconfiguratorSim => "reconfigurator_sim", Region => "region", + SiloGroup => "silo_group", + SiloUser => "silo_user", Sled => "sled", SpUpdate => "sp_update", SupportBundle => "support_bundle", From a8524a37614abeeb287a85164bb525d725ef86e5 Mon Sep 17 00:00:00 2001 From: iliana etaoin Date: Thu, 28 Aug 2025 15:33:43 -0700 Subject: [PATCH 15/38] gateway: versionify; use progenitor client in tests (#8917) Ticks another box on #8727. 
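In brief, "versionify" means the gateway API now declares its supported versions and MGS serves them dynamically, with the client naming a version in a request header. A condensed sketch of the pattern, assembled only from the gateway-api/src/lib.rs and gateway/src/lib.rs hunks below (`server_builder` stands in for the existing Dropshot server builder chain in that file; everything else uses the names shown in the hunks):

    // gateway-api/src/lib.rs: declare the supported API versions. The list must
    // stay sorted; new versions are added at the top as (next_int, IDENT), and the
    // macro defines matching constants such as VERSION_INITIAL.
    use openapi_manager_types::{SupportedVersion, SupportedVersions, api_versions};

    api_versions!([
        (1, INITIAL),
    ]);

    // gateway/src/lib.rs: the Dropshot server resolves the API version per request
    // from a client-supplied header.
    let server = server_builder
        .version_policy(dropshot::VersionPolicy::Dynamic(Box::new(
            dropshot::ClientSpecifiesVersionInHeader::new(
                omicron_common::api::VERSION_HEADER,
                gateway_api::VERSION_INITIAL,
            ),
        )))
        .start();

The generated client and the tests then consume openapi/gateway/gateway-latest.json, a symlink kept pointing at the current versioned OpenAPI document.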
--- Cargo.lock | 3 + clients/gateway-client/src/lib.rs | 5 +- dev-tools/mgs-dev/src/main.rs | 3 +- dev-tools/omdb/tests/successes.out | 2 +- dev-tools/omdb/tests/test_all_output.rs | 15 ++- dev-tools/omicron-dev/src/main.rs | 5 +- dev-tools/openapi-manager/src/omicron.rs | 2 +- gateway-api/Cargo.toml | 2 + gateway-api/src/lib.rs | 30 ++++++ gateway-test-utils/src/setup.rs | 19 ++-- gateway/Cargo.toml | 1 + gateway/src/lib.rs | 6 ++ .../tests/integration_tests/component_list.rs | 26 +++-- gateway/tests/integration_tests/ereports.rs | 73 ++++++------- .../integration_tests/location_discovery.rs | 11 +- .../tests/integration_tests/serial_console.rs | 101 ++++++++++-------- gateway/tests/integration_tests/task_dump.rs | 16 +-- nexus/inventory/src/collector.rs | 17 +-- .../src/driver_update/test_host_phase_1.rs | 2 +- nexus/mgs-updates/src/test_util/updates.rs | 6 +- nexus/mgs-updates/tests/host_phase1_hash.rs | 10 +- .../execution/src/test_utils.rs | 8 +- .../app/background/tasks/ereport_ingester.rs | 13 +-- nexus/test-utils/src/lib.rs | 7 +- .../gateway-1.0.0-12d926.json} | 2 +- openapi/gateway/gateway-latest.json | 1 + 26 files changed, 203 insertions(+), 183 deletions(-) rename openapi/{gateway.json => gateway/gateway-1.0.0-12d926.json} (99%) create mode 120000 openapi/gateway/gateway-latest.json diff --git a/Cargo.lock b/Cargo.lock index 8114da6b563..12857ad65d9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3668,7 +3668,9 @@ dependencies = [ "omicron-common", "omicron-uuid-kinds", "omicron-workspace-hack", + "openapi-manager-types", "schemars", + "semver 1.0.26", "serde", "uuid", ] @@ -7752,6 +7754,7 @@ dependencies = [ "expectorate", "futures", "gateway-api", + "gateway-client", "gateway-messages", "gateway-sp-comms", "gateway-test-utils", diff --git a/clients/gateway-client/src/lib.rs b/clients/gateway-client/src/lib.rs index 4911dcaedfe..ba32c0a2a58 100644 --- a/clients/gateway-client/src/lib.rs +++ b/clients/gateway-client/src/lib.rs @@ -48,7 +48,7 @@ use types::ComponentFirmwareHashStatus; // call into each other. Since `gateway` is a lower-level service and never // calls into Nexus, the current scheme is okay.) 
progenitor::generate_api!( - spec = "../../openapi/gateway.json", + spec = "../../openapi/gateway/gateway-latest.json", interface = Positional, inner_type = slog::Logger, pre_hook = (|log: &slog::Logger, request: &reqwest::Request| { @@ -70,6 +70,7 @@ progenitor::generate_api!( RotImageError = { derives = [ thiserror::Error, PartialEq, Eq, PartialOrd, Ord] }, RotState = { derives = [PartialEq, Eq, PartialOrd, Ord] }, SpComponentCaboose = { derives = [PartialEq, Eq] }, + SpComponentInfo = { derives = [PartialEq, Eq] }, SpIdentifier = { derives = [Copy, PartialEq, Hash, Eq] }, SpIgnition = { derives = [PartialEq, Eq, PartialOrd, Ord] }, SpIgnitionSystemType = { derives = [Copy, PartialEq, Eq, PartialOrd, Ord] }, @@ -82,6 +83,8 @@ progenitor::generate_api!( RotSlot = gateway_types::rot::RotSlot, Ena = ereport_types::Ena, Ereport = ereport_types::Ereport, + Ereports = ereport_types::Ereports, + TaskDump = gateway_types::task_dump::TaskDump, TypedUuidForEreporterRestartKind = omicron_uuid_kinds::EreporterRestartUuid, TypedUuidForMupdateKind = omicron_uuid_kinds::MupdateUuid, }, diff --git a/dev-tools/mgs-dev/src/main.rs b/dev-tools/mgs-dev/src/main.rs index 716955b6844..2421adfe519 100644 --- a/dev-tools/mgs-dev/src/main.rs +++ b/dev-tools/mgs-dev/src/main.rs @@ -81,8 +81,7 @@ impl MgsRunArgs { .await; println!("mgs-dev: MGS is running."); - let addr = gwtestctx.client.bind_address; - println!("mgs-dev: MGS API: http://{:?}", addr); + println!("mgs-dev: MGS API: {}", gwtestctx.client.baseurl()); // Wait for a signal. let caught_signal = signal_stream.next().await; diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index 2dd0c267f7e..f5f91119769 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -231,7 +231,7 @@ SP DETAILS: type "Switch" slot 1 --------------------------------------------- stderr: -note: using MGS URL http://[::1]:REDACTED_PORT/ +note: using MGS URL http://[::1]:REDACTED_PORT ============================================= EXECUTING COMMAND: omdb ["nexus", "background-tasks", "doc"] termination: Exited(0) diff --git a/dev-tools/omdb/tests/test_all_output.rs b/dev-tools/omdb/tests/test_all_output.rs index 4b1413f29ef..8dcadc1cc30 100644 --- a/dev-tools/omdb/tests/test_all_output.rs +++ b/dev-tools/omdb/tests/test_all_output.rs @@ -138,15 +138,12 @@ async fn test_omdb_success_cases(cptestctx: &ControlPlaneTestContext) { let postgres_url = cptestctx.database.listen_url(); let nexus_internal_url = format!("http://{}/", cptestctx.internal_client.bind_address); - let mgs_url = format!( - "http://{}/", - cptestctx - .gateway - .get(&SwitchLocation::Switch0) - .expect("nexus_test always sets up MGS on switch 0") - .client - .bind_address - ); + let mgs_url = cptestctx + .gateway + .get(&SwitchLocation::Switch0) + .expect("nexus_test always sets up MGS on switch 0") + .client + .baseurl(); let ox_url = format!("http://{}/", cptestctx.oximeter.server_address()); let ox_test_producer = cptestctx.producer.address().ip(); let ch_url = format!("http://{}/", cptestctx.clickhouse.http_address()); diff --git a/dev-tools/omicron-dev/src/main.rs b/dev-tools/omicron-dev/src/main.rs index 7eb569ba345..a7207488759 100644 --- a/dev-tools/omicron-dev/src/main.rs +++ b/dev-tools/omicron-dev/src/main.rs @@ -149,8 +149,9 @@ impl RunAllArgs { ); for (location, gateway) in &cptestctx.gateway { println!( - "omicron-dev: management gateway: http://{} ({})", - gateway.client.bind_address, location, + "omicron-dev: management gateway: {} 
({})", + gateway.client.baseurl(), + location, ); } println!("omicron-dev: silo name: {}", cptestctx.silo_name,); diff --git a/dev-tools/openapi-manager/src/omicron.rs b/dev-tools/openapi-manager/src/omicron.rs index e31956b5e73..15857be598f 100644 --- a/dev-tools/openapi-manager/src/omicron.rs +++ b/dev-tools/openapi-manager/src/omicron.rs @@ -98,7 +98,7 @@ pub fn all_apis() -> Vec { }, ManagedApiConfig { title: "Oxide Management Gateway Service API", - versions: Versions::new_lockstep(semver::Version::new(0, 0, 1)), + versions: Versions::new_versioned(gateway_api::supported_versions()), description: "API for interacting with the Oxide \ control plane's gateway service", boundary: ApiBoundary::Internal, diff --git a/gateway-api/Cargo.toml b/gateway-api/Cargo.toml index 45aa8d4fa83..b5a10dd6907 100644 --- a/gateway-api/Cargo.toml +++ b/gateway-api/Cargo.toml @@ -14,6 +14,8 @@ gateway-types.workspace = true omicron-common.workspace = true omicron-uuid-kinds.workspace = true omicron-workspace-hack.workspace = true +openapi-manager-types.workspace = true schemars.workspace = true +semver.workspace = true serde.workspace = true uuid.workspace = true diff --git a/gateway-api/src/lib.rs b/gateway-api/src/lib.rs index 115e50b5d1c..1b502def255 100644 --- a/gateway-api/src/lib.rs +++ b/gateway-api/src/lib.rs @@ -26,10 +26,40 @@ use gateway_types::{ SpComponentResetError, SpUpdateStatus, }, }; +use openapi_manager_types::{ + SupportedVersion, SupportedVersions, api_versions, +}; use schemars::JsonSchema; use serde::Deserialize; use uuid::Uuid; +api_versions!([ + // WHEN CHANGING THE API (part 1 of 2): + // + // +- Pick a new semver and define it in the list below. The list MUST + // | remain sorted, which generally means that your version should go at + // | the very top. + // | + // | Duplicate this line, uncomment the *second* copy, update that copy for + // | your new API version, and leave the first copy commented out as an + // | example for the next person. + // v + // (next_int, IDENT), + (1, INITIAL), +]); + +// WHEN CHANGING THE API (part 2 of 2): +// +// The call to `api_versions!` above defines constants of type +// `semver::Version` that you can use in your Dropshot API definition to specify +// the version when a particular endpoint was added or removed. For example, if +// you used: +// +// (2, ADD_FOOBAR) +// +// Then you could use `VERSION_ADD_FOOBAR` as the version in which endpoints +// were added or removed. + /// This endpoint is used to upload SP and ROT Hubris archives as well as phase 1 host OS /// images. The phase 1 image is 32 MiB, driven by the QSPI flash on hardware. 
const SP_COMPONENT_UPDATE_MAX_BYTES: usize = 64 * 1024 * 1024; diff --git a/gateway-test-utils/src/setup.rs b/gateway-test-utils/src/setup.rs index 83aef2abad5..5607056a163 100644 --- a/gateway-test-utils/src/setup.rs +++ b/gateway-test-utils/src/setup.rs @@ -6,7 +6,6 @@ use camino::Utf8Path; use camino::Utf8PathBuf; -use dropshot::test_util::ClientTestContext; use dropshot::test_util::LogContext; use gateway_messages::SpPort; use omicron_gateway::MgsArguments; @@ -18,7 +17,6 @@ use omicron_test_utils::dev::poll::CondCheckError; use qorb::resolver::AllBackends; use qorb::resolver::Resolver; use qorb::resolvers::fixed::FixedResolver; -use slog::o; use sp_sim::SimRack; use sp_sim::SimulatedSp; use std::collections::HashSet; @@ -37,8 +35,9 @@ pub const DEFAULT_SP_SIM_CONFIG: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/configs/sp_sim_config.test.toml"); pub struct GatewayTestContext { - pub client: ClientTestContext, + pub client: gateway_client::Client, pub server: omicron_gateway::Server, + pub port: u16, pub simrack: SimRack, pub logctx: LogContext, pub gateway_id: Uuid, @@ -47,13 +46,6 @@ pub struct GatewayTestContext { } impl GatewayTestContext { - pub fn client(&self) -> gateway_client::Client { - gateway_client::Client::new( - &self.client.url("/").to_string(), - self.logctx.log.new(slog::o!("component" => "MgsClient")), - ) - } - pub fn mgs_backends(&self) -> watch::Receiver { self.resolver_backends.clone() } @@ -268,9 +260,9 @@ pub async fn test_setup_with_config( .dropshot_server_for_address(localhost_port_0) .unwrap() .local_addr(); - let client = ClientTestContext::new( - server_addr, - log.new(o!("component" => "client test context")), + let client = gateway_client::Client::new( + &format!("http://{server_addr}"), + logctx.log.new(slog::o!("component" => "MgsClient")), ); let mut resolver = FixedResolver::new(std::iter::once(server_addr)); @@ -279,6 +271,7 @@ pub async fn test_setup_with_config( GatewayTestContext { client, server, + port: server_addr.port(), simrack, logctx, gateway_id, diff --git a/gateway/Cargo.toml b/gateway/Cargo.toml index 61dbcac6e38..7e234d79655 100644 --- a/gateway/Cargo.toml +++ b/gateway/Cargo.toml @@ -52,6 +52,7 @@ zip.workspace = true [dev-dependencies] expectorate.workspace = true +gateway-client.workspace = true gateway-test-utils.workspace = true omicron-test-utils.workspace = true serde_json.workspace = true diff --git a/gateway/src/lib.rs b/gateway/src/lib.rs index 484f33d1899..1d5eb70d28b 100644 --- a/gateway/src/lib.rs +++ b/gateway/src/lib.rs @@ -103,6 +103,12 @@ fn start_dropshot_server( log.new(o!("component" => "dropshot")), ) .config(dropshot) + .version_policy(dropshot::VersionPolicy::Dynamic(Box::new( + dropshot::ClientSpecifiesVersionInHeader::new( + omicron_common::api::VERSION_HEADER, + gateway_api::VERSION_INITIAL, + ), + ))) .start() .map_err(|error| { format!( diff --git a/gateway/tests/integration_tests/component_list.rs b/gateway/tests/integration_tests/component_list.rs index 343f3dcbd61..0b62a072499 100644 --- a/gateway/tests/integration_tests/component_list.rs +++ b/gateway/tests/integration_tests/component_list.rs @@ -4,15 +4,12 @@ // Copyright 2022 Oxide Computer Company -use dropshot::test_util; +use gateway_client::types::{SpComponentInfo, SpComponentPresence}; use gateway_messages::DeviceCapabilities; use gateway_messages::SpComponent; use gateway_messages::SpPort; use gateway_test_utils::current_simulator_state; use gateway_test_utils::setup; -use gateway_types::component::SpComponentInfo; -use 
gateway_types::component::SpComponentList; -use gateway_types::component::SpComponentPresence; use gateway_types::component::SpType; #[tokio::test] @@ -35,8 +32,11 @@ async fn component_list() { assert!(sim_state.iter().all(|sp| sp.state.is_ok())); // Get the component list for sled 0. - let url = format!("{}", client.url("/sp/sled/0/component")); - let resp: SpComponentList = test_util::object_get(client, &url).await; + let resp = client + .sp_component_list(gateway_client::types::SpType::Sled, 0) + .await + .unwrap() + .into_inner(); assert_eq!( resp.components, @@ -135,8 +135,11 @@ async fn component_list() { ); // Get the component list for sled 1. - let url = format!("{}", client.url("/sp/sled/1/component")); - let resp: SpComponentList = test_util::object_get(client, &url).await; + let resp = client + .sp_component_list(gateway_client::types::SpType::Sled, 1) + .await + .unwrap() + .into_inner(); assert_eq!( resp.components, @@ -235,8 +238,11 @@ async fn component_list() { ); // Get the component list for switch 0. - let url = format!("{}", client.url("/sp/switch/0/component")); - let resp: SpComponentList = test_util::object_get(client, &url).await; + let resp = client + .sp_component_list(gateway_client::types::SpType::Switch, 0) + .await + .unwrap() + .into_inner(); assert_eq!( resp.components, diff --git a/gateway/tests/integration_tests/ereports.rs b/gateway/tests/integration_tests/ereports.rs index cc381944369..b71afe75415 100644 --- a/gateway/tests/integration_tests/ereports.rs +++ b/gateway/tests/integration_tests/ereports.rs @@ -4,7 +4,8 @@ // Copyright 2025 Oxide Computer Company -use dropshot::test_util; +use ereport_types::Ena; +use ereport_types::EreporterRestartUuid; use gateway_messages::SpPort; use gateway_test_utils::current_simulator_state; use gateway_test_utils::setup; @@ -14,41 +15,31 @@ use std::sync::LazyLock; use uuid::Uuid; struct EreportRequest { - sled: usize, - restart_id: Uuid, - start_ena: u64, - committed_ena: Option, - limit: usize, + sled: u16, + restart_id: EreporterRestartUuid, + start_ena: Ena, + committed_ena: Option, + limit: u32, } impl EreportRequest { async fn response( self, - client: &test_util::ClientTestContext, + client: &gateway_client::Client, ) -> ereport_types::Ereports { let Self { sled, restart_id, start_ena, committed_ena, limit } = self; - use std::fmt::Write; - - let base = client.url("/sp/sled"); - let mut url = format!( - "{base}/{sled}/ereports?restart_id={restart_id}&start_at={start_ena}&limit={limit}" - ); - if let Some(committed) = committed_ena { - write!(&mut url, "&committed={committed}").unwrap(); - } - // N.B. that we must use `ClientTestContext::make_request` rather than one - // of the higher level helpers like `objects_post`, as our combination of - // method and status code is a bit weird. 
- let mut response = client - .make_request::<()>( - http::Method::POST, - &url, - None, - http::StatusCode::OK, + client + .sp_ereports_ingest( + gateway_client::types::SpType::Sled, + sled, + committed_ena.as_ref(), + limit.try_into().unwrap(), + &restart_id, + Some(&start_ena), ) .await - .unwrap(); - test_util::read_json::(&mut response).await + .unwrap() + .into_inner() } } @@ -250,8 +241,8 @@ async fn ereports_basic() { let ereport_types::Ereports { restart_id, reports } = dbg!( EreportRequest { sled: 1, - restart_id: Uuid::new_v4(), - start_ena: 0, + restart_id: EreporterRestartUuid::new_v4(), + start_ena: Ena(0), committed_ena: None, limit: 100 } @@ -284,8 +275,8 @@ async fn ereports_limit() { let ereport_types::Ereports { restart_id, reports } = dbg!( EreportRequest { sled: 0, - restart_id: Uuid::new_v4(), - start_ena: 0, + restart_id: EreporterRestartUuid::new_v4(), + start_ena: Ena(0), committed_ena: None, limit: 3 } @@ -312,8 +303,8 @@ async fn ereports_limit() { let ereport_types::Ereports { restart_id, reports } = dbg!( EreportRequest { sled: 0, - restart_id: restart_id.into_untyped_uuid(), - start_ena: 3, + restart_id, + start_ena: Ena(3), committed_ena: None, limit: 2 } @@ -345,9 +336,9 @@ async fn ereports_commit() { let ereport_types::Ereports { restart_id, reports } = dbg!( EreportRequest { sled: 0, - restart_id: Uuid::new_v4(), - start_ena: 3, - committed_ena: Some(2), + restart_id: EreporterRestartUuid::new_v4(), + start_ena: Ena(3), + committed_ena: Some(Ena(2)), limit: 2 } .response(client) @@ -371,9 +362,9 @@ async fn ereports_commit() { let ereport_types::Ereports { restart_id, reports } = dbg!( EreportRequest { sled: 0, - restart_id: restart_id.into_untyped_uuid(), - start_ena: 0, - committed_ena: Some(2), + restart_id, + start_ena: Ena(0), + committed_ena: Some(Ena(2)), limit: 2 } .response(client) @@ -396,8 +387,8 @@ async fn ereports_commit() { let ereport_types::Ereports { restart_id, reports } = dbg!( EreportRequest { sled: 0, - restart_id: restart_id.into_untyped_uuid(), - start_ena: 0, + restart_id, + start_ena: Ena(0), committed_ena: None, limit: 100 } diff --git a/gateway/tests/integration_tests/location_discovery.rs b/gateway/tests/integration_tests/location_discovery.rs index 1e571d343bc..911110b85b2 100644 --- a/gateway/tests/integration_tests/location_discovery.rs +++ b/gateway/tests/integration_tests/location_discovery.rs @@ -4,10 +4,8 @@ // Copyright 2022 Oxide Computer Company -use dropshot::test_util; use gateway_messages::SpPort; use gateway_test_utils::setup; -use gateway_types::component::SpState; use gateway_types::component::SpType; use omicron_gateway::SpIdentifier; @@ -36,10 +34,11 @@ async fn discovery_both_locations() { // switch 1, and it should match the expected values from the config for (switch, expected_serial) in [(0, "SimSidecar0"), (1, "SimSidecar1")] { for client in [client0, client1] { - let url = - format!("{}", client0.url(&format!("/sp/switch/{}", switch))); - - let state: SpState = test_util::object_get(client, &url).await; + let state = client + .sp_get(gateway_client::types::SpType::Switch, switch) + .await + .unwrap() + .into_inner(); assert_eq!(state.serial_number, expected_serial); } } diff --git a/gateway/tests/integration_tests/serial_console.rs b/gateway/tests/integration_tests/serial_console.rs index f7822aee0cd..5f91d697e92 100644 --- a/gateway/tests/integration_tests/serial_console.rs +++ b/gateway/tests/integration_tests/serial_console.rs @@ -4,7 +4,7 @@ // Copyright 2022 Oxide Computer Company -use 
dropshot::Method; +use dropshot::HttpErrorResponseBody; use futures::prelude::*; use gateway_messages::SpPort; use gateway_test_utils::current_simulator_state; @@ -12,10 +12,9 @@ use gateway_test_utils::setup; use gateway_test_utils::sim_sp_serial_console; use gateway_types::component::SpType; use http::StatusCode; -use http::Uri; -use http::uri::Scheme; -use tokio_tungstenite::tungstenite; +use tokio_tungstenite::WebSocketStream; use tokio_tungstenite::tungstenite::protocol::Message; +use tokio_tungstenite::tungstenite::protocol::Role; #[tokio::test] async fn serial_console_communication() { @@ -34,14 +33,17 @@ async fn serial_console_communication() { sim_sp_serial_console(&simrack.gimlets[0]).await; // connect to the MGS websocket for this gimlet - let url = { - let mut parts = client - .url("/sp/sled/0/component/sp3-host-cpu/serial-console/attach") - .into_parts(); - parts.scheme = Some(Scheme::try_from("ws").unwrap()); - Uri::from_parts(parts).unwrap() - }; - let (mut ws, _resp) = tokio_tungstenite::connect_async(url).await.unwrap(); + let upgraded = client + .sp_component_serial_console_attach( + gateway_client::types::SpType::Sled, + 0, + "sp3-host-cpu", + ) + .await + .unwrap() + .into_inner(); + let mut ws = + WebSocketStream::from_raw_socket(upgraded, Role::Client, None).await; for i in 0..8 { let msg_from_mgs = format!("hello from MGS {}", i).into_bytes(); @@ -80,36 +82,34 @@ async fn serial_console_detach() { sim_sp_serial_console(&simrack.gimlets[0]).await; // connect to the MGS websocket for this gimlet - let attach_url = { - let mut parts = client - .url("/sp/sled/0/component/sp3-host-cpu/serial-console/attach") - .into_parts(); - parts.scheme = Some(Scheme::try_from("ws").unwrap()); - Uri::from_parts(parts).unwrap() - }; - let (mut ws, _resp) = - tokio_tungstenite::connect_async(attach_url.clone()).await.unwrap(); + let upgraded = client + .sp_component_serial_console_attach( + gateway_client::types::SpType::Sled, + 0, + "sp3-host-cpu", + ) + .await + .unwrap() + .into_inner(); + let mut ws = + WebSocketStream::from_raw_socket(upgraded, Role::Client, None).await; // attempting to connect while the first connection is still open should // fail - let err = - tokio_tungstenite::connect_async(attach_url.clone()).await.unwrap_err(); - match err { - tungstenite::Error::Http(resp) => { - assert_eq!(resp.status(), StatusCode::BAD_REQUEST); - } - tungstenite::Error::ConnectionClosed - | tungstenite::Error::AlreadyClosed - | tungstenite::Error::AttackAttempt - | tungstenite::Error::Io(_) - | tungstenite::Error::Tls(_) - | tungstenite::Error::Capacity(_) - | tungstenite::Error::Protocol(_) - | tungstenite::Error::WriteBufferFull(_) - | tungstenite::Error::Utf8 - | tungstenite::Error::Url(_) - | tungstenite::Error::HttpFormat(_) => panic!("unexpected error"), - } + let err = client + .sp_component_serial_console_attach( + gateway_client::types::SpType::Sled, + 0, + "sp3-host-cpu", + ) + .await + .unwrap_err(); + let gateway_client::Error::UnexpectedResponse(response) = err else { + panic!("unexpected error"); + }; + assert_eq!(response.status(), StatusCode::BAD_REQUEST); + let err: HttpErrorResponseBody = response.json().await.unwrap(); + assert!(err.message.contains("serial console already attached")); // the original websocket should still work ws.send(Message::Binary(b"hello".to_vec())).await.unwrap(); @@ -121,12 +121,12 @@ async fn serial_console_detach() { ); // hit the detach endpoint, which should disconnect `ws` - let detach_url = format!( - "{}", - 
client.url("/sp/sled/0/component/sp3-host-cpu/serial-console/detach") - ); client - .make_request_no_body(Method::POST, &detach_url, StatusCode::NO_CONTENT) + .sp_component_serial_console_detach( + gateway_client::types::SpType::Sled, + 0, + "sp3-host-cpu", + ) .await .unwrap(); match ws.next().await { @@ -137,8 +137,17 @@ async fn serial_console_detach() { } // we should now be able to rettach - let (mut ws, _resp) = - tokio_tungstenite::connect_async(attach_url.clone()).await.unwrap(); + let upgraded = client + .sp_component_serial_console_attach( + gateway_client::types::SpType::Sled, + 0, + "sp3-host-cpu", + ) + .await + .unwrap() + .into_inner(); + let mut ws = + WebSocketStream::from_raw_socket(upgraded, Role::Client, None).await; ws.send(Message::Binary(b"hello".to_vec())).await.unwrap(); assert_eq!(console_read.recv().await.unwrap(), b"hello"); console_write.send(b"world".to_vec()).await.unwrap(); diff --git a/gateway/tests/integration_tests/task_dump.rs b/gateway/tests/integration_tests/task_dump.rs index 882ed28ccb4..f4f5bfb283e 100644 --- a/gateway/tests/integration_tests/task_dump.rs +++ b/gateway/tests/integration_tests/task_dump.rs @@ -5,7 +5,6 @@ // Copyright 2025 Oxide Computer Company use base64::prelude::*; -use dropshot::test_util; use gateway_messages::SpPort; use gateway_test_utils::current_simulator_state; use gateway_test_utils::setup; @@ -27,13 +26,14 @@ async fn task_dump() { assert!(sim_state.iter().all(|sp| sp.state.is_ok())); // Get task dump count for sled 0. - let url = format!("{}", client.url("/sp/sled/0/task-dump")); - let resp: u32 = test_util::object_get(client, &url).await; - + let resp = client + .sp_task_dump_count(gateway_client::types::SpType::Sled, 0) + .await + .unwrap() + .into_inner(); assert_eq!(resp, 1); // Get the task dump. - let url = format!("{}", client.url("/sp/sled/0/task-dump/0")); let TaskDump { task_index, timestamp, @@ -42,7 +42,11 @@ async fn task_dump() { gitc, vers, base64_zip, - } = test_util::object_get(client, &url).await; + } = client + .sp_task_dump_get(gateway_client::types::SpType::Sled, 0, 0) + .await + .unwrap() + .into_inner(); assert_eq!(0, task_index); assert_eq!(1, timestamp); diff --git a/nexus/inventory/src/collector.rs b/nexus/inventory/src/collector.rs index d5c31bed05b..6193d4d3a52 100644 --- a/nexus/inventory/src/collector.rs +++ b/nexus/inventory/src/collector.rs @@ -1106,8 +1106,7 @@ mod test { let sled1_url = format!("http://{}/", sled1.http_server.local_addr()); let sled2_url = format!("http://{}/", sled2.http_server.local_addr()); - let mgs_url = format!("http://{}/", gwtestctx.client.bind_address); - let mgs_client = gateway_client::Client::new(&mgs_url, log.clone()); + let mgs_client = gwtestctx.client.clone(); let sled_enum = StaticSledAgentEnumerator::new([sled1_url, sled2_url]); // We don't have any mocks for this, and it's unclear how much value // there would be in providing them at this juncture. 
@@ -1186,10 +1185,8 @@ mod test { let sled2_url = format!("http://{}/", sled2.http_server.local_addr()); let mgs_clients = [&gwtestctx1, &gwtestctx2] .into_iter() - .map(|g| { - let url = format!("http://{}/", g.client.bind_address); - gateway_client::Client::new(&url, log.clone()) - }) + .map(|g| &g.client) + .cloned() .collect::>(); let sled_enum = StaticSledAgentEnumerator::new([sled1_url, sled2_url]); // We don't have any mocks for this, and it's unclear how much value @@ -1234,10 +1231,7 @@ mod test { ) .await; let log = &gwtestctx.logctx.log; - let real_client = { - let url = format!("http://{}/", gwtestctx.client.bind_address); - gateway_client::Client::new(&url, log.clone()) - }; + let real_client = gwtestctx.client.clone(); let bad_client = { // This IP range is guaranteed by RFC 6666 to discard traffic. let url = "http://[100::1]:12345"; @@ -1299,8 +1293,7 @@ mod test { let sled1_url = format!("http://{}/", sled1.http_server.local_addr()); let sledbogus_url = String::from("http://[100::1]:45678"); - let mgs_url = format!("http://{}/", gwtestctx.client.bind_address); - let mgs_client = gateway_client::Client::new(&mgs_url, log.clone()); + let mgs_client = gwtestctx.client.clone(); let sled_enum = StaticSledAgentEnumerator::new([sled1_url, sledbogus_url]); // We don't have any mocks for this, and it's unclear how much value diff --git a/nexus/mgs-updates/src/driver_update/test_host_phase_1.rs b/nexus/mgs-updates/src/driver_update/test_host_phase_1.rs index b7ca009785d..059872c605c 100644 --- a/nexus/mgs-updates/src/driver_update/test_host_phase_1.rs +++ b/nexus/mgs-updates/src/driver_update/test_host_phase_1.rs @@ -375,7 +375,7 @@ async fn basic_failures() { // tests below; get the actual artifact hashes reported by our test setup // and ensure none of them matches that. let (active_phase_1_hash, inactive_phase_1_hash, phase_1_slot) = { - let sp_init = SpTestState::load(&gwtestctx.client(), SpType::Sled, 1) + let sp_init = SpTestState::load(&gwtestctx.client, SpType::Sled, 1) .await .expect("loading initial state"); ( diff --git a/nexus/mgs-updates/src/test_util/updates.rs b/nexus/mgs-updates/src/test_util/updates.rs index 343db0f7209..66a85fe4217 100644 --- a/nexus/mgs-updates/src/test_util/updates.rs +++ b/nexus/mgs-updates/src/test_util/updates.rs @@ -99,12 +99,12 @@ impl UpdateDescription<'_> { /// Execution does not start until you call `run_until_status()` or /// `finish()`on the returned value. pub async fn setup(&self) -> InProgressAttempt { - let mgs_client = self.gwtestctx.client(); + let mgs_client = &self.gwtestctx.client; // Fetch information about the device that we're going to update. // This will be used to configure the preconditions (expected baseboard // id and expected active/inactive slot contents). 
- let sp1 = SpTestState::load(&mgs_client, self.sp_type, self.slot_id) + let sp1 = SpTestState::load(mgs_client, self.sp_type, self.slot_id) .await .expect("loading initial state"); let baseboard_id = Arc::new( @@ -351,7 +351,7 @@ impl UpdateDescription<'_> { step: Some(StepResult::ReadyAgain(StepThrough::new(future))), sp_type: self.sp_type, slot_id: self.slot_id, - mgs_client: self.gwtestctx.client(), + mgs_client: self.gwtestctx.client.clone(), sp1, deployed_artifact: *self.artifact_hash, deployed_caboose: deployed_caboose.cloned(), diff --git a/nexus/mgs-updates/tests/host_phase1_hash.rs b/nexus/mgs-updates/tests/host_phase1_hash.rs index 272bab19128..cf8de06c48d 100644 --- a/nexus/mgs-updates/tests/host_phase1_hash.rs +++ b/nexus/mgs-updates/tests/host_phase1_hash.rs @@ -68,17 +68,13 @@ async fn test_host_phase1_hashing() { .await; // We'll only talk to one sp-sim for this test. - let mgs_client = mgstestctx.client(); + let mgs_client = &mgstestctx.client; let sp_sim = &mgstestctx.simrack.gimlets[0]; let sp_type = SpType::Sled; let sp_component = SpComponent::HOST_CPU_BOOT_FLASH.const_as_str(); let sp_slot = 0; - let phase1_checker = Phase1HashStatusChecker { - mgs_client: &mgs_client, - sp_type, - sp_slot, - sp_component, - }; + let phase1_checker = + Phase1HashStatusChecker { mgs_client, sp_type, sp_slot, sp_component }; // We want explicit (i.e., not-timer-based) control over when hashing // completes. diff --git a/nexus/reconfigurator/execution/src/test_utils.rs b/nexus/reconfigurator/execution/src/test_utils.rs index 15740f443a1..46525d51527 100644 --- a/nexus/reconfigurator/execution/src/test_utils.rs +++ b/nexus/reconfigurator/execution/src/test_utils.rs @@ -99,13 +99,7 @@ pub fn overridables_for_test( for (id_str, switch_location) in scrimlets { let sled_id = id_str.parse().unwrap(); let ip = Ipv6Addr::LOCALHOST; - let mgs_port = cptestctx - .gateway - .get(&switch_location) - .unwrap() - .client - .bind_address - .port(); + let mgs_port = cptestctx.gateway.get(&switch_location).unwrap().port; let dendrite_port = cptestctx.dendrite.get(&switch_location).unwrap().port; let mgd_port = cptestctx.mgd.get(&switch_location).unwrap().port; diff --git a/nexus/src/app/background/tasks/ereport_ingester.rs b/nexus/src/app/background/tasks/ereport_ingester.rs index 7fd1afb6e38..329f32c7a7e 100644 --- a/nexus/src/app/background/tasks/ereport_ingester.rs +++ b/nexus/src/app/background/tasks/ereport_ingester.rs @@ -222,15 +222,11 @@ impl Ingester { // Continue requesting ereports from this SP in a loop until we have // received all its ereports. - while let Some(gateway_client::types::Ereports { - restart_id, - items, - next_page: _, - }) = self + while let Some(ereport_types::Ereports { restart_id, reports }) = self .mgs_requests(&opctx, clients, ¶ms, sp_type, slot, &mut status) .await { - if items.is_empty() { + if reports.items.is_empty() { if let Some(ref mut status) = status { status.requests += 1; } @@ -251,7 +247,8 @@ impl Ingester { } else { status.get_or_insert_default().requests += 1; } - let db_ereports = items + let db_ereports = reports + .items .into_iter() .map(|ereport| { const MISSING_VPD: &str = @@ -379,7 +376,7 @@ impl Ingester { sp_type: nexus_types::inventory::SpType, slot: u16, status: &mut Option, - ) -> Option { + ) -> Option { // If an attempt to collect ereports from one gateway fails, we will try // any other discovered gateways. 
for GatewayClient { addr, client } in clients.iter() { diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index 5022f142924..d4655b3eede 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -750,12 +750,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { sled_id, Ipv6Addr::LOCALHOST, self.dendrite.get(&switch_location).unwrap().port, - self.gateway - .get(&switch_location) - .unwrap() - .client - .bind_address - .port(), + self.gateway.get(&switch_location).unwrap().port, self.mgd.get(&switch_location).unwrap().port, ) .unwrap(); diff --git a/openapi/gateway.json b/openapi/gateway/gateway-1.0.0-12d926.json similarity index 99% rename from openapi/gateway.json rename to openapi/gateway/gateway-1.0.0-12d926.json index bfd2b97cefc..40d06e77146 100644 --- a/openapi/gateway.json +++ b/openapi/gateway/gateway-1.0.0-12d926.json @@ -7,7 +7,7 @@ "url": "https://oxide.computer", "email": "api@oxide.computer" }, - "version": "0.0.1" + "version": "1.0.0" }, "paths": { "/ignition": { diff --git a/openapi/gateway/gateway-latest.json b/openapi/gateway/gateway-latest.json new file mode 120000 index 00000000000..7a5bcae6802 --- /dev/null +++ b/openapi/gateway/gateway-latest.json @@ -0,0 +1 @@ +gateway-1.0.0-12d926.json \ No newline at end of file From 884f2c2a5fb28fd0cba6e6273c8c8e7632ed0089 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karen=20C=C3=A1rcamo?= Date: Thu, 28 Aug 2025 16:26:33 -0700 Subject: [PATCH 16/38] [mgs-updates] RotCommunicationFailed should be transient (#8930) Closes: https://github.com/oxidecomputer/omicron/issues/8886 --- nexus/mgs-updates/src/driver_update.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nexus/mgs-updates/src/driver_update.rs b/nexus/mgs-updates/src/driver_update.rs index 554ea3f3e16..98361b3b689 100644 --- a/nexus/mgs-updates/src/driver_update.rs +++ b/nexus/mgs-updates/src/driver_update.rs @@ -727,6 +727,7 @@ async fn wait_for_update_done( // * non-empty transient_boot_preference (RoT only) // * failure to fetch inventory from sled-agent (host OS only) // * failure to determine an active slot artifact + // * failure to communicate with the RoT // // We have no reason to think these won't converge, so we proceed // with waiting. @@ -742,6 +743,7 @@ async fn wait_for_update_done( | Err(PrecheckError::MismatchedHostOsActiveSlot { .. }) | Err(PrecheckError::DeterminingActiveArtifact { .. }) | Err(PrecheckError::DeterminingHostOsBootDisk { .. }) + | Err(PrecheckError::RotCommunicationFailed { .. }) | Ok(PrecheckStatus::ReadyForUpdate) => { if before.elapsed() >= timeout { return Err(UpdateWaitError::Timeout(timeout)); @@ -756,8 +758,7 @@ async fn wait_for_update_done( | PrecheckError::WrongActiveVersion { .. } | PrecheckError::WrongActiveArtifact { .. } | PrecheckError::WrongHostOsBootDisk { .. } - | PrecheckError::InvalidHostPhase1Slot { .. } - | PrecheckError::RotCommunicationFailed { .. }), + | PrecheckError::InvalidHostPhase1Slot { .. }), ) => { // Stop trying to make this update happen. It's not going to // happen. From 93c7b0b2127a0b8a9ac7b98226c19837867eeeba Mon Sep 17 00:00:00 2001 From: Rain Date: Thu, 28 Aug 2025 23:59:29 -0700 Subject: [PATCH 17/38] [2/n] [reconfigurator-planning] avoid wiping out boot partition state (#8937) The boot partition state is going to become important in tests soon, since we want to set up the simulated system such that no MGS-driven updates are happening. 
Simply calling `debug_assume_success` wipes out MGS-related state, so add an alternative that doesn't. --- nexus-sled-agent-shared/src/inventory.rs | 102 ++++++++++++-------- nexus/reconfigurator/planning/src/system.rs | 13 ++- 2 files changed, 73 insertions(+), 42 deletions(-) diff --git a/nexus-sled-agent-shared/src/inventory.rs b/nexus-sled-agent-shared/src/inventory.rs index 176106c12a7..283f136d768 100644 --- a/nexus-sled-agent-shared/src/inventory.rs +++ b/nexus-sled-agent-shared/src/inventory.rs @@ -190,6 +190,31 @@ impl ConfigReconcilerInventory { /// look at the actual `last_reconciliation` value from the parent /// [`Inventory`]. pub fn debug_assume_success(config: OmicronSledConfig) -> Self { + let mut ret = Self { + // These fields will be filled in by `debug_update_assume_success`. + last_reconciled_config: OmicronSledConfig::default(), + external_disks: BTreeMap::new(), + datasets: BTreeMap::new(), + orphaned_datasets: IdOrdMap::new(), + zones: BTreeMap::new(), + remove_mupdate_override: None, + + // These fields will not. + boot_partitions: BootPartitionContents::debug_assume_success(), + }; + + ret.debug_update_assume_success(config); + + ret + } + + /// Given a sled config, update an existing reconciler result to simulate an + /// output that sled-agent could have emitted if reconciliation succeeded. + /// + /// This method should only be used by tests and dev tools; real code should + /// look at the actual `last_reconciliation` value from the parent + /// [`Inventory`]. + pub fn debug_update_assume_success(&mut self, config: OmicronSledConfig) { let external_disks = config .disks .iter() @@ -212,50 +237,17 @@ impl ConfigReconcilerInventory { RemoveMupdateOverrideBootSuccessInventory::Removed, ), non_boot_message: "mupdate override successfully removed \ - on non-boot disks" + on non-boot disks" .to_owned(), } }); - Self { - last_reconciled_config: config, - external_disks, - datasets, - orphaned_datasets: IdOrdMap::new(), - zones, - boot_partitions: { - BootPartitionContents { - boot_disk: Ok(M2Slot::A), - slot_a: Ok(BootPartitionDetails { - header: BootImageHeader { - flags: 0, - data_size: 1000, - image_size: 1000, - target_size: 1000, - sha256: [0; 32], - image_name: "fake from debug_assume_success()" - .to_string(), - }, - artifact_hash: ArtifactHash([0x0a; 32]), - artifact_size: 1000, - }), - slot_b: Ok(BootPartitionDetails { - header: BootImageHeader { - flags: 0, - data_size: 1000, - image_size: 1000, - target_size: 1000, - sha256: [1; 32], - image_name: "fake from debug_assume_success()" - .to_string(), - }, - artifact_hash: ArtifactHash([0x0b; 32]), - artifact_size: 1000, - }), - } - }, - remove_mupdate_override, - } + self.last_reconciled_config = config; + self.external_disks = external_disks; + self.datasets = datasets; + self.orphaned_datasets = IdOrdMap::new(); + self.zones = zones; + self.remove_mupdate_override = remove_mupdate_override; } } @@ -286,6 +278,36 @@ impl BootPartitionContents { M2Slot::B => &self.slot_b, } } + + pub fn debug_assume_success() -> Self { + Self { + boot_disk: Ok(M2Slot::A), + slot_a: Ok(BootPartitionDetails { + header: BootImageHeader { + flags: 0, + data_size: 1000, + image_size: 1000, + target_size: 1000, + sha256: [0; 32], + image_name: "fake from debug_assume_success()".to_string(), + }, + artifact_hash: ArtifactHash([0x0a; 32]), + artifact_size: 1000, + }), + slot_b: Ok(BootPartitionDetails { + header: BootImageHeader { + flags: 0, + data_size: 1000, + image_size: 1000, + target_size: 1000, + sha256: [1; 32], + image_name: 
"fake from debug_assume_success()".to_string(), + }, + artifact_hash: ArtifactHash([0x0b; 32]), + artifact_size: 1000, + }), + } + } } #[derive(Clone, Debug, PartialEq, Eq, Deserialize, JsonSchema, Serialize)] diff --git a/nexus/reconfigurator/planning/src/system.rs b/nexus/reconfigurator/planning/src/system.rs index d9ee2cf64e2..c9741f62977 100644 --- a/nexus/reconfigurator/planning/src/system.rs +++ b/nexus/reconfigurator/planning/src/system.rs @@ -461,8 +461,17 @@ impl SystemDescription { completed_at: Utc::now(), ran_for: Duration::from_secs(5), }; - sled.inventory_sled_agent.last_reconciliation = - Some(ConfigReconcilerInventory::debug_assume_success(sled_config)); + match sled.inventory_sled_agent.last_reconciliation.as_mut() { + Some(last_reconciliation) => { + last_reconciliation.debug_update_assume_success(sled_config); + } + None => { + sled.inventory_sled_agent.last_reconciliation = + Some(ConfigReconcilerInventory::debug_assume_success( + sled_config, + )); + } + }; Ok(self) } From ff423d49a72d77433ab659025d7a4c1be9847d07 Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Fri, 29 Aug 2025 10:49:19 -0400 Subject: [PATCH 18/38] [reconfigurator-cli] Extend target-release test to step through zone updates (#8940) This is a step toward addressing #8478. It doesn't quite get there: * I stopped at the point where we're ready to update Nexus; the current behavior is wrong, and it didn't seem worth adding that wrong behavior to this test. * Our simulated system doesn't set up all zone types (it's missing boundary NTP, cockroach, oximeter, and multinode clickhouse). I'm not sure the second bullet is _super_ important for this test; we don't really treat different zone types all that differently, other than that some are expunge -> add and others are upgraded in place (and we do have zones of both of those types in our simulated system). But I'd like to leave #8478 open until we can address the first one. In terms of review: the non-expectorate changes are quite small. I'd recommend skimming the expectorate changes and checking the points where there are comments about what just happened or is about to happen more carefully. (As I was doing this I discovered that our simulated system started with 3 pantry zones but during the upgrade test it expunged them without replacing them; that's what led to changing the example system's `target_crucible_pantry_zone_count`. So I think it is worth at least skimming this output to see if there's anything else funky that I missed.) 
--- dev-tools/reconfigurator-cli/src/lib.rs | 36 +- .../tests/input/cmds-target-release.txt | 193 +- .../tests/output/cmds-target-release-stdout | 4738 ++++++++++++++++- nexus/reconfigurator/planning/src/system.rs | 3 +- 4 files changed, 4959 insertions(+), 11 deletions(-) diff --git a/dev-tools/reconfigurator-cli/src/lib.rs b/dev-tools/reconfigurator-cli/src/lib.rs index 3b1ec1b20e6..ceb7e0feba6 100644 --- a/dev-tools/reconfigurator-cli/src/lib.rs +++ b/dev-tools/reconfigurator-cli/src/lib.rs @@ -245,7 +245,7 @@ fn process_command( cmd_sled_update_host_phase_2(sim, args) } Commands::SledUpdateRotBootloader(args) => { - cmd_sled_update_rot_bootlaoder(sim, args) + cmd_sled_update_rot_bootloader(sim, args) } Commands::SiloList => cmd_silo_list(sim), Commands::SiloAdd(args) => cmd_silo_add(sim, args), @@ -402,6 +402,8 @@ struct SledSetArgs { enum SledSetCommand { /// set the policy for this sled Policy(SledSetPolicyArgs), + /// set the Omicron config for this sled from a blueprint + OmicronConfig(SledSetOmicronConfigArgs), #[clap(flatten)] Visibility(SledSetVisibilityCommand), /// set the mupdate override for this sled @@ -415,6 +417,12 @@ struct SledSetPolicyArgs { policy: SledPolicyOpt, } +#[derive(Debug, Args)] +struct SledSetOmicronConfigArgs { + /// the blueprint to derive the Omicron config from + blueprint: BlueprintIdOpt, +} + #[derive(Debug, Subcommand)] enum SledSetVisibilityCommand { /// mark a sled hidden from inventory @@ -1538,6 +1546,30 @@ fn cmd_sled_set( ); Ok(Some(format!("set sled {sled_id} policy to {policy}"))) } + SledSetCommand::OmicronConfig(command) => { + let resolved_id = + system.resolve_blueprint_id(command.blueprint.into())?; + let blueprint = system.get_blueprint(&resolved_id)?; + let sled_cfg = + blueprint.sleds.get(&sled_id).with_context(|| { + format!("sled id {sled_id} not found in blueprint") + })?; + let omicron_sled_cfg = + sled_cfg.clone().into_in_service_sled_config(); + system + .description_mut() + .sled_set_omicron_config(sled_id, omicron_sled_cfg)?; + sim.commit_and_bump( + format!( + "reconfigurator-cli sled-set omicron-config: \ + {sled_id} from {resolved_id}", + ), + state, + ); + Ok(Some(format!( + "set sled {sled_id} omicron config from {resolved_id}" + ))) + } SledSetCommand::Visibility(command) => { let new = command.to_visibility(); let prev = system @@ -1636,7 +1668,7 @@ fn cmd_sled_update_install_dataset( ))) } -fn cmd_sled_update_rot_bootlaoder( +fn cmd_sled_update_rot_bootloader( sim: &mut ReconfiguratorSim, args: SledUpdateRotBootloaderArgs, ) -> anyhow::Result> { diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt b/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt index 5b16cf68076..c285bb55ca8 100644 --- a/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt +++ b/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt @@ -223,10 +223,195 @@ sled-update-host-phase2 d81c6a84-79b8-4958-ae41-ea46c9b19763 --boot-disk B --slo sled-update-host-phase1 d81c6a84-79b8-4958-ae41-ea46c9b19763 --active B --slot-b 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89 inventory-generate -# Do one more planning run. This should update one control plane zone. +# Do another planning run. This should start updating zones (one at a time). blueprint-plan latest latest blueprint-diff latest -# We should continue walking through the update. 
We need to build out a -# reconfigurator-cli subcommand to simulate updated zone image sources (just -# like we have sled-update-sp for simulated SP updates). +# Update the first control plane zone and plan again, which should update the +# next zone on this sled. +sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +# Repeat until all non-Nexus zones on this sled have been updated. Some of these +# steps update a zone in place; others expunge a zone and are followed by a zone +# addition. It doesn't seem _particularly_ useful to spell out which step is +# updating which zone, in terms of keeping this test up to date, but we will +# have to tweak the number of times we iterate on this sled as our simulated +# system or planner placement changes. +sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +# The previous plan updated the last non-Nexus zone on this sled. Nexus comes +# after everything else, so the next step should update the first zone on the +# next sled. +sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +# Step through updates of all the non-Nexus zones on this sled. 
+sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +# The previous plan updated the last non-Nexus zone on this sled. Nexus comes +# after everything else, so the next step should update the first zone on the +# next sled. +sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +# Step through updates of all the non-Nexus zones on this sled. +sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +# The previous step updated the last non-Nexus zone on the final sled. We should +# now see a blueprint where every in-service zone (other than Nexus) has an +# image source set to an artifact from our TUF repo. 
+blueprint-show latest + +# We ought to update the inventory for the final sled and then step through +# the Nexus handoff process, but that work is still in progress. diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout index dd49f0c9f0b..91a353d6813 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout @@ -1950,7 +1950,7 @@ set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 host phase 1 details: active -> B, generated inventory collection 45c1c7bb-984a-43f7-bb3f-4a5437ed7b82 from configured sleds -> # Do one more planning run. This should update one control plane zone. +> # Do another planning run. This should start updating zones (one at a time). > blueprint-plan latest latest INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a @@ -2074,6 +2074,4736 @@ external DNS: -> # We should continue walking through the update. We need to build out a -> # reconfigurator-cli subcommand to simulate updated zone image sources (just -> # like we have sled-update-sp for simulated SP updates). +> # Update the first control plane zone and plan again, which should update the +> # next zone on this sled. +> sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (e54a0836-53e1-4948-a3af-0b77165289b5) + +> inventory-generate +generated inventory collection ca7f27e8-5949-4ac1-8f32-18ad76d9c217 from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, 
serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint 459a45a5-616e-421f-873b-2fb08c36205c based on parent blueprint e54a0836-53e1-4948-a3af-0b77165289b5 +planning report for blueprint 459a45a5-616e-421f-873b-2fb08c36205c: +chicken switches: + add zones with mupdate override: false + +* 1 out-of-date zone expunged: + * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 62620961-fc4a-481e-968b-f5acbac0dc63 (internal_ntp) +* 24 remaining out-of-date zones + + +> blueprint-diff latest +from: blueprint e54a0836-53e1-4948-a3af-0b77165289b5 +to: blueprint 459a45a5-616e-421f-873b-2fb08c36205c + + MODIFIED SLEDS: + + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 4 -> 5): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-727522a7-934f-494d-b5b3-160968e74463 in service + fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service + fake-vendor fake-model serial-b5fd5bc1-099e-4e77-8028-a9793c11f43b in service + + + datasets: + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_727522a7-934f-494d-b5b3-160968e74463/crucible 2f204c50-a327-479c-8852-f53ec7a19c1f in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 3e0d6188-c503-49cf-a441-fa7df40ceb43 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off + 
oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 +* oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 - in service none none off + └─ + expunged + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 + crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 install dataset in service fd00:1122:3344:102::28 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset in service fd00:1122:3344:102::24 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset in service fd00:1122:3344:1::1 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 +* internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset - in service fd00:1122:3344:102::21 + └─ + expunged ⏳ + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": +- name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) +- AAAA fd00:1122:3344:102::21 +* name: _internal-ntp._tcp (records: 3 -> 2) +- SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal +- SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal +- SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal ++ SRV port 123 
6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal ++ SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + unchanged names: 49 (records: 61) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> # Repeat until all non-Nexus zones on this sled have been updated. Some of these +> # steps update a zone in place; others expunge a zone and are followed by a zone +> # addition. It doesn't seem _particularly_ useful to spell out which step is +> # updating which zone, in terms of keeping this test up to date, but we will +> # have to tweak the number of times we iterate on this sled as our simulated +> # system or planner placement changes. +> sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (459a45a5-616e-421f-873b-2fb08c36205c) + +> inventory-generate +generated inventory collection 8a02a1c6-9e86-4dc0-9293-cd17da34f319 from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 8, num_already_artifact: 1, num_eligible: 0, num_ineligible: 7 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +INFO some zones not yet up-to-date, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zones_currently_updating: [ZoneCurrentlyUpdating { zone_id: f83ade6d-9ab1-4679-813b-b9457e039c0b (service), zone_kind: InternalNtp, reason: MissingInInventory { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") } } }] +generated blueprint b2295597-5788-482e-acf9-1731ec63fbd2 based on parent blueprint 459a45a5-616e-421f-873b-2fb08c36205c +planning report for blueprint b2295597-5788-482e-acf9-1731ec63fbd2: +chicken switches: + add zones with mupdate override: false + +* waiting for NTP zones to appear in inventory on sleds: 
2b8f0cb3-0295-4b3c-bc58-4fe88b57112c +* sleds getting NTP zones and which have other services already, making them eligible for discretionary zones: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c +* missing NTP zone on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c + + +> blueprint-diff latest +from: blueprint 459a45a5-616e-421f-873b-2fb08c36205c +to: blueprint b2295597-5788-482e-acf9-1731ec63fbd2 + + MODIFIED SLEDS: + + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 5 -> 6): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-727522a7-934f-494d-b5b3-160968e74463 in service + fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service + fake-vendor fake-model serial-b5fd5bc1-099e-4e77-8028-a9793c11f43b in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_727522a7-934f-494d-b5b3-160968e74463/crucible 2f204c50-a327-479c-8852-f53ec7a19c1f in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 3e0d6188-c503-49cf-a441-fa7df40ceb43 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 in service none none off + 
oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 ++ oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_f83ade6d-9ab1-4679-813b-b9457e039c0b 7642e649-44d6-49b0-ba7c-cc3f6b6f2f9c in service none none off + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 + crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 install dataset in service fd00:1122:3344:102::28 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset in service fd00:1122:3344:102::24 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset in service fd00:1122:3344:1::1 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 +* internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset - expunged ⏳ fd00:1122:3344:102::21 + └─ + expunged ✓ ++ internal_ntp f83ade6d-9ab1-4679-813b-b9457e039c0b artifact: version 1.0.0 in service fd00:1122:3344:102::29 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": +* name: _internal-ntp._tcp (records: 2 -> 3) +- SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal +- SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal ++ SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal ++ SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal ++ SRV port 123 f83ade6d-9ab1-4679-813b-b9457e039c0b.host.control-plane.oxide.internal ++ name: f83ade6d-9ab1-4679-813b-b9457e039c0b.host (records: 1) ++ AAAA 
fd00:1122:3344:102::29 + unchanged names: 49 (records: 61) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (b2295597-5788-482e-acf9-1731ec63fbd2) + +> inventory-generate +generated inventory collection c1adcd42-121f-4580-bfb9-d8a9937ca9e1 from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 2, num_eligible: 0, num_ineligible: 7 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint 6fad8fd4-e825-433f-b76d-495484e068ce based on parent blueprint b2295597-5788-482e-acf9-1731ec63fbd2 +planning report for blueprint 6fad8fd4-e825-433f-b76d-495484e068ce: +chicken switches: + add zones with mupdate override: false + +* 1 out-of-date zone expunged: + * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 6c3ae381-04f7-41ea-b0ac-74db387dbc3a (external_dns) +* 23 remaining out-of-date zones + + +> blueprint-diff latest +from: blueprint b2295597-5788-482e-acf9-1731ec63fbd2 +to: blueprint 6fad8fd4-e825-433f-b76d-495484e068ce + + MODIFIED SLEDS: + + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 6 -> 7): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-727522a7-934f-494d-b5b3-160968e74463 in service + fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service + fake-vendor fake-model serial-b5fd5bc1-099e-4e77-8028-a9793c11f43b in service + + + datasets: + 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_727522a7-934f-494d-b5b3-160968e74463/crucible 2f204c50-a327-479c-8852-f53ec7a19c1f in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 3e0d6188-c503-49cf-a441-fa7df40ceb43 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_f83ade6d-9ab1-4679-813b-b9457e039c0b 7642e649-44d6-49b0-ba7c-cc3f6b6f2f9c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 +* oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c - in service none none off + └─ + expunged +* oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 
8c4fa711-1d5d-4e93-85f0-d17bff47b063 - in service none none off + └─ + expunged + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 + crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 install dataset in service fd00:1122:3344:102::28 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset in service fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset expunged ✓ fd00:1122:3344:102::21 + internal_ntp f83ade6d-9ab1-4679-813b-b9457e039c0b artifact: version 1.0.0 in service fd00:1122:3344:102::29 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 +* external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset - in service fd00:1122:3344:102::24 + └─ + expunged ⏳ + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": +- name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) +- AAAA fd00:1122:3344:102::24 +* name: _external-dns._tcp (records: 3 -> 2) +- SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal +- SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal +- SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal ++ SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal ++ SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + unchanged names: 49 (records: 61) + +external DNS: +* DNS zone: "oxide.example": +* name: @ (records: 3 -> 2) +- NS ns1.oxide.example +- NS ns2.oxide.example +- NS ns3.oxide.example ++ NS ns1.oxide.example ++ NS ns2.oxide.example +* name: ns1 (records: 1 -> 1) +- A 198.51.100.1 ++ A 198.51.100.2 +* name: ns2 (records: 1 -> 1) +- A 198.51.100.2 ++ A 198.51.100.3 +- name: ns3 (records: 1) +- A 198.51.100.3 + unchanged names: 1 (records: 3) + + + + +> sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (6fad8fd4-e825-433f-b76d-495484e068ce) + +> inventory-generate +generated inventory collection 94b231f9-80a3-48a9-8d25-70f9b42b64ca from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 8, num_already_artifact: 2, num_eligible: 0, num_ineligible: 6 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 
2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint 24b6e243-100c-428d-8ea6-35b504226f55 based on parent blueprint 6fad8fd4-e825-433f-b76d-495484e068ce +planning report for blueprint 24b6e243-100c-428d-8ea6-35b504226f55: +chicken switches: + add zones with mupdate override: false + +* discretionary zones placed: + * 1 zone on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: external_dns +* zone updates waiting on discretionary zones + + +> blueprint-diff latest +from: blueprint 6fad8fd4-e825-433f-b76d-495484e068ce +to: blueprint 24b6e243-100c-428d-8ea6-35b504226f55 + + MODIFIED SLEDS: + + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 7 -> 8): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-727522a7-934f-494d-b5b3-160968e74463 in service + fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service + fake-vendor fake-model serial-b5fd5bc1-099e-4e77-8028-a9793c11f43b in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_727522a7-934f-494d-b5b3-160968e74463/crucible 2f204c50-a327-479c-8852-f53ec7a19c1f in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none 
none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 3e0d6188-c503-49cf-a441-fa7df40ceb43 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_f83ade6d-9ab1-4679-813b-b9457e039c0b 7642e649-44d6-49b0-ba7c-cc3f6b6f2f9c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 ++ oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 9f185688-315b-445a-824a-84ff526417f2 in service none none off ++ oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 + crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 install dataset in 
service fd00:1122:3344:102::28
+ crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26
+ crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27
+ crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25
+ internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset in service fd00:1122:3344:1::1
+ internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset expunged ✓ fd00:1122:3344:102::21
+ internal_ntp f83ade6d-9ab1-4679-813b-b9457e039c0b artifact: version 1.0.0 in service fd00:1122:3344:102::29
+ nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22
+* external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset - expunged ⏳ fd00:1122:3344:102::24
+ └─ + expunged ✓
++ external_dns ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::2a
+
+
+ COCKROACHDB SETTINGS:
+ state fingerprint::::::::::::::::: (none) (unchanged)
+ cluster.preserve_downgrade_option: (do not modify) (unchanged)
+
+ METADATA:
+ internal DNS version::: 1 (unchanged)
+ external DNS version::: 1 (unchanged)
+ target release min gen: 1 (unchanged)
+
+ OXIMETER SETTINGS:
+ generation: 1 (unchanged)
+ read from:: SingleNode (unchanged)
+
+
+internal DNS:
+* DNS zone: "control-plane.oxide.internal":
+* name: _external-dns._tcp (records: 2 -> 3)
+- SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal
+- SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal
++ SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal
++ SRV port 5353 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal
++ SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal
++ name: ba87399e-e9b7-4ee4-8cb7-0032822630e9.host (records: 1)
++ AAAA fd00:1122:3344:102::2a
+ unchanged names: 49 (records: 61)
+
+external DNS:
+* DNS zone: "oxide.example":
+* name: @ (records: 2 -> 3)
+- NS ns1.oxide.example
+- NS ns2.oxide.example
++ NS ns1.oxide.example
++ NS ns2.oxide.example
++ NS ns3.oxide.example
+* name: ns1 (records: 1 -> 1)
+- A 198.51.100.2
++ A 198.51.100.1
+* name: ns2 (records: 1 -> 1)
+- A 198.51.100.3
++ A 198.51.100.2
++ name: ns3 (records: 1)
++ A 198.51.100.3
+ unchanged names: 1 (records: 3)
+
+
+
+
+> sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest
+set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (24b6e243-100c-428d-8ea6-35b504226f55)
+
+> inventory-generate
+generated inventory collection 756aecb6-8353-46ad-a6c4-10ad0f2bbb7f from configured sleds
+
+> blueprint-plan latest latest
+INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 3, num_eligible: 0, num_ineligible: 6
+INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a
+INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8
+INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash:
0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint 79fff7a2-2495-4c75-8465-4dc01bab48ce based on parent blueprint 24b6e243-100c-428d-8ea6-35b504226f55 +planning report for blueprint 79fff7a2-2495-4c75-8465-4dc01bab48ce: +chicken switches: + add zones with mupdate override: false + +* 1 out-of-date zone updated in-place: + * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 86a22a56-0168-453d-9df1-cb2a7c64b5d3 (crucible) +* 22 remaining out-of-date zones + + +> blueprint-diff latest +from: blueprint 24b6e243-100c-428d-8ea6-35b504226f55 +to: blueprint 79fff7a2-2495-4c75-8465-4dc01bab48ce + + MODIFIED SLEDS: + + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 8 -> 9): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-727522a7-934f-494d-b5b3-160968e74463 in service + fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service + fake-vendor fake-model serial-b5fd5bc1-099e-4e77-8028-a9793c11f43b in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_727522a7-934f-494d-b5b3-160968e74463/crucible 2f204c50-a327-479c-8852-f53ec7a19c1f in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 9f185688-315b-445a-824a-84ff526417f2 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 
2ad1875a-92ac-472f-8c26-593309f0e4da in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 3e0d6188-c503-49cf-a441-fa7df40ceb43 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_f83ade6d-9ab1-4679-813b-b9457e039c0b 7642e649-44d6-49b0-ba7c-cc3f6b6f2f9c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset expunged ✓ fd00:1122:3344:102::24 + external_dns ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service 
fd00:1122:3344:102::2a
+ internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset in service fd00:1122:3344:1::1
+ internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset expunged ✓ fd00:1122:3344:102::21
+ internal_ntp f83ade6d-9ab1-4679-813b-b9457e039c0b artifact: version 1.0.0 in service fd00:1122:3344:102::29
+ nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22
+* crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 - install dataset in service fd00:1122:3344:102::28
+ └─ + artifact: version 1.0.0
+
+
+ COCKROACHDB SETTINGS:
+ state fingerprint::::::::::::::::: (none) (unchanged)
+ cluster.preserve_downgrade_option: (do not modify) (unchanged)
+
+ METADATA:
+ internal DNS version::: 1 (unchanged)
+ external DNS version::: 1 (unchanged)
+ target release min gen: 1 (unchanged)
+
+ OXIMETER SETTINGS:
+ generation: 1 (unchanged)
+ read from:: SingleNode (unchanged)
+
+
+internal DNS:
+ DNS zone: "control-plane.oxide.internal" (unchanged)
+ unchanged names: 51 (records: 65)
+
+external DNS:
+ DNS zone: "oxide.example" (unchanged)
+ unchanged names: 5 (records: 9)
+
+
+
+
+> sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest
+set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (79fff7a2-2495-4c75-8465-4dc01bab48ce)
+
+> inventory-generate
+generated inventory collection 84152e52-8c2e-46ab-880e-4cc2a1fb9dcb from configured sleds
+
+> blueprint-plan latest latest
+INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 4, num_eligible: 0, num_ineligible: 5
+INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a
+INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8
+INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a
+INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8
+INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a
+INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0
+WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A
+INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1
+INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2
+INFO ran out of boards for MGS-driven update
+generated blueprint 3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312 based on parent blueprint 79fff7a2-2495-4c75-8465-4dc01bab48ce
+planning report for blueprint 3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312:
+chicken switches:
+ add zones with mupdate override: false
+
+* 1 out-of-date zone expunged:
+ * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone
99e2f30b-3174-40bf-a78a-90da8abba8ca (internal_dns) +* 21 remaining out-of-date zones + + +> blueprint-diff latest +from: blueprint 79fff7a2-2495-4c75-8465-4dc01bab48ce +to: blueprint 3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312 + + MODIFIED SLEDS: + + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 9 -> 10): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-727522a7-934f-494d-b5b3-160968e74463 in service + fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service + fake-vendor fake-model serial-b5fd5bc1-099e-4e77-8028-a9793c11f43b in service + + + datasets: + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_727522a7-934f-494d-b5b3-160968e74463/crucible 2f204c50-a327-479c-8852-f53ec7a19c1f in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 9f185688-315b-445a-824a-84ff526417f2 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 3e0d6188-c503-49cf-a441-fa7df40ceb43 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 expunged none none off + 
oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_f83ade6d-9ab1-4679-813b-b9457e039c0b 7642e649-44d6-49b0-ba7c-cc3f6b6f2f9c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 +* oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da - in service none none off + └─ + expunged +* oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda - in service none none off + └─ + expunged + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 + crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 artifact: version 1.0.0 in service fd00:1122:3344:102::28 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset expunged ✓ fd00:1122:3344:102::24 + external_dns ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::2a + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset expunged ✓ fd00:1122:3344:102::21 + internal_ntp f83ade6d-9ab1-4679-813b-b9457e039c0b artifact: version 1.0.0 in service fd00:1122:3344:102::29 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 +* internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset - in service fd00:1122:3344:1::1 + └─ + expunged ⏳ + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": +- name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) +- AAAA fd00:1122:3344:1::1 +* name: @ (records: 3 -> 2) +- NS ns1.control-plane.oxide.internal +- NS ns2.control-plane.oxide.internal +- NS ns3.control-plane.oxide.internal ++ NS ns1.control-plane.oxide.internal ++ NS 
ns2.control-plane.oxide.internal
+* name: _nameservice._tcp (records: 3 -> 2)
+- SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal
+- SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal
+- SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal
++ SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal
++ SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal
+* name: ns1 (records: 1 -> 1)
+- AAAA fd00:1122:3344:1::1
++ AAAA fd00:1122:3344:2::1
+* name: ns2 (records: 1 -> 1)
+- AAAA fd00:1122:3344:2::1
++ AAAA fd00:1122:3344:3::1
+- name: ns3 (records: 1)
+- AAAA fd00:1122:3344:3::1
+ unchanged names: 45 (records: 55)
+
+external DNS:
+ DNS zone: "oxide.example" (unchanged)
+ unchanged names: 5 (records: 9)
+
+
+
+
+> sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest
+set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312)
+
+> inventory-generate
+generated inventory collection bcfc7436-77de-47e4-8158-daad15a54da2 from configured sleds
+
+> blueprint-plan latest latest
+INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 8, num_already_artifact: 4, num_eligible: 0, num_ineligible: 4
+INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a
+INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8
+INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a
+INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8
+INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a
+INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0
+WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A
+INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1
+INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2
+INFO ran out of boards for MGS-driven update
+generated blueprint 4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d based on parent blueprint 3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312
+planning report for blueprint 4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d:
+chicken switches:
+ add zones with mupdate override: false
+
+* discretionary zones placed:
+ * 1 zone on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: internal_dns
+* zone updates waiting on discretionary zones
+
+
+> blueprint-diff latest
+from: blueprint 3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312
+to: blueprint 4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d
+
+ MODIFIED SLEDS:
+
+ sled
2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 10 -> 11): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-727522a7-934f-494d-b5b3-160968e74463 in service + fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service + fake-vendor fake-model serial-b5fd5bc1-099e-4e77-8028-a9793c11f43b in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_727522a7-934f-494d-b5b3-160968e74463/crucible 2f204c50-a327-479c-8852-f53ec7a19c1f in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 9f185688-315b-445a-824a-84ff526417f2 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 3e0d6188-c503-49cf-a441-fa7df40ceb43 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none 
off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_f83ade6d-9ab1-4679-813b-b9457e039c0b 7642e649-44d6-49b0-ba7c-cc3f6b6f2f9c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 ++ oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 5c01fdbd-ff37-44b4-a17b-6d625e6fa48d in service none none off ++ oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_c821c39d-2b2c-4c55-8874-ac12315ba1e4 a21812e1-1b80-4faa-9f2b-51189e0f6999 in service none none off + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 + crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 artifact: version 1.0.0 in service fd00:1122:3344:102::28 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset expunged ✓ fd00:1122:3344:102::24 + external_dns ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::2a + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset expunged ✓ fd00:1122:3344:102::21 + internal_ntp f83ade6d-9ab1-4679-813b-b9457e039c0b artifact: version 1.0.0 in service fd00:1122:3344:102::29 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 +* internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset - expunged ⏳ fd00:1122:3344:1::1 + └─ + expunged ✓ ++ internal_dns c821c39d-2b2c-4c55-8874-ac12315ba1e4 artifact: version 1.0.0 in service fd00:1122:3344:1::1 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": +* name: @ (records: 2 -> 3) +- NS ns1.control-plane.oxide.internal +- NS ns2.control-plane.oxide.internal ++ NS ns1.control-plane.oxide.internal ++ NS ns2.control-plane.oxide.internal ++ NS 
ns3.control-plane.oxide.internal
+* name: _nameservice._tcp (records: 2 -> 3)
+- SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal
+- SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal
++ SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal
++ SRV port 5353 c821c39d-2b2c-4c55-8874-ac12315ba1e4.host.control-plane.oxide.internal
++ SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal
++ name: c821c39d-2b2c-4c55-8874-ac12315ba1e4.host (records: 1)
++ AAAA fd00:1122:3344:1::1
+* name: ns1 (records: 1 -> 1)
+- AAAA fd00:1122:3344:2::1
++ AAAA fd00:1122:3344:1::1
+* name: ns2 (records: 1 -> 1)
+- AAAA fd00:1122:3344:3::1
++ AAAA fd00:1122:3344:2::1
++ name: ns3 (records: 1)
++ AAAA fd00:1122:3344:3::1
+ unchanged names: 45 (records: 55)
+
+external DNS:
+ DNS zone: "oxide.example" (unchanged)
+ unchanged names: 5 (records: 9)
+
+
+
+
+> sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest
+set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d)
+
+> inventory-generate
+generated inventory collection 6dbdc88a-4828-480e-b41d-8946f41a3134 from configured sleds
+
+> blueprint-plan latest latest
+INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 5, num_eligible: 0, num_ineligible: 4
+INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a
+INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8
+INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a
+INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8
+INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a
+INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0
+WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A
+INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1
+INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2
+INFO ran out of boards for MGS-driven update
+generated blueprint e2125c83-b255-45c9-bc9b-802cff09a812 based on parent blueprint 4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d
+planning report for blueprint e2125c83-b255-45c9-bc9b-802cff09a812:
+chicken switches:
+ add zones with mupdate override: false
+
+* 1 out-of-date zone expunged:
+ * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone ad6a3a03-8d0f-4504-99a4-cbf73d69b973 (crucible_pantry)
+* 20 remaining out-of-date zones
+
+
+> blueprint-diff latest
+from: blueprint
4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d +to: blueprint e2125c83-b255-45c9-bc9b-802cff09a812 + + MODIFIED SLEDS: + + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 11 -> 12): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-727522a7-934f-494d-b5b3-160968e74463 in service + fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service + fake-vendor fake-model serial-b5fd5bc1-099e-4e77-8028-a9793c11f43b in service + + + datasets: + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_727522a7-934f-494d-b5b3-160968e74463/crucible 2f204c50-a327-479c-8852-f53ec7a19c1f in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 9f185688-315b-445a-824a-84ff526417f2 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 5c01fdbd-ff37-44b4-a17b-6d625e6fa48d in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 3e0d6188-c503-49cf-a441-fa7df40ceb43 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 expunged none none off + 
oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_c821c39d-2b2c-4c55-8874-ac12315ba1e4 a21812e1-1b80-4faa-9f2b-51189e0f6999 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_f83ade6d-9ab1-4679-813b-b9457e039c0b 7642e649-44d6-49b0-ba7c-cc3f6b6f2f9c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 +* oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 - in service none none off + └─ + expunged + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 + crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 artifact: version 1.0.0 in service fd00:1122:3344:102::28 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset expunged ✓ fd00:1122:3344:102::24 + external_dns ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::2a + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset expunged ✓ fd00:1122:3344:1::1 + internal_dns c821c39d-2b2c-4c55-8874-ac12315ba1e4 artifact: version 1.0.0 in service fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset expunged ✓ fd00:1122:3344:102::21 + internal_ntp f83ade6d-9ab1-4679-813b-b9457e039c0b artifact: version 1.0.0 in service fd00:1122:3344:102::29 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 +* crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset - in service fd00:1122:3344:102::25 + └─ + expunged ⏳ + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: 
"control-plane.oxide.internal": +* name: _crucible-pantry._tcp (records: 3 -> 2) +- SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal +- SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal +- SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal ++ SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal ++ SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal +- name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) +- AAAA fd00:1122:3344:102::25 + unchanged names: 49 (records: 61) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (e2125c83-b255-45c9-bc9b-802cff09a812) + +> inventory-generate +generated inventory collection eb500068-cd91-484b-a532-51081571ecbe from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 8, num_already_artifact: 5, num_eligible: 0, num_ineligible: 3 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3 based on parent blueprint e2125c83-b255-45c9-bc9b-802cff09a812 +planning report for blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3: +chicken switches: + add zones with mupdate override: false + +* discretionary zones placed: + * 1 zone on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: crucible_pantry +* zone updates waiting on discretionary zones + + +> blueprint-diff latest +from: blueprint e2125c83-b255-45c9-bc9b-802cff09a812 +to: blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3 + + MODIFIED SLEDS: + + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 12 -> 13): + + host phase 2 contents: + ------------------------------ + slot boot image 
source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-727522a7-934f-494d-b5b3-160968e74463 in service + fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service + fake-vendor fake-model serial-b5fd5bc1-099e-4e77-8028-a9793c11f43b in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_727522a7-934f-494d-b5b3-160968e74463/crucible 2f204c50-a327-479c-8852-f53ec7a19c1f in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 9f185688-315b-445a-824a-84ff526417f2 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 5c01fdbd-ff37-44b4-a17b-6d625e6fa48d in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 3e0d6188-c503-49cf-a441-fa7df40ceb43 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off + 
oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_c821c39d-2b2c-4c55-8874-ac12315ba1e4 a21812e1-1b80-4faa-9f2b-51189e0f6999 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_f83ade6d-9ab1-4679-813b-b9457e039c0b 7642e649-44d6-49b0-ba7c-cc3f6b6f2f9c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 ++ oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_698d1d82-0620-4978-93ac-0ba5d40f3da9 dfe5586b-e4a8-4b98-ad72-eabc34988177 in service none none off + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 + crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 artifact: version 1.0.0 in service fd00:1122:3344:102::28 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset expunged ✓ fd00:1122:3344:102::24 + external_dns ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::2a + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset expunged ✓ fd00:1122:3344:1::1 + internal_dns c821c39d-2b2c-4c55-8874-ac12315ba1e4 artifact: version 1.0.0 in service fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset expunged ✓ fd00:1122:3344:102::21 + internal_ntp f83ade6d-9ab1-4679-813b-b9457e039c0b artifact: version 1.0.0 in service fd00:1122:3344:102::29 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 +* crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset - expunged ⏳ fd00:1122:3344:102::25 + └─ + expunged ✓ ++ crucible_pantry 698d1d82-0620-4978-93ac-0ba5d40f3da9 artifact: version 1.0.0 in service fd00:1122:3344:102::2b + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": ++ name: 698d1d82-0620-4978-93ac-0ba5d40f3da9.host 
(records: 1)
++ AAAA fd00:1122:3344:102::2b
+* name: _crucible-pantry._tcp (records: 2 -> 3)
+- SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal
+- SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal
++ SRV port 17000 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal
++ SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal
++ SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal
+ unchanged names: 49 (records: 61)
+
+external DNS:
+ DNS zone: "oxide.example" (unchanged)
+ unchanged names: 5 (records: 9)
+
+
+
+
+> sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest
+set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (f4a6848e-d13c-46e1-8c6a-944f886d7ba3)
+
+> inventory-generate
+generated inventory collection 4492baf6-5638-4c1f-bba2-608163519022 from configured sleds
+
+> blueprint-plan latest latest
+INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 6, num_eligible: 0, num_ineligible: 3
+INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a
+INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8
+INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a
+INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8
+INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a
+INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0
+WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A
+INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1
+INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2
+INFO ran out of boards for MGS-driven update
+generated blueprint 834e4dbe-3b71-443d-bd4c-20e8253abc0c based on parent blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3
+planning report for blueprint 834e4dbe-3b71-443d-bd4c-20e8253abc0c:
+chicken switches:
+ add zones with mupdate override: false
+
+* 1 out-of-date zone updated in-place:
+ * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone bd354eef-d8a6-4165-9124-283fb5e46d77 (crucible)
+* 19 remaining out-of-date zones
+
+
+> blueprint-diff latest
+from: blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3
+to: blueprint 834e4dbe-3b71-443d-bd4c-20e8253abc0c
+
+ MODIFIED SLEDS:
+
+ sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 13 -> 14):
+
+ host phase 2 contents:
+ ------------------------------
+ slot boot image source
+ ------------------------------
+ A current contents
+
B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-727522a7-934f-494d-b5b3-160968e74463 in service + fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service + fake-vendor fake-model serial-b5fd5bc1-099e-4e77-8028-a9793c11f43b in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_727522a7-934f-494d-b5b3-160968e74463/crucible 2f204c50-a327-479c-8852-f53ec7a19c1f in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 9f185688-315b-445a-824a-84ff526417f2 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 5c01fdbd-ff37-44b4-a17b-6d625e6fa48d in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 3e0d6188-c503-49cf-a441-fa7df40ceb43 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_698d1d82-0620-4978-93ac-0ba5d40f3da9 dfe5586b-e4a8-4b98-ad72-eabc34988177 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 expunged none none off + 
oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_c821c39d-2b2c-4c55-8874-ac12315ba1e4 a21812e1-1b80-4faa-9f2b-51189e0f6999 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_f83ade6d-9ab1-4679-813b-b9457e039c0b 7642e649-44d6-49b0-ba7c-cc3f6b6f2f9c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 + crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 artifact: version 1.0.0 in service fd00:1122:3344:102::28 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 + crucible_pantry 698d1d82-0620-4978-93ac-0ba5d40f3da9 artifact: version 1.0.0 in service fd00:1122:3344:102::2b + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset expunged ✓ fd00:1122:3344:102::25 + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset expunged ✓ fd00:1122:3344:102::24 + external_dns ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::2a + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset expunged ✓ fd00:1122:3344:1::1 + internal_dns c821c39d-2b2c-4c55-8874-ac12315ba1e4 artifact: version 1.0.0 in service fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset expunged ✓ fd00:1122:3344:102::21 + internal_ntp f83ade6d-9ab1-4679-813b-b9457e039c0b artifact: version 1.0.0 in service fd00:1122:3344:102::29 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 +* crucible bd354eef-d8a6-4165-9124-283fb5e46d77 - install dataset in service fd00:1122:3344:102::26 + └─ + artifact: version 1.0.0 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + unchanged names: 51 (records: 65) 
+ +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (834e4dbe-3b71-443d-bd4c-20e8253abc0c) + +> inventory-generate +generated inventory collection 73f58d4d-6be7-4007-811c-0e578279410e from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 7, num_eligible: 0, num_ineligible: 2 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1 based on parent blueprint 834e4dbe-3b71-443d-bd4c-20e8253abc0c +planning report for blueprint d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1: +chicken switches: + add zones with mupdate override: false + +* 1 out-of-date zone updated in-place: + * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone e2fdefe7-95b2-4fd2-ae37-56929a06d58c (crucible) +* 18 remaining out-of-date zones + + +> blueprint-diff latest +from: blueprint 834e4dbe-3b71-443d-bd4c-20e8253abc0c +to: blueprint d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1 + + MODIFIED SLEDS: + + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 14 -> 15): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-727522a7-934f-494d-b5b3-160968e74463 in service + fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service + fake-vendor fake-model serial-b5fd5bc1-099e-4e77-8028-a9793c11f43b in service + + + datasets: + 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_727522a7-934f-494d-b5b3-160968e74463/crucible 2f204c50-a327-479c-8852-f53ec7a19c1f in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 9f185688-315b-445a-824a-84ff526417f2 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 5c01fdbd-ff37-44b4-a17b-6d625e6fa48d in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 3e0d6188-c503-49cf-a441-fa7df40ceb43 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_698d1d82-0620-4978-93ac-0ba5d40f3da9 dfe5586b-e4a8-4b98-ad72-eabc34988177 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_c821c39d-2b2c-4c55-8874-ac12315ba1e4 a21812e1-1b80-4faa-9f2b-51189e0f6999 in service none none off + 
oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_f83ade6d-9ab1-4679-813b-b9457e039c0b 7642e649-44d6-49b0-ba7c-cc3f6b6f2f9c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 + crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 artifact: version 1.0.0 in service fd00:1122:3344:102::28 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 artifact: version 1.0.0 in service fd00:1122:3344:102::26 + crucible_pantry 698d1d82-0620-4978-93ac-0ba5d40f3da9 artifact: version 1.0.0 in service fd00:1122:3344:102::2b + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset expunged ✓ fd00:1122:3344:102::25 + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset expunged ✓ fd00:1122:3344:102::24 + external_dns ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::2a + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset expunged ✓ fd00:1122:3344:1::1 + internal_dns c821c39d-2b2c-4c55-8874-ac12315ba1e4 artifact: version 1.0.0 in service fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset expunged ✓ fd00:1122:3344:102::21 + internal_ntp f83ade6d-9ab1-4679-813b-b9457e039c0b artifact: version 1.0.0 in service fd00:1122:3344:102::29 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 +* crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c - install dataset in service fd00:1122:3344:102::27 + └─ + artifact: version 1.0.0 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + unchanged names: 51 (records: 65) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> # The previous plan updated the last non-Nexus zone on this sled. Nexus comes +> # after everything else, so the next step should update the first zone on the +> # next sled. 
+> sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1) + +> inventory-generate +generated inventory collection 74448e29-ef07-4d7f-9d31-39079eba8296 from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint e2deb7c0-2262-49fe-855f-4250c22afb36 based on parent blueprint d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1 +planning report for blueprint e2deb7c0-2262-49fe-855f-4250c22afb36: +chicken switches: + add zones with mupdate override: false + +* 1 out-of-date zone updated in-place: + * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone 058fd5f9-60a8-4e11-9302-15172782e17d (crucible) +* 17 remaining out-of-date zones + + +> blueprint-diff latest +from: blueprint d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1 +to: blueprint e2deb7c0-2262-49fe-855f-4250c22afb36 + + MODIFIED SLEDS: + + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 3 -> 4): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-073979dd-3248-44a5-9fa1-cc72a140d682 in service + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + fake-vendor fake-model serial-e4d937e1-6ddc-4eca-bb08-c1f73791e608 in service + + + datasets: + 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_058fd5f9-60a8-4e11-9302-15172782e17d 02c56a30-7d97-406d-bd34-1eb437fd517d in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 832fd140-d467-4bad-b5e9-63171634087c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 4d7e3e8e-06bd-414c-a468-779e056a9b75 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 + + + omicron zones: + ------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ------------------------------------------------------------------------------------------------------------------------- + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 install dataset in service fd00:1122:3344:101::25 + crucible 
dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset in service fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset in service fd00:1122:3344:101::21 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 +* crucible 058fd5f9-60a8-4e11-9302-15172782e17d - install dataset in service fd00:1122:3344:101::27 + └─ + artifact: version 1.0.0 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + unchanged names: 51 (records: 65) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> # Step through updates of all the non-Nexus zones on this sled. +> sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (e2deb7c0-2262-49fe-855f-4250c22afb36) + +> inventory-generate +generated inventory collection a815c282-5564-4cea-b667-a7a5295fc2c1 from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 1, num_eligible: 0, num_ineligible: 7 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for 
MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint 23ce505c-8991-44a5-8863-f2b906fba9cf based on parent blueprint e2deb7c0-2262-49fe-855f-4250c22afb36 +planning report for blueprint 23ce505c-8991-44a5-8863-f2b906fba9cf: +chicken switches: + add zones with mupdate override: false + +* 1 out-of-date zone expunged: + * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone 427ec88f-f467-42fa-9bbb-66a91a36103c (internal_dns) +* 16 remaining out-of-date zones + + +> blueprint-diff latest +from: blueprint e2deb7c0-2262-49fe-855f-4250c22afb36 +to: blueprint 23ce505c-8991-44a5-8863-f2b906fba9cf + + MODIFIED SLEDS: + + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 4 -> 5): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-073979dd-3248-44a5-9fa1-cc72a140d682 in service + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + fake-vendor fake-model serial-e4d937e1-6ddc-4eca-bb08-c1f73791e608 in service + + + datasets: + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_058fd5f9-60a8-4e11-9302-15172782e17d 02c56a30-7d97-406d-bd34-1eb437fd517d in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 832fd140-d467-4bad-b5e9-63171634087c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 4d7e3e8e-06bd-414c-a468-779e056a9b75 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off + 
oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 +* oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 - in service none none off + └─ + expunged +* oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a - in service none none off + └─ + expunged + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service fd00:1122:3344:101::27 + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 install dataset in service fd00:1122:3344:101::25 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset in service fd00:1122:3344:101::23 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset in service fd00:1122:3344:101::21 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 +* internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset - in service fd00:1122:3344:2::1 + └─ + expunged ⏳ + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": +- name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) +- AAAA fd00:1122:3344:2::1 +* name: @ (records: 3 -> 2) +- NS ns1.control-plane.oxide.internal +- NS ns2.control-plane.oxide.internal +- NS ns3.control-plane.oxide.internal ++ NS ns1.control-plane.oxide.internal ++ NS ns2.control-plane.oxide.internal +* name: _nameservice._tcp (records: 3 -> 2) +- SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal +- SRV port 5353 c821c39d-2b2c-4c55-8874-ac12315ba1e4.host.control-plane.oxide.internal +- SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal ++ SRV port 5353 c821c39d-2b2c-4c55-8874-ac12315ba1e4.host.control-plane.oxide.internal ++ SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal +* name: ns2 (records: 1 -> 1) +- AAAA fd00:1122:3344:2::1 ++ AAAA fd00:1122:3344:3::1 +- name: ns3 (records: 1) +- AAAA fd00:1122:3344:3::1 
+ unchanged names: 46 (records: 56) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (23ce505c-8991-44a5-8863-f2b906fba9cf) + +> inventory-generate +generated inventory collection 18ca4fd2-190d-4ac5-b0f3-14a384bcd254 from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 7, num_already_artifact: 1, num_eligible: 0, num_ineligible: 6 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d based on parent blueprint 23ce505c-8991-44a5-8863-f2b906fba9cf +planning report for blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d: +chicken switches: + add zones with mupdate override: false + +* discretionary zones placed: + * 1 zone on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: internal_dns +* zone updates waiting on discretionary zones + + +> blueprint-diff latest +from: blueprint 23ce505c-8991-44a5-8863-f2b906fba9cf +to: blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d + + MODIFIED SLEDS: + + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 5 -> 6): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-073979dd-3248-44a5-9fa1-cc72a140d682 in service + fake-vendor 
fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + fake-vendor fake-model serial-e4d937e1-6ddc-4eca-bb08-c1f73791e608 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_058fd5f9-60a8-4e11-9302-15172782e17d 02c56a30-7d97-406d-bd34-1eb437fd517d in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 832fd140-d467-4bad-b5e9-63171634087c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 4d7e3e8e-06bd-414c-a468-779e056a9b75 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 ++ oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns dedff151-ee94-4ead-a742-f973b39e21db in service none none off ++ oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_71f71743-8c73-43c6-b080-427ec28ef4c9 d8368353-9ae6-4ed0-99ad-21783f514ba6 in service 
none none off + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service fd00:1122:3344:101::27 + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 install dataset in service fd00:1122:3344:101::25 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset in service fd00:1122:3344:101::23 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset in service fd00:1122:3344:101::21 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 +* internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset - expunged ⏳ fd00:1122:3344:2::1 + └─ + expunged ✓ ++ internal_dns 71f71743-8c73-43c6-b080-427ec28ef4c9 artifact: version 1.0.0 in service fd00:1122:3344:2::1 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": ++ name: 71f71743-8c73-43c6-b080-427ec28ef4c9.host (records: 1) ++ AAAA fd00:1122:3344:2::1 +* name: @ (records: 2 -> 3) +- NS ns1.control-plane.oxide.internal +- NS ns2.control-plane.oxide.internal ++ NS ns1.control-plane.oxide.internal ++ NS ns2.control-plane.oxide.internal ++ NS ns3.control-plane.oxide.internal +* name: _nameservice._tcp (records: 2 -> 3) +- SRV port 5353 c821c39d-2b2c-4c55-8874-ac12315ba1e4.host.control-plane.oxide.internal +- SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal ++ SRV port 5353 71f71743-8c73-43c6-b080-427ec28ef4c9.host.control-plane.oxide.internal ++ SRV port 5353 c821c39d-2b2c-4c55-8874-ac12315ba1e4.host.control-plane.oxide.internal ++ SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal +* name: ns2 (records: 1 -> 1) +- AAAA fd00:1122:3344:3::1 ++ AAAA fd00:1122:3344:2::1 ++ name: ns3 (records: 1) ++ AAAA fd00:1122:3344:3::1 + unchanged names: 46 (records: 56) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (c0d81ea6-909c-4efb-964e-beff67f6da0d) + +> inventory-generate +generated inventory collection b460bcc7-664d-4dff-92fb-f250def5537c from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop 
zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 2, num_eligible: 0, num_ineligible: 6 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint 60b55d33-5fec-4277-9864-935197eaead7 based on parent blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d +planning report for blueprint 60b55d33-5fec-4277-9864-935197eaead7: +chicken switches: + add zones with mupdate override: false + +* 1 out-of-date zone updated in-place: + * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone 5199c033-4cf9-4ab6-8ae7-566bd7606363 (crucible) +* 15 remaining out-of-date zones + + +> blueprint-diff latest +from: blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d +to: blueprint 60b55d33-5fec-4277-9864-935197eaead7 + + MODIFIED SLEDS: + + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 6 -> 7): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-073979dd-3248-44a5-9fa1-cc72a140d682 in service + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + fake-vendor fake-model serial-e4d937e1-6ddc-4eca-bb08-c1f73791e608 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off + 
oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns dedff151-ee94-4ead-a742-f973b39e21db in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_058fd5f9-60a8-4e11-9302-15172782e17d 02c56a30-7d97-406d-bd34-1eb437fd517d in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 832fd140-d467-4bad-b5e9-63171634087c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 4d7e3e8e-06bd-414c-a468-779e056a9b75 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_71f71743-8c73-43c6-b080-427ec28ef4c9 d8368353-9ae6-4ed0-99ad-21783f514ba6 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service fd00:1122:3344:101::27 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset in service fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset expunged ✓ fd00:1122:3344:2::1 + internal_dns 
71f71743-8c73-43c6-b080-427ec28ef4c9 artifact: version 1.0.0 in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset in service fd00:1122:3344:101::21 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 +* crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 - install dataset in service fd00:1122:3344:101::25 + └─ + artifact: version 1.0.0 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + unchanged names: 51 (records: 65) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (60b55d33-5fec-4277-9864-935197eaead7) + +> inventory-generate +generated inventory collection f8212fb6-115e-4568-a05c-b241e2e8ffb9 from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 3, num_eligible: 0, num_ineligible: 5 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544 based on parent blueprint 60b55d33-5fec-4277-9864-935197eaead7 +planning report for blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544: +chicken switches: + add zones with mupdate override: false + +* 1 
out-of-date zone expunged: + * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone 6444f8a5-6465-4f0b-a549-1993c113569c (internal_ntp) +* 14 remaining out-of-date zones + + +> blueprint-diff latest +from: blueprint 60b55d33-5fec-4277-9864-935197eaead7 +to: blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544 + + MODIFIED SLEDS: + + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 7 -> 8): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-073979dd-3248-44a5-9fa1-cc72a140d682 in service + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + fake-vendor fake-model serial-e4d937e1-6ddc-4eca-bb08-c1f73791e608 in service + + + datasets: + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns dedff151-ee94-4ead-a742-f973b39e21db in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_058fd5f9-60a8-4e11-9302-15172782e17d 02c56a30-7d97-406d-bd34-1eb437fd517d in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 832fd140-d467-4bad-b5e9-63171634087c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 4d7e3e8e-06bd-414c-a468-779e056a9b75 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 
1bca7f71-5e42-4749-91ec-fa40793a3a9a expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_71f71743-8c73-43c6-b080-427ec28ef4c9 d8368353-9ae6-4ed0-99ad-21783f514ba6 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 +* oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 - in service none none off + └─ + expunged + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service fd00:1122:3344:101::27 + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset in service fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset expunged ✓ fd00:1122:3344:2::1 + internal_dns 71f71743-8c73-43c6-b080-427ec28ef4c9 artifact: version 1.0.0 in service fd00:1122:3344:2::1 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 +* internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset - in service fd00:1122:3344:101::21 + └─ + expunged ⏳ + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": +- name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) +- AAAA fd00:1122:3344:101::21 +* name: _internal-ntp._tcp (records: 3 -> 2) +- SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal +- SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal +- SRV port 123 f83ade6d-9ab1-4679-813b-b9457e039c0b.host.control-plane.oxide.internal ++ SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal ++ SRV port 123 f83ade6d-9ab1-4679-813b-b9457e039c0b.host.control-plane.oxide.internal + unchanged names: 49 (records: 61) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (aa13f40f-41ff-4b68-bee1-df2e1f805544) + 
+> inventory-generate +generated inventory collection f7602eed-bc12-42db-8eec-6f98a05d9796 from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 7, num_already_artifact: 3, num_eligible: 0, num_ineligible: 4 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +INFO some zones not yet up-to-date, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zones_currently_updating: [ZoneCurrentlyUpdating { zone_id: cc6fdaf4-0195-4cef-950d-7bacd7059787 (service), zone_kind: InternalNtp, reason: MissingInInventory { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") } } }] +generated blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111 based on parent blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544 +planning report for blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111: +chicken switches: + add zones with mupdate override: false + +* waiting for NTP zones to appear in inventory on sleds: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 +* sleds getting NTP zones and which have other services already, making them eligible for discretionary zones: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 +* missing NTP zone on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 + + +> blueprint-diff latest +from: blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544 +to: blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111 + + MODIFIED SLEDS: + + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 8 -> 9): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + 
------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-073979dd-3248-44a5-9fa1-cc72a140d682 in service + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + fake-vendor fake-model serial-e4d937e1-6ddc-4eca-bb08-c1f73791e608 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns dedff151-ee94-4ead-a742-f973b39e21db in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_058fd5f9-60a8-4e11-9302-15172782e17d 02c56a30-7d97-406d-bd34-1eb437fd517d in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 832fd140-d467-4bad-b5e9-63171634087c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 4d7e3e8e-06bd-414c-a468-779e056a9b75 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_71f71743-8c73-43c6-b080-427ec28ef4c9 d8368353-9ae6-4ed0-99ad-21783f514ba6 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 
248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 ++ oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_cc6fdaf4-0195-4cef-950d-7bacd7059787 5d6116c2-ade3-4246-8d02-5591d2622c10 in service none none off + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service fd00:1122:3344:101::27 + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset in service fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset expunged ✓ fd00:1122:3344:2::1 + internal_dns 71f71743-8c73-43c6-b080-427ec28ef4c9 artifact: version 1.0.0 in service fd00:1122:3344:2::1 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 +* internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset - expunged ⏳ fd00:1122:3344:101::21 + └─ + expunged ✓ ++ internal_ntp cc6fdaf4-0195-4cef-950d-7bacd7059787 artifact: version 1.0.0 in service fd00:1122:3344:101::28 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": +* name: _internal-ntp._tcp (records: 2 -> 3) +- SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal +- SRV port 123 f83ade6d-9ab1-4679-813b-b9457e039c0b.host.control-plane.oxide.internal ++ SRV port 123 cc6fdaf4-0195-4cef-950d-7bacd7059787.host.control-plane.oxide.internal ++ SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal ++ SRV port 123 f83ade6d-9ab1-4679-813b-b9457e039c0b.host.control-plane.oxide.internal ++ name: cc6fdaf4-0195-4cef-950d-7bacd7059787.host (records: 1) ++ AAAA fd00:1122:3344:101::28 + unchanged names: 49 (records: 61) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (316ccd9e-5c53-46c3-a2e9-20c3867b7111) + +> inventory-generate +generated inventory collection af824d9a-296d-4a2f-b704-c985c7470a1a from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails 
inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 4, num_eligible: 0, num_ineligible: 4 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint 02078c95-3d58-4b7b-a03f-9b160361c50a based on parent blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111 +planning report for blueprint 02078c95-3d58-4b7b-a03f-9b160361c50a: +chicken switches: + add zones with mupdate override: false + +* 1 out-of-date zone expunged: + * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone 803bfb63-c246-41db-b0da-d3b87ddfc63d (external_dns) +* 13 remaining out-of-date zones + + +> blueprint-diff latest +from: blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111 +to: blueprint 02078c95-3d58-4b7b-a03f-9b160361c50a + + MODIFIED SLEDS: + + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 9 -> 10): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-073979dd-3248-44a5-9fa1-cc72a140d682 in service + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + fake-vendor fake-model serial-e4d937e1-6ddc-4eca-bb08-c1f73791e608 in service + + + datasets: + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 
oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns dedff151-ee94-4ead-a742-f973b39e21db in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_058fd5f9-60a8-4e11-9302-15172782e17d 02c56a30-7d97-406d-bd34-1eb437fd517d in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 832fd140-d467-4bad-b5e9-63171634087c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 4d7e3e8e-06bd-414c-a468-779e056a9b75 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_71f71743-8c73-43c6-b080-427ec28ef4c9 d8368353-9ae6-4ed0-99ad-21783f514ba6 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_cc6fdaf4-0195-4cef-950d-7bacd7059787 5d6116c2-ade3-4246-8d02-5591d2622c10 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 +* oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e - in service none none off + └─ + expunged +* oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 - in service none none off + └─ + expunged + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service 
fd00:1122:3344:101::27 + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset in service fd00:1122:3344:101::24 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset expunged ✓ fd00:1122:3344:2::1 + internal_dns 71f71743-8c73-43c6-b080-427ec28ef4c9 artifact: version 1.0.0 in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset expunged ✓ fd00:1122:3344:101::21 + internal_ntp cc6fdaf4-0195-4cef-950d-7bacd7059787 artifact: version 1.0.0 in service fd00:1122:3344:101::28 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 +* external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset - in service fd00:1122:3344:101::23 + └─ + expunged ⏳ + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": +- name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) +- AAAA fd00:1122:3344:101::23 +* name: _external-dns._tcp (records: 3 -> 2) +- SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal +- SRV port 5353 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal +- SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal ++ SRV port 5353 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal ++ SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + unchanged names: 49 (records: 61) + +external DNS: +* DNS zone: "oxide.example": +* name: @ (records: 3 -> 2) +- NS ns1.oxide.example +- NS ns2.oxide.example +- NS ns3.oxide.example ++ NS ns1.oxide.example ++ NS ns2.oxide.example +* name: ns2 (records: 1 -> 1) +- A 198.51.100.2 ++ A 198.51.100.3 +- name: ns3 (records: 1) +- A 198.51.100.3 + unchanged names: 2 (records: 4) + + + + +> sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (02078c95-3d58-4b7b-a03f-9b160361c50a) + +> inventory-generate +generated inventory collection 2d5a41c5-bf7b-464c-a4b7-b14ab35982c4 from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 7, num_already_artifact: 4, num_eligible: 0, num_ineligible: 3 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image 
source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110 based on parent blueprint 02078c95-3d58-4b7b-a03f-9b160361c50a +planning report for blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110: +chicken switches: + add zones with mupdate override: false + +* discretionary zones placed: + * 1 zone on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: external_dns +* zone updates waiting on discretionary zones + + +> blueprint-diff latest +from: blueprint 02078c95-3d58-4b7b-a03f-9b160361c50a +to: blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110 + + MODIFIED SLEDS: + + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 10 -> 11): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-073979dd-3248-44a5-9fa1-cc72a140d682 in service + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + fake-vendor fake-model serial-e4d937e1-6ddc-4eca-bb08-c1f73791e608 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns 
dedff151-ee94-4ead-a742-f973b39e21db in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_058fd5f9-60a8-4e11-9302-15172782e17d 02c56a30-7d97-406d-bd34-1eb437fd517d in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 832fd140-d467-4bad-b5e9-63171634087c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 4d7e3e8e-06bd-414c-a468-779e056a9b75 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_71f71743-8c73-43c6-b080-427ec28ef4c9 d8368353-9ae6-4ed0-99ad-21783f514ba6 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_cc6fdaf4-0195-4cef-950d-7bacd7059787 5d6116c2-ade3-4246-8d02-5591d2622c10 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 ++ oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 44811c39-a4a2-4be3-85a6-954cf148e4b2 in service none none off ++ oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_e14f91b0-0c41-48a0-919d-e5078d2b89b0 312286f1-e378-464d-97cb-6fa06ba2dab7 in service none none off + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service fd00:1122:3344:101::27 + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset in service fd00:1122:3344:101::24 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset expunged 
✓ fd00:1122:3344:2::1 + internal_dns 71f71743-8c73-43c6-b080-427ec28ef4c9 artifact: version 1.0.0 in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset expunged ✓ fd00:1122:3344:101::21 + internal_ntp cc6fdaf4-0195-4cef-950d-7bacd7059787 artifact: version 1.0.0 in service fd00:1122:3344:101::28 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 +* external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset - expunged ⏳ fd00:1122:3344:101::23 + └─ + expunged ✓ ++ external_dns e14f91b0-0c41-48a0-919d-e5078d2b89b0 artifact: version 1.0.0 in service fd00:1122:3344:101::29 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": +* name: _external-dns._tcp (records: 2 -> 3) +- SRV port 5353 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal +- SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal ++ SRV port 5353 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal ++ SRV port 5353 e14f91b0-0c41-48a0-919d-e5078d2b89b0.host.control-plane.oxide.internal ++ SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal ++ name: e14f91b0-0c41-48a0-919d-e5078d2b89b0.host (records: 1) ++ AAAA fd00:1122:3344:101::29 + unchanged names: 49 (records: 61) + +external DNS: +* DNS zone: "oxide.example": +* name: @ (records: 2 -> 3) +- NS ns1.oxide.example +- NS ns2.oxide.example ++ NS ns1.oxide.example ++ NS ns2.oxide.example ++ NS ns3.oxide.example +* name: ns2 (records: 1 -> 1) +- A 198.51.100.3 ++ A 198.51.100.2 ++ name: ns3 (records: 1) ++ A 198.51.100.3 + unchanged names: 2 (records: 4) + + + + +> sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (e7a01ffc-6b0e-408b-917b-b1efe18b3110) + +> inventory-generate +generated inventory collection 94c793da-83b2-4fdf-b085-d7ef476bf204 from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 5, num_eligible: 0, num_ineligible: 3 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, 
expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint 880e2ffc-8187-4275-a2f3-1b36aa2f4482 based on parent blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110 +planning report for blueprint 880e2ffc-8187-4275-a2f3-1b36aa2f4482: +chicken switches: + add zones with mupdate override: false + +* 1 out-of-date zone expunged: + * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone ba4994a8-23f9-4b1a-a84f-a08d74591389 (crucible_pantry) +* 12 remaining out-of-date zones + + +> blueprint-diff latest +from: blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110 +to: blueprint 880e2ffc-8187-4275-a2f3-1b36aa2f4482 + + MODIFIED SLEDS: + + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 11 -> 12): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-073979dd-3248-44a5-9fa1-cc72a140d682 in service + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + fake-vendor fake-model serial-e4d937e1-6ddc-4eca-bb08-c1f73791e608 in service + + + datasets: + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 44811c39-a4a2-4be3-85a6-954cf148e4b2 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns dedff151-ee94-4ead-a742-f973b39e21db in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none 
off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_058fd5f9-60a8-4e11-9302-15172782e17d 02c56a30-7d97-406d-bd34-1eb437fd517d in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 832fd140-d467-4bad-b5e9-63171634087c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 4d7e3e8e-06bd-414c-a468-779e056a9b75 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_e14f91b0-0c41-48a0-919d-e5078d2b89b0 312286f1-e378-464d-97cb-6fa06ba2dab7 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_71f71743-8c73-43c6-b080-427ec28ef4c9 d8368353-9ae6-4ed0-99ad-21783f514ba6 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_cc6fdaf4-0195-4cef-950d-7bacd7059787 5d6116c2-ade3-4246-8d02-5591d2622c10 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 +* oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe - in service none none off + └─ + expunged + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service fd00:1122:3344:101::27 + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset expunged ✓ fd00:1122:3344:101::23 + external_dns e14f91b0-0c41-48a0-919d-e5078d2b89b0 artifact: version 1.0.0 in service fd00:1122:3344:101::29 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset expunged ✓ fd00:1122:3344:2::1 + internal_dns 71f71743-8c73-43c6-b080-427ec28ef4c9 artifact: version 1.0.0 in service fd00:1122:3344:2::1 + internal_ntp 
6444f8a5-6465-4f0b-a549-1993c113569c install dataset expunged ✓ fd00:1122:3344:101::21 + internal_ntp cc6fdaf4-0195-4cef-950d-7bacd7059787 artifact: version 1.0.0 in service fd00:1122:3344:101::28 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 +* crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset - in service fd00:1122:3344:101::24 + └─ + expunged ⏳ + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": +* name: _crucible-pantry._tcp (records: 3 -> 2) +- SRV port 17000 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal +- SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal +- SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal ++ SRV port 17000 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal ++ SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal +- name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) +- AAAA fd00:1122:3344:101::24 + unchanged names: 49 (records: 61) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (880e2ffc-8187-4275-a2f3-1b36aa2f4482) + +> inventory-generate +generated inventory collection 388a8c73-4ec0-4a23-9f82-225e652d8f37 from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 7, num_already_artifact: 5, num_eligible: 0, num_ineligible: 2 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being 
updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec based on parent blueprint 880e2ffc-8187-4275-a2f3-1b36aa2f4482 +planning report for blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec: +chicken switches: + add zones with mupdate override: false + +* discretionary zones placed: + * 1 zone on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: crucible_pantry +* zone updates waiting on discretionary zones + + +> blueprint-diff latest +from: blueprint 880e2ffc-8187-4275-a2f3-1b36aa2f4482 +to: blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec + + MODIFIED SLEDS: + + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 12 -> 13): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-073979dd-3248-44a5-9fa1-cc72a140d682 in service + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + fake-vendor fake-model serial-e4d937e1-6ddc-4eca-bb08-c1f73791e608 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 44811c39-a4a2-4be3-85a6-954cf148e4b2 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns dedff151-ee94-4ead-a742-f973b39e21db in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_058fd5f9-60a8-4e11-9302-15172782e17d 02c56a30-7d97-406d-bd34-1eb437fd517d in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 
832fd140-d467-4bad-b5e9-63171634087c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 4d7e3e8e-06bd-414c-a468-779e056a9b75 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_e14f91b0-0c41-48a0-919d-e5078d2b89b0 312286f1-e378-464d-97cb-6fa06ba2dab7 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_71f71743-8c73-43c6-b080-427ec28ef4c9 d8368353-9ae6-4ed0-99ad-21783f514ba6 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_cc6fdaf4-0195-4cef-950d-7bacd7059787 5d6116c2-ade3-4246-8d02-5591d2622c10 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 ++ oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_26bdd109-c842-43a9-95cb-15aba9b0832b 16a8c618-d062-4bde-8ca4-301b5f14ccf2 in service none none off + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service fd00:1122:3344:101::27 + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset expunged ✓ fd00:1122:3344:101::23 + external_dns e14f91b0-0c41-48a0-919d-e5078d2b89b0 artifact: version 1.0.0 in service fd00:1122:3344:101::29 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset expunged ✓ fd00:1122:3344:2::1 + internal_dns 71f71743-8c73-43c6-b080-427ec28ef4c9 artifact: version 1.0.0 in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset expunged ✓ fd00:1122:3344:101::21 + internal_ntp cc6fdaf4-0195-4cef-950d-7bacd7059787 artifact: version 1.0.0 in service fd00:1122:3344:101::28 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 +* crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset - expunged ⏳ 
fd00:1122:3344:101::24 + └─ + expunged ✓ ++ crucible_pantry 26bdd109-c842-43a9-95cb-15aba9b0832b artifact: version 1.0.0 in service fd00:1122:3344:101::2a + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": ++ name: 26bdd109-c842-43a9-95cb-15aba9b0832b.host (records: 1) ++ AAAA fd00:1122:3344:101::2a +* name: _crucible-pantry._tcp (records: 2 -> 3) +- SRV port 17000 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal +- SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal ++ SRV port 17000 26bdd109-c842-43a9-95cb-15aba9b0832b.host.control-plane.oxide.internal ++ SRV port 17000 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal ++ SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + unchanged names: 49 (records: 61) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (c4a20bcb-1a71-4e88-97b4-36d16f55daec) + +> inventory-generate +generated inventory collection 2d608b8f-bf88-4707-ac27-6be62f3d5146 from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 6, num_eligible: 0, num_ineligible: 2 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: 
model2 +INFO ran out of boards for MGS-driven update +generated blueprint a2c6496d-98fc-444d-aa36-99508aa72367 based on parent blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec +planning report for blueprint a2c6496d-98fc-444d-aa36-99508aa72367: +chicken switches: + add zones with mupdate override: false + +* 1 out-of-date zone updated in-place: + * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone dfac80b4-a887-430a-ae87-a4e065dba787 (crucible) +* 11 remaining out-of-date zones + + +> blueprint-diff latest +from: blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec +to: blueprint a2c6496d-98fc-444d-aa36-99508aa72367 + + MODIFIED SLEDS: + + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 13 -> 14): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-073979dd-3248-44a5-9fa1-cc72a140d682 in service + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + fake-vendor fake-model serial-e4d937e1-6ddc-4eca-bb08-c1f73791e608 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 44811c39-a4a2-4be3-85a6-954cf148e4b2 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns dedff151-ee94-4ead-a742-f973b39e21db in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_058fd5f9-60a8-4e11-9302-15172782e17d 02c56a30-7d97-406d-bd34-1eb437fd517d in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 832fd140-d467-4bad-b5e9-63171634087c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 4d7e3e8e-06bd-414c-a468-779e056a9b75 in service none none off + 
oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_26bdd109-c842-43a9-95cb-15aba9b0832b 16a8c618-d062-4bde-8ca4-301b5f14ccf2 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_e14f91b0-0c41-48a0-919d-e5078d2b89b0 312286f1-e378-464d-97cb-6fa06ba2dab7 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_71f71743-8c73-43c6-b080-427ec28ef4c9 d8368353-9ae6-4ed0-99ad-21783f514ba6 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_cc6fdaf4-0195-4cef-950d-7bacd7059787 5d6116c2-ade3-4246-8d02-5591d2622c10 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service fd00:1122:3344:101::27 + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 + crucible_pantry 26bdd109-c842-43a9-95cb-15aba9b0832b artifact: version 1.0.0 in service fd00:1122:3344:101::2a + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset expunged ✓ fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset expunged ✓ fd00:1122:3344:101::23 + external_dns e14f91b0-0c41-48a0-919d-e5078d2b89b0 artifact: version 1.0.0 in service fd00:1122:3344:101::29 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset expunged ✓ fd00:1122:3344:2::1 + internal_dns 71f71743-8c73-43c6-b080-427ec28ef4c9 artifact: version 1.0.0 in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset expunged ✓ fd00:1122:3344:101::21 + internal_ntp cc6fdaf4-0195-4cef-950d-7bacd7059787 artifact: version 1.0.0 in service fd00:1122:3344:101::28 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 +* crucible dfac80b4-a887-430a-ae87-a4e065dba787 - install dataset in service fd00:1122:3344:101::26 + └─ + artifact: version 1.0.0 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: 
(none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + unchanged names: 51 (records: 65) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> # The previous plan updated the last non-Nexus zone on this sled. Nexus comes +> # after everything else, so the next step should update the first zone on the +> # next sled. +> sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (a2c6496d-98fc-444d-aa36-99508aa72367) + +> inventory-generate +generated inventory collection 21e24074-fdd0-438e-a4e7-11665b7071bb from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint 6ed56354-5941-40d1-a06c-b0e940701d52 based on parent blueprint a2c6496d-98fc-444d-aa36-99508aa72367 +planning report for blueprint 6ed56354-5941-40d1-a06c-b0e940701d52: +chicken switches: + add zones with mupdate override: false + +* 1 out-of-date zone updated in-place: + * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone 694bd14f-cb24-4be4-bb19-876e79cda2c8 (crucible) +* 10 remaining out-of-date zones + + +> blueprint-diff latest +from: blueprint a2c6496d-98fc-444d-aa36-99508aa72367 +to: blueprint 6ed56354-5941-40d1-a06c-b0e940701d52 + + MODIFIED 
SLEDS: + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 3 -> 4): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-18b20749-0748-4105-bb10-7b13cfc776e2 in service + fake-vendor fake-model serial-30c16fe4-4229-49d0-ab01-3138f2c7dff2 in service + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crucible 7ea73f80-c4e0-450a-92dc-8397ce2af14f in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crucible 6f04dd20-5e2c-4fa8-8430-a886470ed140 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_crucible_694bd14f-cb24-4be4-bb19-876e79cda2c8 3443a368-199e-4d26-b59f-3f2bbd507761 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 
7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 + + + omicron zones: + ------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ------------------------------------------------------------------------------------------------------------------------- + crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 install dataset in service fd00:1122:3344:103::27 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset in service fd00:1122:3344:103::24 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset in service fd00:1122:3344:3::1 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 +* crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 - install dataset in service fd00:1122:3344:103::26 + └─ + artifact: version 1.0.0 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + unchanged names: 51 (records: 65) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> # Step through updates of all the non-Nexus zones on this sled. 
+> sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (6ed56354-5941-40d1-a06c-b0e940701d52) + +> inventory-generate +generated inventory collection ed8ea2c4-4271-407e-9c84-54129418d171 from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 1, num_eligible: 0, num_ineligible: 7 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial2, part_number: model2, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2 based on parent blueprint 6ed56354-5941-40d1-a06c-b0e940701d52 +planning report for blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2: +chicken switches: + add zones with mupdate override: false + +* 1 out-of-date zone expunged: + * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone 75b220ba-a0f4-4872-8202-dc7c87f062d0 (crucible_pantry) +* 9 remaining out-of-date zones + + +> blueprint-diff latest +from: blueprint 6ed56354-5941-40d1-a06c-b0e940701d52 +to: blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2 + + MODIFIED SLEDS: + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 4 -> 5): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + 
------------------------------------------------------------------------------------ + fake-vendor fake-model serial-18b20749-0748-4105-bb10-7b13cfc776e2 in service + fake-vendor fake-model serial-30c16fe4-4229-49d0-ab01-3138f2c7dff2 in service + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crucible 7ea73f80-c4e0-450a-92dc-8397ce2af14f in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crucible 6f04dd20-5e2c-4fa8-8430-a886470ed140 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_crucible_694bd14f-cb24-4be4-bb19-876e79cda2c8 3443a368-199e-4d26-b59f-3f2bbd507761 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 +* oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be - in service none none off + └─ + expunged + + + omicron zones: + 
-------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 + crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 install dataset in service fd00:1122:3344:103::27 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset in service fd00:1122:3344:3::1 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 +* crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset - in service fd00:1122:3344:103::24 + └─ + expunged ⏳ + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": +- name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) +- AAAA fd00:1122:3344:103::24 +* name: _crucible-pantry._tcp (records: 3 -> 2) +- SRV port 17000 26bdd109-c842-43a9-95cb-15aba9b0832b.host.control-plane.oxide.internal +- SRV port 17000 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal +- SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal ++ SRV port 17000 26bdd109-c842-43a9-95cb-15aba9b0832b.host.control-plane.oxide.internal ++ SRV port 17000 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal + unchanged names: 49 (records: 61) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (9078c4ba-3a73-4b3f-ac2c-acb501f89cb2) + +> inventory-generate +generated inventory collection 336dbc73-f973-4962-a210-3c9d424bd6a3 from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 7, 
num_already_artifact: 1, num_eligible: 0, num_ineligible: 6 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial2, part_number: model2, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152 based on parent blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2 +planning report for blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152: +chicken switches: + add zones with mupdate override: false + +* discretionary zones placed: + * 1 zone on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: crucible_pantry +* zone updates waiting on discretionary zones + + +> blueprint-diff latest +from: blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2 +to: blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152 + + MODIFIED SLEDS: + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 5 -> 6): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-18b20749-0748-4105-bb10-7b13cfc776e2 in service + fake-vendor fake-model serial-30c16fe4-4229-49d0-ab01-3138f2c7dff2 in service + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crucible 7ea73f80-c4e0-450a-92dc-8397ce2af14f in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crucible 6f04dd20-5e2c-4fa8-8430-a886470ed140 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 
1cb0a47a-59ac-4892-8e92-cf87b4290f96 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_crucible_694bd14f-cb24-4be4-bb19-876e79cda2c8 3443a368-199e-4d26-b59f-3f2bbd507761 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 ++ oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_c88fcd7d-9509-470e-8c4f-3e6f09104cdc fed6b06e-ad3f-41c2-b7cd-e783462ec58c in service none none off + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 + crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 install dataset in service fd00:1122:3344:103::27 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset in service fd00:1122:3344:3::1 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 +* crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset - expunged ⏳ fd00:1122:3344:103::24 + └─ + expunged ✓ ++ crucible_pantry c88fcd7d-9509-470e-8c4f-3e6f09104cdc artifact: version 1.0.0 in service fd00:1122:3344:103::28 
+ + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": +* name: _crucible-pantry._tcp (records: 2 -> 3) +- SRV port 17000 26bdd109-c842-43a9-95cb-15aba9b0832b.host.control-plane.oxide.internal +- SRV port 17000 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal ++ SRV port 17000 26bdd109-c842-43a9-95cb-15aba9b0832b.host.control-plane.oxide.internal ++ SRV port 17000 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal ++ SRV port 17000 c88fcd7d-9509-470e-8c4f-3e6f09104cdc.host.control-plane.oxide.internal ++ name: c88fcd7d-9509-470e-8c4f-3e6f09104cdc.host (records: 1) ++ AAAA fd00:1122:3344:103::28 + unchanged names: 49 (records: 61) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (8763abc1-8a42-4932-b5a7-33109e0e0152) + +> inventory-generate +generated inventory collection 897721fc-b087-41be-a566-809d59c8aeea from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 2, num_eligible: 0, num_ineligible: 6 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial2, part_number: model2, active_phase_1_slot: B, boot_disk: A +INFO skipping board for 
MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88 based on parent blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152 +planning report for blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88: +chicken switches: + add zones with mupdate override: false + +* 1 out-of-date zone updated in-place: + * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone 7c252b64-c5af-4ec1-989e-9a03f3b0f111 (crucible) +* 8 remaining out-of-date zones + + +> blueprint-diff latest +from: blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152 +to: blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88 + + MODIFIED SLEDS: + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 6 -> 7): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-18b20749-0748-4105-bb10-7b13cfc776e2 in service + fake-vendor fake-model serial-30c16fe4-4229-49d0-ab01-3138f2c7dff2 in service + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crucible 7ea73f80-c4e0-450a-92dc-8397ce2af14f in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crucible 6f04dd20-5e2c-4fa8-8430-a886470ed140 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_crucible_694bd14f-cb24-4be4-bb19-876e79cda2c8 3443a368-199e-4d26-b59f-3f2bbd507761 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be expunged none none off + 
oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_c88fcd7d-9509-470e-8c4f-3e6f09104cdc fed6b06e-ad3f-41c2-b7cd-e783462ec58c in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset expunged ✓ fd00:1122:3344:103::24 + crucible_pantry c88fcd7d-9509-470e-8c4f-3e6f09104cdc artifact: version 1.0.0 in service fd00:1122:3344:103::28 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset in service fd00:1122:3344:3::1 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 +* crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 - install dataset in service fd00:1122:3344:103::27 + └─ + artifact: version 1.0.0 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + unchanged names: 51 (records: 65) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (2b89e0d7-f15b-4474-8ac4-85959ed1bc88) + +> inventory-generate +generated inventory collection 5d0b9686-48df-4642-a39c-e2dea04d5330 from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, 
num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 3, num_eligible: 0, num_ineligible: 5 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial2, part_number: model2, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4 based on parent blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88 +planning report for blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4: +chicken switches: + add zones with mupdate override: false + +* 1 out-of-date zone expunged: + * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone ea5b4030-b52f-44b2-8d70-45f15f987d01 (internal_dns) +* 7 remaining out-of-date zones + + +> blueprint-diff latest +from: blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88 +to: blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4 + + MODIFIED SLEDS: + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 7 -> 8): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-18b20749-0748-4105-bb10-7b13cfc776e2 in service + fake-vendor fake-model serial-30c16fe4-4229-49d0-ab01-3138f2c7dff2 in service + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset 
id disposition quota reservation compression + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crucible 7ea73f80-c4e0-450a-92dc-8397ce2af14f in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crucible 6f04dd20-5e2c-4fa8-8430-a886470ed140 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_crucible_694bd14f-cb24-4be4-bb19-876e79cda2c8 3443a368-199e-4d26-b59f-3f2bbd507761 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_c88fcd7d-9509-470e-8c4f-3e6f09104cdc fed6b06e-ad3f-41c2-b7cd-e783462ec58c in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 +* oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 - in service none none off + └─ + expunged +* oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb - in service none none off + └─ + expunged + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 + crucible 
7c252b64-c5af-4ec1-989e-9a03f3b0f111 artifact: version 1.0.0 in service fd00:1122:3344:103::27 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset expunged ✓ fd00:1122:3344:103::24 + crucible_pantry c88fcd7d-9509-470e-8c4f-3e6f09104cdc artifact: version 1.0.0 in service fd00:1122:3344:103::28 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 +* internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset - in service fd00:1122:3344:3::1 + └─ + expunged ⏳ + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": +* name: @ (records: 3 -> 2) +- NS ns1.control-plane.oxide.internal +- NS ns2.control-plane.oxide.internal +- NS ns3.control-plane.oxide.internal ++ NS ns1.control-plane.oxide.internal ++ NS ns2.control-plane.oxide.internal +* name: _nameservice._tcp (records: 3 -> 2) +- SRV port 5353 71f71743-8c73-43c6-b080-427ec28ef4c9.host.control-plane.oxide.internal +- SRV port 5353 c821c39d-2b2c-4c55-8874-ac12315ba1e4.host.control-plane.oxide.internal +- SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal ++ SRV port 5353 71f71743-8c73-43c6-b080-427ec28ef4c9.host.control-plane.oxide.internal ++ SRV port 5353 c821c39d-2b2c-4c55-8874-ac12315ba1e4.host.control-plane.oxide.internal +- name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) +- AAAA fd00:1122:3344:3::1 +- name: ns3 (records: 1) +- AAAA fd00:1122:3344:3::1 + unchanged names: 47 (records: 57) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4) + +> inventory-generate +generated inventory collection 90f0f757-fd33-4744-abee-36616a645b87 from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 7, num_already_artifact: 3, 
num_eligible: 0, num_ineligible: 4 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial2, part_number: model2, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint 59630e63-c953-4807-9e84-9e750a79f68e based on parent blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4 +planning report for blueprint 59630e63-c953-4807-9e84-9e750a79f68e: +chicken switches: + add zones with mupdate override: false + +* discretionary zones placed: + * 1 zone on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: internal_dns +* zone updates waiting on discretionary zones + + +> blueprint-diff latest +from: blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4 +to: blueprint 59630e63-c953-4807-9e84-9e750a79f68e + + MODIFIED SLEDS: + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 8 -> 9): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-18b20749-0748-4105-bb10-7b13cfc776e2 in service + fake-vendor fake-model serial-30c16fe4-4229-49d0-ab01-3138f2c7dff2 in service + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crucible 7ea73f80-c4e0-450a-92dc-8397ce2af14f in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crucible 6f04dd20-5e2c-4fa8-8430-a886470ed140 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 expunged none 
none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_crucible_694bd14f-cb24-4be4-bb19-876e79cda2c8 3443a368-199e-4d26-b59f-3f2bbd507761 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_c88fcd7d-9509-470e-8c4f-3e6f09104cdc fed6b06e-ad3f-41c2-b7cd-e783462ec58c in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 ++ oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns bdc35fc8-8541-4dfc-b9fa-db05eceb5c55 in service none none off ++ oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_7fbd0103-d7f8-48a5-b95e-29bf812cac1f 49336223-f6df-4fe7-bd9f-95123c5622a9 in service none none off + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 + crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 artifact: version 1.0.0 in service fd00:1122:3344:103::27 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset expunged ✓ fd00:1122:3344:103::24 + crucible_pantry c88fcd7d-9509-470e-8c4f-3e6f09104cdc artifact: version 1.0.0 in service fd00:1122:3344:103::28 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in 
service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 +* internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset - expunged ⏳ fd00:1122:3344:3::1 + └─ + expunged ✓ ++ internal_dns 7fbd0103-d7f8-48a5-b95e-29bf812cac1f artifact: version 1.0.0 in service fd00:1122:3344:3::1 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": ++ name: 7fbd0103-d7f8-48a5-b95e-29bf812cac1f.host (records: 1) ++ AAAA fd00:1122:3344:3::1 +* name: @ (records: 2 -> 3) +- NS ns1.control-plane.oxide.internal +- NS ns2.control-plane.oxide.internal ++ NS ns1.control-plane.oxide.internal ++ NS ns2.control-plane.oxide.internal ++ NS ns3.control-plane.oxide.internal +* name: _nameservice._tcp (records: 2 -> 3) +- SRV port 5353 71f71743-8c73-43c6-b080-427ec28ef4c9.host.control-plane.oxide.internal +- SRV port 5353 c821c39d-2b2c-4c55-8874-ac12315ba1e4.host.control-plane.oxide.internal ++ SRV port 5353 71f71743-8c73-43c6-b080-427ec28ef4c9.host.control-plane.oxide.internal ++ SRV port 5353 7fbd0103-d7f8-48a5-b95e-29bf812cac1f.host.control-plane.oxide.internal ++ SRV port 5353 c821c39d-2b2c-4c55-8874-ac12315ba1e4.host.control-plane.oxide.internal ++ name: ns3 (records: 1) ++ AAAA fd00:1122:3344:3::1 + unchanged names: 47 (records: 57) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (59630e63-c953-4807-9e84-9e750a79f68e) + +> inventory-generate +generated inventory collection ee9bc64a-70f7-4d81-b39d-a709754ce118 from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 4, num_eligible: 0, num_ineligible: 4 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A +INFO 
skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial2, part_number: model2, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194 based on parent blueprint 59630e63-c953-4807-9e84-9e750a79f68e +planning report for blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194: +chicken switches: + add zones with mupdate override: false + +* 1 out-of-date zone expunged: + * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone f10a4fb9-759f-4a65-b25e-5794ad2d07d8 (internal_ntp) +* 6 remaining out-of-date zones + + +> blueprint-diff latest +from: blueprint 59630e63-c953-4807-9e84-9e750a79f68e +to: blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194 + + MODIFIED SLEDS: + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 9 -> 10): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-18b20749-0748-4105-bb10-7b13cfc776e2 in service + fake-vendor fake-model serial-30c16fe4-4229-49d0-ab01-3138f2c7dff2 in service + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crucible 7ea73f80-c4e0-450a-92dc-8397ce2af14f in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crucible 6f04dd20-5e2c-4fa8-8430-a886470ed140 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns bdc35fc8-8541-4dfc-b9fa-db05eceb5c55 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 252ac39f-b9e2-4697-8c07-3a833115d704 in 
service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_crucible_694bd14f-cb24-4be4-bb19-876e79cda2c8 3443a368-199e-4d26-b59f-3f2bbd507761 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_c88fcd7d-9509-470e-8c4f-3e6f09104cdc fed6b06e-ad3f-41c2-b7cd-e783462ec58c in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_7fbd0103-d7f8-48a5-b95e-29bf812cac1f 49336223-f6df-4fe7-bd9f-95123c5622a9 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 +* oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 - in service none none off + └─ + expunged + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 + crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 artifact: version 1.0.0 in service fd00:1122:3344:103::27 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset expunged ✓ fd00:1122:3344:103::24 + crucible_pantry c88fcd7d-9509-470e-8c4f-3e6f09104cdc artifact: version 1.0.0 in service fd00:1122:3344:103::28 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 + internal_dns 7fbd0103-d7f8-48a5-b95e-29bf812cac1f artifact: version 1.0.0 in service fd00:1122:3344:3::1 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset expunged ✓ fd00:1122:3344:3::1 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 +* internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset - in service fd00:1122:3344:103::21 + └─ + expunged ⏳ + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + 
cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": +* name: _internal-ntp._tcp (records: 3 -> 2) +- SRV port 123 cc6fdaf4-0195-4cef-950d-7bacd7059787.host.control-plane.oxide.internal +- SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal +- SRV port 123 f83ade6d-9ab1-4679-813b-b9457e039c0b.host.control-plane.oxide.internal ++ SRV port 123 cc6fdaf4-0195-4cef-950d-7bacd7059787.host.control-plane.oxide.internal ++ SRV port 123 f83ade6d-9ab1-4679-813b-b9457e039c0b.host.control-plane.oxide.internal +- name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) +- AAAA fd00:1122:3344:103::21 + unchanged names: 49 (records: 61) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (e93650dc-b5ba-4ec7-8550-9171c1ada194) + +> inventory-generate +generated inventory collection 3dc9d8c8-8f50-4d6e-9396-97058d1d2722 from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 7, num_already_artifact: 4, num_eligible: 0, num_ineligible: 3 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial2, part_number: model2, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven 
update +INFO some zones not yet up-to-date, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, zones_currently_updating: [ZoneCurrentlyUpdating { zone_id: d5fd048a-8786-42d3-938e-820eae95d7f4 (service), zone_kind: InternalNtp, reason: MissingInInventory { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") } } }] +generated blueprint 90650737-8142-47a6-9a48-a10efc487e57 based on parent blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194 +planning report for blueprint 90650737-8142-47a6-9a48-a10efc487e57: +chicken switches: + add zones with mupdate override: false + +* waiting for NTP zones to appear in inventory on sleds: d81c6a84-79b8-4958-ae41-ea46c9b19763 +* sleds getting NTP zones and which have other services already, making them eligible for discretionary zones: d81c6a84-79b8-4958-ae41-ea46c9b19763 +* missing NTP zone on sled d81c6a84-79b8-4958-ae41-ea46c9b19763 + + +> blueprint-diff latest +from: blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194 +to: blueprint 90650737-8142-47a6-9a48-a10efc487e57 + + MODIFIED SLEDS: + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 10 -> 11): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-18b20749-0748-4105-bb10-7b13cfc776e2 in service + fake-vendor fake-model serial-30c16fe4-4229-49d0-ab01-3138f2c7dff2 in service + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crucible 7ea73f80-c4e0-450a-92dc-8397ce2af14f in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crucible 6f04dd20-5e2c-4fa8-8430-a886470ed140 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns bdc35fc8-8541-4dfc-b9fa-db05eceb5c55 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_crucible_694bd14f-cb24-4be4-bb19-876e79cda2c8 
3443a368-199e-4d26-b59f-3f2bbd507761 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_c88fcd7d-9509-470e-8c4f-3e6f09104cdc fed6b06e-ad3f-41c2-b7cd-e783462ec58c in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_7fbd0103-d7f8-48a5-b95e-29bf812cac1f 49336223-f6df-4fe7-bd9f-95123c5622a9 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 ++ oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_d5fd048a-8786-42d3-938e-820eae95d7f4 c22b409e-662b-48a9-ac70-29f7487aa6b1 in service none none off + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 + crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 artifact: version 1.0.0 in service fd00:1122:3344:103::27 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset expunged ✓ fd00:1122:3344:103::24 + crucible_pantry c88fcd7d-9509-470e-8c4f-3e6f09104cdc artifact: version 1.0.0 in service fd00:1122:3344:103::28 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 + internal_dns 7fbd0103-d7f8-48a5-b95e-29bf812cac1f artifact: version 1.0.0 in service fd00:1122:3344:3::1 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset expunged ✓ fd00:1122:3344:3::1 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 +* internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset - expunged ⏳ fd00:1122:3344:103::21 + └─ + expunged ✓ ++ internal_ntp d5fd048a-8786-42d3-938e-820eae95d7f4 artifact: version 
1.0.0 in service fd00:1122:3344:103::29 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": +* name: _internal-ntp._tcp (records: 2 -> 3) +- SRV port 123 cc6fdaf4-0195-4cef-950d-7bacd7059787.host.control-plane.oxide.internal +- SRV port 123 f83ade6d-9ab1-4679-813b-b9457e039c0b.host.control-plane.oxide.internal ++ SRV port 123 cc6fdaf4-0195-4cef-950d-7bacd7059787.host.control-plane.oxide.internal ++ SRV port 123 d5fd048a-8786-42d3-938e-820eae95d7f4.host.control-plane.oxide.internal ++ SRV port 123 f83ade6d-9ab1-4679-813b-b9457e039c0b.host.control-plane.oxide.internal ++ name: d5fd048a-8786-42d3-938e-820eae95d7f4.host (records: 1) ++ AAAA fd00:1122:3344:103::29 + unchanged names: 49 (records: 61) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (90650737-8142-47a6-9a48-a10efc487e57) + +> inventory-generate +generated inventory collection a4dab274-0fff-47fa-bc22-b98d11ec54d2 from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 5, num_eligible: 0, num_ineligible: 3 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial2, part_number: model2, active_phase_1_slot: B, boot_disk: A 
+INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa based on parent blueprint 90650737-8142-47a6-9a48-a10efc487e57 +planning report for blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa: +chicken switches: + add zones with mupdate override: false + +* 1 out-of-date zone updated in-place: + * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone f55647d4-5500-4ad3-893a-df45bd50d622 (crucible) +* 5 remaining out-of-date zones + + +> blueprint-diff latest +from: blueprint 90650737-8142-47a6-9a48-a10efc487e57 +to: blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa + + MODIFIED SLEDS: + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 11 -> 12): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-18b20749-0748-4105-bb10-7b13cfc776e2 in service + fake-vendor fake-model serial-30c16fe4-4229-49d0-ab01-3138f2c7dff2 in service + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crucible 7ea73f80-c4e0-450a-92dc-8397ce2af14f in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crucible 6f04dd20-5e2c-4fa8-8430-a886470ed140 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns bdc35fc8-8541-4dfc-b9fa-db05eceb5c55 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_crucible_694bd14f-cb24-4be4-bb19-876e79cda2c8 3443a368-199e-4d26-b59f-3f2bbd507761 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off + 
oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_c88fcd7d-9509-470e-8c4f-3e6f09104cdc fed6b06e-ad3f-41c2-b7cd-e783462ec58c in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_7fbd0103-d7f8-48a5-b95e-29bf812cac1f 49336223-f6df-4fe7-bd9f-95123c5622a9 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_d5fd048a-8786-42d3-938e-820eae95d7f4 c22b409e-662b-48a9-ac70-29f7487aa6b1 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 + crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 artifact: version 1.0.0 in service fd00:1122:3344:103::27 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset expunged ✓ fd00:1122:3344:103::24 + crucible_pantry c88fcd7d-9509-470e-8c4f-3e6f09104cdc artifact: version 1.0.0 in service fd00:1122:3344:103::28 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 + internal_dns 7fbd0103-d7f8-48a5-b95e-29bf812cac1f artifact: version 1.0.0 in service fd00:1122:3344:3::1 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset expunged ✓ fd00:1122:3344:3::1 + internal_ntp d5fd048a-8786-42d3-938e-820eae95d7f4 artifact: version 1.0.0 in service fd00:1122:3344:103::29 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset expunged ✓ fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 +* crucible f55647d4-5500-4ad3-893a-df45bd50d622 - install dataset in service fd00:1122:3344:103::25 + └─ + artifact: version 1.0.0 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: 
SingleNode (unchanged) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + unchanged names: 51 (records: 65) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (2182613d-dc9f-41eb-9c6a-d33801849caa) + +> inventory-generate +generated inventory collection d483be68-4bf3-4133-aed1-661cba8e1194 from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 6, num_eligible: 0, num_ineligible: 2 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial2, part_number: model2, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece based on parent blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa +planning report for blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece: +chicken switches: + add zones with mupdate override: false + +* 1 out-of-date zone expunged: + * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone f6ec9c67-946a-4da3-98d5-581f72ce8bf0 (external_dns) +* 4 remaining out-of-date zones + + +> blueprint-diff latest +from: blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa +to: blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece + + MODIFIED SLEDS: + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 12 -> 13): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 
1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-18b20749-0748-4105-bb10-7b13cfc776e2 in service + fake-vendor fake-model serial-30c16fe4-4229-49d0-ab01-3138f2c7dff2 in service + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crucible 7ea73f80-c4e0-450a-92dc-8397ce2af14f in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crucible 6f04dd20-5e2c-4fa8-8430-a886470ed140 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns bdc35fc8-8541-4dfc-b9fa-db05eceb5c55 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_crucible_694bd14f-cb24-4be4-bb19-876e79cda2c8 3443a368-199e-4d26-b59f-3f2bbd507761 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_c88fcd7d-9509-470e-8c4f-3e6f09104cdc fed6b06e-ad3f-41c2-b7cd-e783462ec58c in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_7fbd0103-d7f8-48a5-b95e-29bf812cac1f 49336223-f6df-4fe7-bd9f-95123c5622a9 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_d5fd048a-8786-42d3-938e-820eae95d7f4 c22b409e-662b-48a9-ac70-29f7487aa6b1 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 expunged 
none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 +* oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 - in service none none off + └─ + expunged +* oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f - in service none none off + └─ + expunged + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 + crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 artifact: version 1.0.0 in service fd00:1122:3344:103::27 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 artifact: version 1.0.0 in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset expunged ✓ fd00:1122:3344:103::24 + crucible_pantry c88fcd7d-9509-470e-8c4f-3e6f09104cdc artifact: version 1.0.0 in service fd00:1122:3344:103::28 + internal_dns 7fbd0103-d7f8-48a5-b95e-29bf812cac1f artifact: version 1.0.0 in service fd00:1122:3344:3::1 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset expunged ✓ fd00:1122:3344:3::1 + internal_ntp d5fd048a-8786-42d3-938e-820eae95d7f4 artifact: version 1.0.0 in service fd00:1122:3344:103::29 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset expunged ✓ fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 +* external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset - in service fd00:1122:3344:103::23 + └─ + expunged ⏳ + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": +* name: _external-dns._tcp (records: 3 -> 2) +- SRV port 5353 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal +- SRV port 5353 e14f91b0-0c41-48a0-919d-e5078d2b89b0.host.control-plane.oxide.internal +- SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal ++ SRV port 5353 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal ++ SRV port 5353 e14f91b0-0c41-48a0-919d-e5078d2b89b0.host.control-plane.oxide.internal +- name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) +- AAAA fd00:1122:3344:103::23 + unchanged names: 49 (records: 61) + +external DNS: +* DNS zone: "oxide.example": +* name: @ (records: 3 -> 2) +- NS ns1.oxide.example +- NS ns2.oxide.example +- NS ns3.oxide.example ++ NS ns1.oxide.example ++ NS ns2.oxide.example +- name: ns3 (records: 1) +- A 198.51.100.3 + unchanged names: 3 (records: 5) + + + + +> 
sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (e8b088a8-7da0-480b-a2dc-75ffef068ece) + +> inventory-generate +generated inventory collection 74b742c1-01da-4461-a011-785e2e11a5b2 from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 7, num_already_artifact: 6, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial2, part_number: model2, active_phase_1_slot: B, boot_disk: A +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint 810ea95a-4730-43dd-867e-1984aeb9d873 based on parent blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece +planning report for blueprint 810ea95a-4730-43dd-867e-1984aeb9d873: +chicken switches: + add zones with mupdate override: false + +* discretionary zones placed: + * 1 zone on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: external_dns +* zone updates waiting on discretionary zones + + +> blueprint-diff latest +from: blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece +to: blueprint 810ea95a-4730-43dd-867e-1984aeb9d873 + + MODIFIED SLEDS: + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 13 -> 14): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor 
fake-model serial-18b20749-0748-4105-bb10-7b13cfc776e2 in service + fake-vendor fake-model serial-30c16fe4-4229-49d0-ab01-3138f2c7dff2 in service + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crucible 7ea73f80-c4e0-450a-92dc-8397ce2af14f in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crucible 6f04dd20-5e2c-4fa8-8430-a886470ed140 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns bdc35fc8-8541-4dfc-b9fa-db05eceb5c55 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_crucible_694bd14f-cb24-4be4-bb19-876e79cda2c8 3443a368-199e-4d26-b59f-3f2bbd507761 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_c88fcd7d-9509-470e-8c4f-3e6f09104cdc fed6b06e-ad3f-41c2-b7cd-e783462ec58c in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_7fbd0103-d7f8-48a5-b95e-29bf812cac1f 49336223-f6df-4fe7-bd9f-95123c5622a9 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_d5fd048a-8786-42d3-938e-820eae95d7f4 c22b409e-662b-48a9-ac70-29f7487aa6b1 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 
c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 ++ oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 850d04b8-c706-46e9-b405-a7a800b744b5 in service none none off ++ oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f4d7ec7b-5e5c-4c90-97f2-2ac9d4588a01 b24bee8e-82a0-4b4d-a57c-77a1010f3e38 in service none none off + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 + crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 artifact: version 1.0.0 in service fd00:1122:3344:103::27 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 artifact: version 1.0.0 in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset expunged ✓ fd00:1122:3344:103::24 + crucible_pantry c88fcd7d-9509-470e-8c4f-3e6f09104cdc artifact: version 1.0.0 in service fd00:1122:3344:103::28 + internal_dns 7fbd0103-d7f8-48a5-b95e-29bf812cac1f artifact: version 1.0.0 in service fd00:1122:3344:3::1 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset expunged ✓ fd00:1122:3344:3::1 + internal_ntp d5fd048a-8786-42d3-938e-820eae95d7f4 artifact: version 1.0.0 in service fd00:1122:3344:103::29 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset expunged ✓ fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 +* external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset - expunged ⏳ fd00:1122:3344:103::23 + └─ + expunged ✓ ++ external_dns f4d7ec7b-5e5c-4c90-97f2-2ac9d4588a01 artifact: version 1.0.0 in service fd00:1122:3344:103::2a + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": +* name: _external-dns._tcp (records: 2 -> 3) +- SRV port 5353 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal +- SRV port 5353 e14f91b0-0c41-48a0-919d-e5078d2b89b0.host.control-plane.oxide.internal ++ SRV port 5353 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal ++ SRV port 5353 e14f91b0-0c41-48a0-919d-e5078d2b89b0.host.control-plane.oxide.internal ++ SRV port 5353 f4d7ec7b-5e5c-4c90-97f2-2ac9d4588a01.host.control-plane.oxide.internal ++ name: f4d7ec7b-5e5c-4c90-97f2-2ac9d4588a01.host (records: 1) ++ AAAA fd00:1122:3344:103::2a + unchanged names: 49 (records: 61) + +external DNS: +* DNS zone: "oxide.example": +* name: @ (records: 2 -> 3) +- NS ns1.oxide.example +- NS ns2.oxide.example ++ NS ns1.oxide.example ++ NS 
ns2.oxide.example ++ NS ns3.oxide.example ++ name: ns3 (records: 1) ++ A 198.51.100.3 + unchanged names: 3 (records: 5) + + + + +> # The previous step updated the last non-Nexus zone on the final sled. We should +> # now see a blueprint where every in-service zone (other than Nexus) has an +> # image source set to an artifact from our TUF repo. +> blueprint-show latest +blueprint 810ea95a-4730-43dd-867e-1984aeb9d873 +parent: e8b088a8-7da0-480b-a2dc-75ffef068ece + + sled: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 15) + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-727522a7-934f-494d-b5b3-160968e74463 in service + fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service + fake-vendor fake-model serial-b5fd5bc1-099e-4e77-8028-a9793c11f43b in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_727522a7-934f-494d-b5b3-160968e74463/crucible 2f204c50-a327-479c-8852-f53ec7a19c1f in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 9f185688-315b-445a-824a-84ff526417f2 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 5c01fdbd-ff37-44b4-a17b-6d625e6fa48d in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 3e0d6188-c503-49cf-a441-fa7df40ceb43 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off + 
oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_698d1d82-0620-4978-93ac-0ba5d40f3da9 dfe5586b-e4a8-4b98-ad72-eabc34988177 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_c821c39d-2b2c-4c55-8874-ac12315ba1e4 a21812e1-1b80-4faa-9f2b-51189e0f6999 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_f83ade6d-9ab1-4679-813b-b9457e039c0b 7642e649-44d6-49b0-ba7c-cc3f6b6f2f9c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 + + + omicron zones: + ------------------------------------------------------------------------------------------------------------------------ + zone type zone id image source disposition underlay IP + ------------------------------------------------------------------------------------------------------------------------ + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 + crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 artifact: version 1.0.0 in service fd00:1122:3344:102::28 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 artifact: version 1.0.0 in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c artifact: version 1.0.0 in service fd00:1122:3344:102::27 + crucible_pantry 698d1d82-0620-4978-93ac-0ba5d40f3da9 artifact: version 1.0.0 in service fd00:1122:3344:102::2b + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset expunged ✓ fd00:1122:3344:102::25 + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset expunged ✓ fd00:1122:3344:102::24 + external_dns ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::2a + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset expunged ✓ fd00:1122:3344:1::1 + internal_dns c821c39d-2b2c-4c55-8874-ac12315ba1e4 artifact: version 1.0.0 in service fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset expunged ✓ fd00:1122:3344:102::21 + internal_ntp 
f83ade6d-9ab1-4679-813b-b9457e039c0b artifact: version 1.0.0 in service fd00:1122:3344:102::29 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 + + + + sled: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 14) + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-073979dd-3248-44a5-9fa1-cc72a140d682 in service + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + fake-vendor fake-model serial-e4d937e1-6ddc-4eca-bb08-c1f73791e608 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 44811c39-a4a2-4be3-85a6-954cf148e4b2 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns dedff151-ee94-4ead-a742-f973b39e21db in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_058fd5f9-60a8-4e11-9302-15172782e17d 02c56a30-7d97-406d-bd34-1eb437fd517d in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 832fd140-d467-4bad-b5e9-63171634087c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 4d7e3e8e-06bd-414c-a468-779e056a9b75 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_26bdd109-c842-43a9-95cb-15aba9b0832b 16a8c618-d062-4bde-8ca4-301b5f14ccf2 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 
43931274-7fe8-4077-825d-dff2bc8efa58 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_e14f91b0-0c41-48a0-919d-e5078d2b89b0 312286f1-e378-464d-97cb-6fa06ba2dab7 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_71f71743-8c73-43c6-b080-427ec28ef4c9 d8368353-9ae6-4ed0-99ad-21783f514ba6 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_cc6fdaf4-0195-4cef-950d-7bacd7059787 5d6116c2-ade3-4246-8d02-5591d2622c10 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 + + + omicron zones: + ------------------------------------------------------------------------------------------------------------------------ + zone type zone id image source disposition underlay IP + ------------------------------------------------------------------------------------------------------------------------ + crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service fd00:1122:3344:101::27 + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 artifact: version 1.0.0 in service fd00:1122:3344:101::26 + crucible_pantry 26bdd109-c842-43a9-95cb-15aba9b0832b artifact: version 1.0.0 in service fd00:1122:3344:101::2a + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset expunged ✓ fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset expunged ✓ fd00:1122:3344:101::23 + external_dns e14f91b0-0c41-48a0-919d-e5078d2b89b0 artifact: version 1.0.0 in service fd00:1122:3344:101::29 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset expunged ✓ fd00:1122:3344:2::1 + internal_dns 71f71743-8c73-43c6-b080-427ec28ef4c9 artifact: version 1.0.0 in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset expunged ✓ fd00:1122:3344:101::21 + internal_ntp cc6fdaf4-0195-4cef-950d-7bacd7059787 artifact: version 1.0.0 in service fd00:1122:3344:101::28 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 + + + + sled: d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 14) + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-18b20749-0748-4105-bb10-7b13cfc776e2 
in service + fake-vendor fake-model serial-30c16fe4-4229-49d0-ab01-3138f2c7dff2 in service + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crucible 7ea73f80-c4e0-450a-92dc-8397ce2af14f in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crucible 6f04dd20-5e2c-4fa8-8430-a886470ed140 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 850d04b8-c706-46e9-b405-a7a800b744b5 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns bdc35fc8-8541-4dfc-b9fa-db05eceb5c55 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_crucible_694bd14f-cb24-4be4-bb19-876e79cda2c8 3443a368-199e-4d26-b59f-3f2bbd507761 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_c88fcd7d-9509-470e-8c4f-3e6f09104cdc fed6b06e-ad3f-41c2-b7cd-e783462ec58c in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f4d7ec7b-5e5c-4c90-97f2-2ac9d4588a01 b24bee8e-82a0-4b4d-a57c-77a1010f3e38 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_7fbd0103-d7f8-48a5-b95e-29bf812cac1f 49336223-f6df-4fe7-bd9f-95123c5622a9 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + 
oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_d5fd048a-8786-42d3-938e-820eae95d7f4 c22b409e-662b-48a9-ac70-29f7487aa6b1 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 + + + omicron zones: + ------------------------------------------------------------------------------------------------------------------------ + zone type zone id image source disposition underlay IP + ------------------------------------------------------------------------------------------------------------------------ + crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 + crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 artifact: version 1.0.0 in service fd00:1122:3344:103::27 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 artifact: version 1.0.0 in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset expunged ✓ fd00:1122:3344:103::24 + crucible_pantry c88fcd7d-9509-470e-8c4f-3e6f09104cdc artifact: version 1.0.0 in service fd00:1122:3344:103::28 + external_dns f4d7ec7b-5e5c-4c90-97f2-2ac9d4588a01 artifact: version 1.0.0 in service fd00:1122:3344:103::2a + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset expunged ✓ fd00:1122:3344:103::23 + internal_dns 7fbd0103-d7f8-48a5-b95e-29bf812cac1f artifact: version 1.0.0 in service fd00:1122:3344:3::1 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset expunged ✓ fd00:1122:3344:3::1 + internal_ntp d5fd048a-8786-42d3-938e-820eae95d7f4 artifact: version 1.0.0 in service fd00:1122:3344:103::29 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset expunged ✓ fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) + cluster.preserve_downgrade_option: (do not modify) + + OXIMETER SETTINGS: + generation: 1 + read from:: SingleNode + + METADATA: + created by::::::::::::: reconfigurator-sim + created at::::::::::::: + comment:::::::::::::::: (none) + internal DNS version::: 1 + external DNS version::: 1 + target release min gen: 1 + + PENDING MGS-MANAGED UPDATES: 0 + +planning report for blueprint 810ea95a-4730-43dd-867e-1984aeb9d873: +chicken switches: + add zones with mupdate override: false + +* discretionary zones placed: + * 1 zone on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: external_dns +* zone updates waiting on discretionary zones + + + + +> # We ought to update the inventory for the final sled and then step through +> # the Nexus handoff process, but that work is still in progress. 
diff --git a/nexus/reconfigurator/planning/src/system.rs b/nexus/reconfigurator/planning/src/system.rs index c9741f62977..a993a8de3c6 100644 --- a/nexus/reconfigurator/planning/src/system.rs +++ b/nexus/reconfigurator/planning/src/system.rs @@ -63,6 +63,7 @@ use omicron_common::api::external::Generation; use omicron_common::disk::DiskIdentity; use omicron_common::disk::DiskVariant; use omicron_common::disk::M2Slot; +use omicron_common::policy::CRUCIBLE_PANTRY_REDUNDANCY; use omicron_common::policy::INTERNAL_DNS_REDUNDANCY; use omicron_common::policy::NEXUS_REDUNDANCY; use omicron_uuid_kinds::MupdateOverrideUuid; @@ -163,6 +164,7 @@ impl SystemDescription { // Policy defaults let target_nexus_zone_count = NEXUS_REDUNDANCY; let target_internal_dns_zone_count = INTERNAL_DNS_REDUNDANCY; + let target_crucible_pantry_zone_count = CRUCIBLE_PANTRY_REDUNDANCY; // TODO-cleanup These are wrong, but we don't currently set up any // of these zones in our fake system, so this prevents downstream test @@ -171,7 +173,6 @@ impl SystemDescription { let target_boundary_ntp_zone_count = 0; let target_cockroachdb_zone_count = 0; let target_oximeter_zone_count = 0; - let target_crucible_pantry_zone_count = 0; let target_cockroachdb_cluster_version = CockroachDbClusterVersion::POLICY; From f7147e57b1061456e59365fb15667ce4b91038fb Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Fri, 29 Aug 2025 17:29:33 -0400 Subject: [PATCH 19/38] Fix expectorate mismerge (#8957) This was a not-visible-to-git conflict between #8937 and #8940. Fixes #8956 --- .../tests/output/cmds-target-release-stdout | 64 ------------------- 1 file changed, 64 deletions(-) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout index 91a353d6813..7938d183877 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout @@ -2090,7 +2090,6 @@ INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noo INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update @@ -2226,7 +2225,6 @@ INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noo INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven 
update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update @@ -2358,7 +2356,6 @@ INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noo INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update @@ -2505,7 +2502,6 @@ INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noo INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update @@ -2653,7 +2649,6 @@ INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noo INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update 
@@ -2779,7 +2774,6 @@ INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noo INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update @@ -2929,7 +2923,6 @@ INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noo INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update @@ -3080,7 +3073,6 @@ INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noo INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update @@ -3218,7 +3210,6 @@ INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noo INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 
0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update @@ -3357,7 +3348,6 @@ INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noo INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update @@ -3488,7 +3478,6 @@ INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noo INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update @@ -3622,7 +3611,6 @@ INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noo INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for 
MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update @@ -3740,9 +3728,7 @@ INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41c INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update @@ -3880,9 +3866,7 @@ INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41c INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update @@ -4021,9 +4005,7 @@ INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41c INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO 
BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update @@ -4143,9 +4125,7 @@ INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41c INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update @@ -4274,9 +4254,7 @@ INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41c INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled 
already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update @@ -4407,9 +4385,7 @@ INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41c INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update @@ -4552,9 +4528,7 @@ INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41c INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update @@ -4698,9 +4672,7 @@ INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41c INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 
0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update @@ -4834,9 +4806,7 @@ INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41c INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update @@ -4971,9 +4941,7 @@ INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41c INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, 
part_number: model0, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update @@ -5103,9 +5071,7 @@ INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41c INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update @@ -5223,11 +5189,8 @@ INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41c INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 1, num_eligible: 0, num_ineligible: 7 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial2, part_number: model2, 
active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update generated blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2 based on parent blueprint 6ed56354-5941-40d1-a06c-b0e940701d52 @@ -5352,11 +5315,8 @@ INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41c INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 7, num_already_artifact: 1, num_eligible: 0, num_ineligible: 6 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial2, part_number: model2, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update generated blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152 based on parent blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2 @@ -5482,11 +5442,8 @@ INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41c INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 2, num_eligible: 0, num_ineligible: 6 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), 
serial_number: serial2, part_number: model2, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update generated blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88 based on parent blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152 @@ -5604,11 +5561,8 @@ INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41c INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 3, num_eligible: 0, num_ineligible: 5 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial2, part_number: model2, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update generated blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4 based on parent blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88 @@ -5744,11 +5698,8 @@ INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41c INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 7, num_already_artifact: 3, num_eligible: 0, num_ineligible: 4 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; 
is the sled already being updated?), serial_number: serial2, part_number: model2, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update generated blueprint 59630e63-c953-4807-9e84-9e750a79f68e based on parent blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4 @@ -5885,11 +5836,8 @@ INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41c INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 4, num_eligible: 0, num_ineligible: 4 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial2, part_number: model2, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update generated blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194 based on parent blueprint 59630e63-c953-4807-9e84-9e750a79f68e @@ -6019,11 +5967,8 @@ INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41c INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 7, num_already_artifact: 4, num_eligible: 0, num_ineligible: 3 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 -WARN cannot configure host OS update for board (active 
phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial2, part_number: model2, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update INFO some zones not yet up-to-date, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, zones_currently_updating: [ZoneCurrentlyUpdating { zone_id: d5fd048a-8786-42d3-938e-820eae95d7f4 (service), zone_kind: InternalNtp, reason: MissingInInventory { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") } } }] @@ -6155,11 +6100,8 @@ INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41c INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 5, num_eligible: 0, num_ineligible: 3 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial2, part_number: model2, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update generated blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa based on parent blueprint 90650737-8142-47a6-9a48-a10efc487e57 @@ -6282,11 +6224,8 @@ INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41c INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 6, num_eligible: 0, num_ineligible: 2 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot 
configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial2, part_number: model2, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update generated blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece based on parent blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa @@ -6427,11 +6366,8 @@ INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41c INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 7, num_already_artifact: 6, num_eligible: 0, num_ineligible: 1 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial0, part_number: model0, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial1, part_number: model1, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 -WARN cannot configure host OS update for board (active phase 1 slot doesn't match boot disk; is the sled already being updated?), serial_number: serial2, part_number: model2, active_phase_1_slot: B, boot_disk: A INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update generated blueprint 810ea95a-4730-43dd-867e-1984aeb9d873 based on parent blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece From 3be1d57fadcca52b82039df9d08ebdfed49b2868 Mon Sep 17 00:00:00 2001 From: Benjamin Naecker Date: Fri, 29 Aug 2025 15:10:13 -0700 Subject: [PATCH 20/38] Report IP Pool utilization as capacity and remaining (#8928) - Remove IP version-specific utilization types. All pools are only of one version, so we can use the same types for both. - Report IP Pool utilization through the API as a floating-point capacity and count of _remaining_ addresses, rather than count of allocated. This avoids dealing with enormous bit-width numbers like a u128. The cost is reduced precision when either the capacity or remaining is > 2**53, but in that case, the caller almost certainly doesn't care about that. As the remaining number of addresses is smaller, they get perfect precision. 
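As an illustration of the conversion described above (a minimal sketch, not the exact Nexus code; the helper name `utilization_view` and its error strings are placeholders), the change boils down to something like:

```rust
/// Sketch: turn the integer allocation count and pool capacity into the
/// f64 pair reported by the API. `allocated` comes back from SQL as an
/// i64; `capacity` is the sum over the pool's ranges, held as a u128.
fn utilization_view(allocated: i64, capacity: u128) -> Result<(f64, f64), String> {
    // A negative allocated count is impossible; surface it as an error
    // rather than silently clamping.
    let allocated = u128::try_from(allocated)
        .map_err(|_| "negative allocated count".to_string())?;

    // remaining = capacity - allocated, checked so we never report a
    // negative number of free addresses.
    let remaining = capacity
        .checked_sub(allocated)
        .ok_or_else(|| "allocated exceeds capacity".to_string())?;

    // The final casts lose integer precision above 2**53, which only
    // matters for enormous (IPv6-scale) pools; remaining is the smaller
    // of the two values, so it stays exact for longer.
    Ok((remaining as f64, capacity as f64))
}
```

The checked subtraction mirrors the app-layer change below, where an impossible negative remainder is reported as an internal error instead of wrapping.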
- Fixes #8888 - Fixes #5347 --- nexus/db-model/src/utilization.rs | 30 +-- nexus/db-queries/src/db/datastore/ip_pool.rs | 175 ++++++++++-------- nexus/src/app/utilization.rs | 63 +++---- nexus/test-utils/src/resource_helpers.rs | 34 ++-- nexus/tests/integration_tests/external_ips.rs | 35 ++-- nexus/tests/integration_tests/instances.rs | 73 ++++---- nexus/tests/integration_tests/ip_pools.rs | 26 +-- nexus/types/src/external_api/views.rs | 84 ++------- openapi/nexus.json | 67 ++----- 9 files changed, 229 insertions(+), 358 deletions(-) diff --git a/nexus/db-model/src/utilization.rs b/nexus/db-model/src/utilization.rs index 39b3a1b491b..3db4bee80ff 100644 --- a/nexus/db-model/src/utilization.rs +++ b/nexus/db-model/src/utilization.rs @@ -57,39 +57,15 @@ impl From for views::Utilization { } } -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Ipv4Utilization { - pub allocated: u32, - pub capacity: u32, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Ipv6Utilization { - pub allocated: u128, - pub capacity: u128, -} - // Not really a DB model, just the result of a datastore function #[derive(Debug, Clone, Serialize, Deserialize)] pub struct IpPoolUtilization { - pub ipv4: Ipv4Utilization, - pub ipv6: Ipv6Utilization, -} - -impl From for views::Ipv4Utilization { - fn from(util: Ipv4Utilization) -> Self { - Self { allocated: util.allocated, capacity: util.capacity } - } -} - -impl From for views::Ipv6Utilization { - fn from(util: Ipv6Utilization) -> Self { - Self { allocated: util.allocated, capacity: util.capacity } - } + pub remaining: f64, + pub capacity: f64, } impl From for views::IpPoolUtilization { fn from(util: IpPoolUtilization) -> Self { - Self { ipv4: util.ipv4.into(), ipv6: util.ipv6.into() } + Self { remaining: util.remaining, capacity: util.capacity } } } diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 436c7f1227e..d8084cc4abd 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -57,16 +57,6 @@ use omicron_common::api::external::http_pagination::PaginatedBy; use ref_cast::RefCast; use uuid::Uuid; -pub struct IpsAllocated { - pub ipv4: i64, - pub ipv6: i64, -} - -pub struct IpsCapacity { - pub ipv4: u32, - pub ipv6: u128, -} - /// Helper type with both an authz IP Pool and the actual DB record. #[derive(Debug, Clone)] pub struct ServiceIpPool { @@ -401,53 +391,100 @@ impl DataStore { }) } - pub async fn ip_pool_allocated_count( + /// Return the number of IPs allocated from and the capacity of the provided + /// IP Pool. 
+ pub async fn ip_pool_utilization( &self, opctx: &OpContext, authz_pool: &authz::IpPool, - ) -> Result { + ) -> Result<(i64, u128), Error> { opctx.authorize(authz::Action::Read, authz_pool).await?; + opctx.authorize(authz::Action::ListChildren, authz_pool).await?; + let conn = self.pool_connection_authorized(opctx).await?; + let (allocated, ranges) = self + .transaction_retry_wrapper("ip_pool_utilization") + .transaction(&conn, |conn| async move { + let allocated = self + .ip_pool_allocated_count_on_connection(&conn, authz_pool) + .await?; + let ranges = self + .ip_pool_list_ranges_batched_on_connection( + &conn, authz_pool, + ) + .await?; + Ok((allocated, ranges)) + }) + .await + .map_err(|e| match &e { + DieselError::NotFound => public_error_from_diesel( + e, + ErrorHandler::NotFoundByResource(authz_pool), + ), + _ => public_error_from_diesel(e, ErrorHandler::Server), + })?; + let capacity = Self::accumulate_ip_range_sizes(ranges)?; + Ok((allocated, capacity)) + } - use diesel::dsl::sql; - use diesel::sql_types::BigInt; - use nexus_db_schema::schema::external_ip; + /// Return the total number of IPs allocated from the provided pool. + #[cfg(test)] + async fn ip_pool_allocated_count( + &self, + opctx: &OpContext, + authz_pool: &authz::IpPool, + ) -> Result { + opctx.authorize(authz::Action::Read, authz_pool).await?; + let conn = self.pool_connection_authorized(opctx).await?; + self.ip_pool_allocated_count_on_connection(&conn, authz_pool) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } - let (ipv4, ipv6) = external_ip::table + async fn ip_pool_allocated_count_on_connection( + &self, + conn: &async_bb8_diesel::Connection, + authz_pool: &authz::IpPool, + ) -> Result { + use nexus_db_schema::schema::external_ip; + external_ip::table .filter(external_ip::ip_pool_id.eq(authz_pool.id())) .filter(external_ip::time_deleted.is_null()) - // need to do distinct IP because SNAT IPs are shared between - // multiple instances, and each gets its own row in the table - .select(( - sql::( - "count(distinct ip) FILTER (WHERE family(ip) = 4)", - ), - sql::( - "count(distinct ip) FILTER (WHERE family(ip) = 6)", - ), - )) - .first_async::<(i64, i64)>( - &*self.pool_connection_authorized(opctx).await?, - ) + .select(diesel::dsl::count_distinct(external_ip::ip)) + .first_async::(conn) .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; - - Ok(IpsAllocated { ipv4, ipv6 }) } - pub async fn ip_pool_total_capacity( + /// Return the total capacity of the provided pool. 
+ #[cfg(test)] + async fn ip_pool_total_capacity( &self, opctx: &OpContext, authz_pool: &authz::IpPool, - ) -> Result { + ) -> Result { opctx.authorize(authz::Action::Read, authz_pool).await?; opctx.authorize(authz::Action::ListChildren, authz_pool).await?; + let conn = self.pool_connection_authorized(opctx).await?; + self.ip_pool_list_ranges_batched_on_connection(&conn, authz_pool) + .await + .map_err(|e| { + public_error_from_diesel( + e, + ErrorHandler::NotFoundByResource(authz_pool), + ) + }) + .and_then(Self::accumulate_ip_range_sizes) + } + async fn ip_pool_list_ranges_batched_on_connection( + &self, + conn: &async_bb8_diesel::Connection, + authz_pool: &authz::IpPool, + ) -> Result, DieselError> { use nexus_db_schema::schema::ip_pool_range; - - let ranges = ip_pool_range::table + ip_pool_range::table .filter(ip_pool_range::ip_pool_id.eq(authz_pool.id())) .filter(ip_pool_range::time_deleted.is_null()) - .select(IpPoolRange::as_select()) + .select((ip_pool_range::first_address, ip_pool_range::last_address)) // This is a rare unpaginated DB query, which means we are // vulnerable to a resource exhaustion attack in which someone // creates a very large number of ranges in order to make this @@ -457,28 +494,25 @@ impl DataStore { // than 10,000 ranges in a pool, we will undercount, but I have a // hard time seeing that as a practical problem. .limit(10000) - .get_results_async::( - &*self.pool_connection_authorized(opctx).await?, - ) + .get_results_async::<(IpNetwork, IpNetwork)>(conn) .await - .map_err(|e| { - public_error_from_diesel( - e, - ErrorHandler::NotFoundByResource(authz_pool), - ) - })?; - - let mut ipv4: u32 = 0; - let mut ipv6: u128 = 0; + } - for range in &ranges { - let r = IpRange::from(range); + fn accumulate_ip_range_sizes( + ranges: Vec<(IpNetwork, IpNetwork)>, + ) -> Result { + let mut count: u128 = 0; + for range in ranges.into_iter() { + let first = range.0.ip(); + let last = range.1.ip(); + let r = IpRange::try_from((first, last)) + .map_err(|e| Error::internal_error(e.as_str()))?; match r { - IpRange::V4(r) => ipv4 += r.len(), - IpRange::V6(r) => ipv6 += r.len(), + IpRange::V4(r) => count += u128::from(r.len()), + IpRange::V6(r) => count += r.len(), } } - Ok(IpsCapacity { ipv4, ipv6 }) + Ok(count) } pub async fn ip_pool_silo_list( @@ -1523,8 +1557,7 @@ mod test { .ip_pool_total_capacity(&opctx, &authz_pool) .await .unwrap(); - assert_eq!(max_ips.ipv4, 0); - assert_eq!(max_ips.ipv6, 0); + assert_eq!(max_ips, 0); let range = IpRange::V4( Ipv4Range::new( @@ -1543,8 +1576,7 @@ mod test { .ip_pool_total_capacity(&opctx, &authz_pool) .await .unwrap(); - assert_eq!(max_ips.ipv4, 5); - assert_eq!(max_ips.ipv6, 0); + assert_eq!(max_ips, 5); let link = IpPoolResource { ip_pool_id: pool.id(), @@ -1561,8 +1593,7 @@ mod test { .ip_pool_allocated_count(&opctx, &authz_pool) .await .unwrap(); - assert_eq!(ip_count.ipv4, 0); - assert_eq!(ip_count.ipv6, 0); + assert_eq!(ip_count, 0); let identity = IdentityMetadataCreateParams { name: "my-ip".parse().unwrap(), @@ -1578,16 +1609,14 @@ mod test { .ip_pool_allocated_count(&opctx, &authz_pool) .await .unwrap(); - assert_eq!(ip_count.ipv4, 1); - assert_eq!(ip_count.ipv6, 0); + assert_eq!(ip_count, 1); // allocating one has nothing to do with total capacity let max_ips = datastore .ip_pool_total_capacity(&opctx, &authz_pool) .await .unwrap(); - assert_eq!(max_ips.ipv4, 5); - assert_eq!(max_ips.ipv6, 0); + assert_eq!(max_ips, 5); db.terminate().await; logctx.cleanup_successful(); @@ -1642,8 +1671,7 @@ mod test { 
.ip_pool_total_capacity(&opctx, &authz_pool) .await .unwrap(); - assert_eq!(max_ips.ipv4, 0); - assert_eq!(max_ips.ipv6, 0); + assert_eq!(max_ips, 0); // Add an IPv6 range let ipv6_range = IpRange::V6( @@ -1661,15 +1689,13 @@ mod test { .ip_pool_total_capacity(&opctx, &authz_pool) .await .unwrap(); - assert_eq!(max_ips.ipv4, 0); - assert_eq!(max_ips.ipv6, 11 + 65536); + assert_eq!(max_ips, 11 + 65536); let ip_count = datastore .ip_pool_allocated_count(&opctx, &authz_pool) .await .unwrap(); - assert_eq!(ip_count.ipv4, 0); - assert_eq!(ip_count.ipv6, 0); + assert_eq!(ip_count, 0); let identity = IdentityMetadataCreateParams { name: "my-ip".parse().unwrap(), @@ -1685,16 +1711,14 @@ mod test { .ip_pool_allocated_count(&opctx, &authz_pool) .await .unwrap(); - assert_eq!(ip_count.ipv4, 0); - assert_eq!(ip_count.ipv6, 1); + assert_eq!(ip_count, 1); // allocating one has nothing to do with total capacity let max_ips = datastore .ip_pool_total_capacity(&opctx, &authz_pool) .await .unwrap(); - assert_eq!(max_ips.ipv4, 0); - assert_eq!(max_ips.ipv6, 11 + 65536); + assert_eq!(max_ips, 11 + 65536); // add a giant range for fun let ipv6_range = IpRange::V6( @@ -1715,8 +1739,7 @@ mod test { .ip_pool_total_capacity(&opctx, &authz_pool) .await .unwrap(); - assert_eq!(max_ips.ipv4, 0); - assert_eq!(max_ips.ipv6, 1208925819614629174706166); + assert_eq!(max_ips, 1208925819614629174706166); db.terminate().await; logctx.cleanup_successful(); diff --git a/nexus/src/app/utilization.rs b/nexus/src/app/utilization.rs index b343c71dd64..a631a2eae92 100644 --- a/nexus/src/app/utilization.rs +++ b/nexus/src/app/utilization.rs @@ -6,8 +6,6 @@ use nexus_db_lookup::lookup; use nexus_db_model::IpPoolUtilization; -use nexus_db_model::Ipv4Utilization; -use nexus_db_model::Ipv6Utilization; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; @@ -42,46 +40,27 @@ impl super::Nexus { let (.., authz_pool) = pool_lookup.lookup_for(authz::Action::Read).await?; - let allocated = self - .db_datastore - .ip_pool_allocated_count(opctx, &authz_pool) - .await?; - let capacity = self - .db_datastore - .ip_pool_total_capacity(opctx, &authz_pool) - .await?; + let (allocated, capacity) = + self.db_datastore.ip_pool_utilization(opctx, &authz_pool).await?; - // we have one query for v4 and v6 allocated and one for both v4 and - // v6 capacity so we can do two queries instead 4, but in the response - // we want them paired by IP version - Ok(IpPoolUtilization { - ipv4: Ipv4Utilization { - // This one less trivial to justify than the u128 conversion - // below because an i64 could obviously be too big for u32. - // In addition to the fact that it is unlikely for anyone to - // allocate 4 billion IPs, we rely on the fact that there can - // only be 2^32 IPv4 addresses, period. - allocated: u32::try_from(allocated.ipv4).map_err(|_e| { - Error::internal_error(&format!( - "Failed to convert i64 {} IPv4 address count to u32", - allocated.ipv4 - )) - })?, - capacity: capacity.ipv4, - }, - ipv6: Ipv6Utilization { - // SQL represents counts as signed integers for historical - // or compatibility reasons even though they can't really be - // negative, and Diesel follows that. We assume here that it - // will be a positive i64. 
- allocated: u128::try_from(allocated.ipv6).map_err(|_e| { - Error::internal_error(&format!( - "Failed to convert i64 {} IPv6 address count to u128", - allocated.ipv6 - )) - })?, - capacity: capacity.ipv6, - }, - }) + // Compute the remaining count in full 128-bit arithmetic, checking for + // negative values, and convert to f64s at the end. + let Ok(allocated) = u128::try_from(allocated) else { + return Err(Error::internal_error( + "Impossible negative number of allocated IP addresses", + )); + }; + let Some(remaining) = capacity.checked_sub(allocated) else { + return Err(Error::internal_error( + format!( + "Computed an impossible negative count of remaining IP \ + addresses. Capacity = {capacity}, allocated = {allocated}" + ) + .as_str(), + )); + }; + let remaining = remaining as f64; + let capacity = capacity as f64; + Ok(IpPoolUtilization { remaining, capacity }) } } diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index 55f3d8b8f1b..dec5559368f 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -1057,35 +1057,29 @@ pub async fn detach_ip_address_from_igw( .unwrap(); } +/// Assert that the utilization of the provided pool matches expectations. +/// +/// Note that the third argument is the number of _allocated_ addresses as an +/// integer. This is compared against the count of remaining addresses +/// internally, which is what the API returns. pub async fn assert_ip_pool_utilization( client: &ClientTestContext, pool_name: &str, - ipv4_allocated: u32, - ipv4_capacity: u32, - ipv6_allocated: u128, - ipv6_capacity: u128, + allocated: u32, + capacity: f64, ) { let url = format!("/v1/system/ip-pools/{}/utilization", pool_name); let utilization: views::IpPoolUtilization = object_get(client, &url).await; + let remaining = capacity - f64::from(allocated); assert_eq!( - utilization.ipv4.allocated, ipv4_allocated, - "IP pool '{}': expected {} IPv4 allocated, got {:?}", - pool_name, ipv4_allocated, utilization.ipv4.allocated - ); - assert_eq!( - utilization.ipv4.capacity, ipv4_capacity, - "IP pool '{}': expected {} IPv4 capacity, got {:?}", - pool_name, ipv4_capacity, utilization.ipv4.capacity - ); - assert_eq!( - utilization.ipv6.allocated, ipv6_allocated, - "IP pool '{}': expected {} IPv6 allocated, got {:?}", - pool_name, ipv6_allocated, utilization.ipv6.allocated + remaining, utilization.remaining, + "IP pool '{}': expected {} remaining, got {}", + pool_name, remaining, utilization.remaining, ); assert_eq!( - utilization.ipv6.capacity, ipv6_capacity, - "IP pool '{}': expected {} IPv6 capacity, got {:?}", - pool_name, ipv6_capacity, utilization.ipv6.capacity + capacity, utilization.capacity, + "IP pool '{}': expected {} capacity, got {:?}", + pool_name, capacity, utilization.capacity, ); } diff --git a/nexus/tests/integration_tests/external_ips.rs b/nexus/tests/integration_tests/external_ips.rs index f2d9b0428d5..b3f78e0b2c8 100644 --- a/nexus/tests/integration_tests/external_ips.rs +++ b/nexus/tests/integration_tests/external_ips.rs @@ -165,17 +165,19 @@ async fn test_floating_ip_create(cptestctx: &ControlPlaneTestContext) { // automatically linked to current silo let default_pool = create_default_ip_pool(&client).await; - assert_ip_pool_utilization(client, "default", 0, 65536, 0, 0).await; + const CAPACITY: f64 = 65536.0; + assert_ip_pool_utilization(client, "default", 0, CAPACITY).await; - let other_pool_range = IpRange::V4( + let ipv4_range = Ipv4Range::new(Ipv4Addr::new(10, 1, 0, 1), 
Ipv4Addr::new(10, 1, 0, 5)) - .unwrap(), - ); + .unwrap(); + let other_capacity = ipv4_range.len().into(); + let other_pool_range = IpRange::V4(ipv4_range); // not automatically linked to currently silo. see below let (other_pool, ..) = create_ip_pool(&client, "other-pool", Some(other_pool_range)).await; - assert_ip_pool_utilization(client, "other-pool", 0, 5, 0, 0).await; + assert_ip_pool_utilization(client, "other-pool", 0, other_capacity).await; let project = create_project(client, PROJECT_NAME).await; @@ -195,7 +197,7 @@ async fn test_floating_ip_create(cptestctx: &ControlPlaneTestContext) { assert_eq!(fip.ip, IpAddr::from(Ipv4Addr::new(10, 0, 0, 0))); assert_eq!(fip.ip_pool_id, default_pool.identity.id); - assert_ip_pool_utilization(client, "default", 1, 65536, 0, 0).await; + assert_ip_pool_utilization(client, "default", 1, CAPACITY).await; // Create with chosen IP and fallback to default pool. let fip_name = FIP_NAMES[1]; @@ -214,7 +216,7 @@ async fn test_floating_ip_create(cptestctx: &ControlPlaneTestContext) { assert_eq!(fip.ip, ip_addr); assert_eq!(fip.ip_pool_id, default_pool.identity.id); - assert_ip_pool_utilization(client, "default", 2, 65536, 0, 0).await; + assert_ip_pool_utilization(client, "default", 2, CAPACITY).await; // Creating with other-pool fails with 404 until it is linked to the current silo let fip_name = FIP_NAMES[2]; @@ -231,7 +233,7 @@ async fn test_floating_ip_create(cptestctx: &ControlPlaneTestContext) { object_create_error(client, &url, ¶ms, StatusCode::NOT_FOUND).await; assert_eq!(error.message, "not found: ip-pool with name \"other-pool\""); - assert_ip_pool_utilization(client, "other-pool", 0, 5, 0, 0).await; + assert_ip_pool_utilization(client, "other-pool", 0, other_capacity).await; // now link the pool and everything should work with the exact same params let silo_id = DEFAULT_SILO.id(); @@ -245,7 +247,7 @@ async fn test_floating_ip_create(cptestctx: &ControlPlaneTestContext) { assert_eq!(fip.ip, IpAddr::from(Ipv4Addr::new(10, 1, 0, 1))); assert_eq!(fip.ip_pool_id, other_pool.identity.id); - assert_ip_pool_utilization(client, "other-pool", 1, 5, 0, 0).await; + assert_ip_pool_utilization(client, "other-pool", 1, other_capacity).await; // Create with chosen IP from non-default pool. let fip_name = FIP_NAMES[3]; @@ -264,7 +266,7 @@ async fn test_floating_ip_create(cptestctx: &ControlPlaneTestContext) { assert_eq!(fip.ip, ip_addr); assert_eq!(fip.ip_pool_id, other_pool.identity.id); - assert_ip_pool_utilization(client, "other-pool", 2, 5, 0, 0).await; + assert_ip_pool_utilization(client, "other-pool", 2, other_capacity).await; } #[nexus_test] @@ -743,7 +745,8 @@ async fn test_external_ip_live_attach_detach( create_default_ip_pool(&client).await; let project = create_project(client, PROJECT_NAME).await; - assert_ip_pool_utilization(client, "default", 0, 65536, 0, 0).await; + const CAPACITY: f64 = 65536.0; + assert_ip_pool_utilization(client, "default", 0, CAPACITY).await; // Create 2 instances, and a floating IP for each instance. // One instance will be started, and one will be stopped. 
@@ -762,7 +765,7 @@ async fn test_external_ip_live_attach_detach( } // 2 floating IPs have been allocated - assert_ip_pool_utilization(client, "default", 2, 65536, 0, 0).await; + assert_ip_pool_utilization(client, "default", 2, CAPACITY).await; let mut instances = vec![]; for (i, start) in [false, true].iter().enumerate() { @@ -799,7 +802,7 @@ async fn test_external_ip_live_attach_detach( // the two instances above were deliberately not given ephemeral IPs, but // they still always get SNAT IPs, but they share one, so we go from 2 to 3 - assert_ip_pool_utilization(client, "default", 3, 65536, 0, 0).await; + assert_ip_pool_utilization(client, "default", 3, CAPACITY).await; // Attach a floating IP and ephemeral IP to each instance. let mut recorded_ephs = vec![]; @@ -847,7 +850,7 @@ async fn test_external_ip_live_attach_detach( // now 5 because an ephemeral IP was added for each instance. floating IPs // were attached, but they were already allocated - assert_ip_pool_utilization(client, "default", 5, 65536, 0, 0).await; + assert_ip_pool_utilization(client, "default", 5, CAPACITY).await; // Detach a floating IP and ephemeral IP from each instance. for (instance, fip) in instances.iter().zip(&fips) { @@ -881,7 +884,7 @@ async fn test_external_ip_live_attach_detach( } // 2 ephemeral go away on detachment but still 2 floating and 1 SNAT - assert_ip_pool_utilization(client, "default", 3, 65536, 0, 0).await; + assert_ip_pool_utilization(client, "default", 3, CAPACITY).await; // Finally, two kind of funny tests. There is special logic in the handler // for the case where the floating IP is specified by name but the instance @@ -969,7 +972,7 @@ async fn test_external_ip_live_attach_detach( ); // none of that changed the number of allocated IPs - assert_ip_pool_utilization(client, "default", 3, 65536, 0, 0).await; + assert_ip_pool_utilization(client, "default", 3, CAPACITY).await; } #[nexus_test] diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index f469c8959fd..9160dd6b4cd 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -6138,32 +6138,32 @@ async fn test_instance_ephemeral_ip_from_correct_pool( // // The first is given to the "default" pool, the provided to a distinct // explicit pool. 
- let range1 = IpRange::V4( - Ipv4Range::new( - std::net::Ipv4Addr::new(10, 0, 0, 1), - std::net::Ipv4Addr::new(10, 0, 0, 5), - ) - .unwrap(), - ); - let range2 = IpRange::V4( - Ipv4Range::new( - std::net::Ipv4Addr::new(10, 1, 0, 1), - std::net::Ipv4Addr::new(10, 1, 0, 5), - ) - .unwrap(), - ); + let ipv4_range1 = Ipv4Range::new( + std::net::Ipv4Addr::new(10, 0, 0, 1), + std::net::Ipv4Addr::new(10, 0, 0, 5), + ) + .unwrap(); + let capacity1 = ipv4_range1.len().into(); + let range1 = IpRange::V4(ipv4_range1); + let ipv4_range2 = Ipv4Range::new( + std::net::Ipv4Addr::new(10, 1, 0, 1), + std::net::Ipv4Addr::new(10, 1, 0, 5), + ) + .unwrap(); + let capacity2 = ipv4_range2.len().into(); + let range2 = IpRange::V4(ipv4_range2); // make first pool the default for the priv user's silo create_ip_pool(&client, "pool1", Some(range1)).await; link_ip_pool(&client, "pool1", &DEFAULT_SILO.id(), /*default*/ true).await; - assert_ip_pool_utilization(client, "pool1", 0, 5, 0, 0).await; + assert_ip_pool_utilization(client, "pool1", 0, capacity1).await; // second pool is associated with the silo but not default create_ip_pool(&client, "pool2", Some(range2)).await; link_ip_pool(&client, "pool2", &DEFAULT_SILO.id(), /*default*/ false).await; - assert_ip_pool_utilization(client, "pool2", 0, 5, 0, 0).await; + assert_ip_pool_utilization(client, "pool2", 0, capacity2).await; // Create an instance with pool name blank, expect IP from default pool create_instance_with_pool(client, "pool1-inst", None).await; @@ -6174,9 +6174,9 @@ async fn test_instance_ephemeral_ip_from_correct_pool( "Expected ephemeral IP to come from pool1" ); // 1 ephemeral + 1 snat - assert_ip_pool_utilization(client, "pool1", 2, 5, 0, 0).await; + assert_ip_pool_utilization(client, "pool1", 2, capacity1).await; // pool2 unaffected - assert_ip_pool_utilization(client, "pool2", 0, 5, 0, 0).await; + assert_ip_pool_utilization(client, "pool2", 0, capacity2).await; // Create an instance explicitly using the non-default "other-pool". create_instance_with_pool(client, "pool2-inst", Some("pool2")).await; @@ -6189,10 +6189,10 @@ async fn test_instance_ephemeral_ip_from_correct_pool( // SNAT comes from default pool, but count does not change because // SNAT IPs can be shared. https://github.com/oxidecomputer/omicron/issues/5043 // is about getting SNAT IP from specified pool instead of default. - assert_ip_pool_utilization(client, "pool1", 2, 5, 0, 0).await; + assert_ip_pool_utilization(client, "pool1", 2, capacity1).await; // ephemeral IP comes from specified pool - assert_ip_pool_utilization(client, "pool2", 1, 5, 0, 0).await; + assert_ip_pool_utilization(client, "pool2", 1, capacity2).await; // make pool2 default and create instance with default pool. 
check that it now it comes from pool2 let _: views::IpPoolSiloLink = object_put( @@ -6210,9 +6210,9 @@ async fn test_instance_ephemeral_ip_from_correct_pool( ); // pool1 unchanged - assert_ip_pool_utilization(client, "pool1", 2, 5, 0, 0).await; - // +1 snat (now that pool2 is default) and +1 ephemeral - assert_ip_pool_utilization(client, "pool2", 3, 5, 0, 0).await; + assert_ip_pool_utilization(client, "pool1", 2, capacity1).await; + // +1 snat (now that pool2 is default) and +1 ephemeral, so 3 total + assert_ip_pool_utilization(client, "pool2", 3, capacity2).await; // try to delete association with pool1, but it fails because there is an // instance with an IP from the pool in this silo @@ -6232,11 +6232,11 @@ async fn test_instance_ephemeral_ip_from_correct_pool( stop_and_delete_instance(&cptestctx, "pool1-inst").await; stop_and_delete_instance(&cptestctx, "pool2-inst").await; - // pool1 is down to 0 because it had 1 snat + 1 ephemeral from pool1-inst + // pool1 is back up to 5 because it had 1 snat + 1 ephemeral from pool1-inst // and 1 snat from pool2-inst - assert_ip_pool_utilization(client, "pool1", 0, 5, 0, 0).await; + assert_ip_pool_utilization(client, "pool1", 0, capacity1).await; // pool2 drops one because it had 1 ephemeral from pool2-inst - assert_ip_pool_utilization(client, "pool2", 2, 5, 0, 0).await; + assert_ip_pool_utilization(client, "pool2", 2, capacity2).await; // now unlink works object_delete(client, &pool1_silo_url).await; @@ -6426,17 +6426,17 @@ async fn test_instance_attach_several_external_ips( let _ = create_project(&client, PROJECT_NAME).await; // Create a single (large) IP pool - let default_pool_range = IpRange::V4( - Ipv4Range::new( - std::net::Ipv4Addr::new(10, 0, 0, 1), - std::net::Ipv4Addr::new(10, 0, 0, 10), - ) - .unwrap(), - ); + let range = Ipv4Range::new( + std::net::Ipv4Addr::new(10, 0, 0, 1), + std::net::Ipv4Addr::new(10, 0, 0, 10), + ) + .unwrap(); + let capacity = range.len().into(); + let default_pool_range = IpRange::V4(range); create_ip_pool(&client, "default", Some(default_pool_range)).await; link_ip_pool(&client, "default", &DEFAULT_SILO.id(), true).await; - assert_ip_pool_utilization(client, "default", 0, 10, 0, 0).await; + assert_ip_pool_utilization(client, "default", 0, capacity).await; // Create several floating IPs for the instance, totalling 8 IPs. 
let mut external_ip_create = @@ -6468,7 +6468,7 @@ async fn test_instance_attach_several_external_ips( // 1 ephemeral + 7 floating + 1 SNAT const N_EXPECTED_IPS: u32 = 9; - assert_ip_pool_utilization(client, "default", N_EXPECTED_IPS, 10, 0, 0) + assert_ip_pool_utilization(client, "default", N_EXPECTED_IPS, capacity) .await; // Verify that all external IPs are visible on the instance and have @@ -6629,7 +6629,8 @@ async fn test_instance_create_in_silo(cptestctx: &ControlPlaneTestContext) { create_ip_pool(&client, "default", None).await; link_ip_pool(&client, "default", &silo.identity.id, true).await; - assert_ip_pool_utilization(client, "default", 0, 65536, 0, 0).await; + const CAPACITY: f64 = 65536.0; + assert_ip_pool_utilization(client, "default", 0, CAPACITY).await; // Create test projects NexusRequest::objects_post( diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index 52be987643d..8d55655ee71 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -830,7 +830,7 @@ async fn test_ipv4_ip_pool_utilization_total( let _pool = create_pool(client, "p0").await; - assert_ip_pool_utilization(client, "p0", 0, 0, 0, 0).await; + assert_ip_pool_utilization(client, "p0", 0, 0.0).await; let add_url = "/v1/system/ip-pools/p0/ranges/add"; @@ -844,7 +844,7 @@ async fn test_ipv4_ip_pool_utilization_total( ); object_create::(client, &add_url, &range).await; - assert_ip_pool_utilization(client, "p0", 0, 5, 0, 0).await; + assert_ip_pool_utilization(client, "p0", 0, 5.0).await; } // We're going to test adding an IPv6 pool and collecting its utilization, even @@ -877,7 +877,7 @@ async fn test_ipv6_ip_pool_utilization_total( .expect("should be able to create IPv6 pool"); // Check the utilization is zero. - assert_ip_pool_utilization(client, "p0", 0, 0, 0, 0).await; + assert_ip_pool_utilization(client, "p0", 0, 0.0).await; // Now let's add a gigantic range. 
This requires direct datastore // shenanigans because adding IPv6 ranges through the API is currently not @@ -890,21 +890,21 @@ async fn test_ipv6_ip_pool_utilization_total( .fetch_for(authz::Action::CreateChild) .await .expect("should be able to fetch pool we just created"); - let big_range = IpRange::V6( - Ipv6Range::new( - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 0), - std::net::Ipv6Addr::new( - 0xfd00, 0, 0, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, - ), - ) - .unwrap(), - ); + let ipv6_range = Ipv6Range::new( + std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 0), + std::net::Ipv6Addr::new( + 0xfd00, 0, 0, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + ), + ) + .unwrap(); + let big_range = IpRange::V6(ipv6_range); datastore .ip_pool_add_range(&opctx, &authz_pool, &db_pool, &big_range) .await .expect("could not add range"); - assert_ip_pool_utilization(client, "p0", 0, 0, 0, 2u128.pow(80)).await; + let capacity = ipv6_range.len() as f64; + assert_ip_pool_utilization(client, "p0", 0, capacity).await; } // Data for testing overlapping IP ranges diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index 8ee481a1a64..625e4fa7753 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -398,80 +398,20 @@ pub struct IpPool { pub identity: IdentityMetadata, } -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] -pub struct Ipv4Utilization { - /// The number of IPv4 addresses allocated from this pool - pub allocated: u32, - /// The total number of IPv4 addresses in the pool, i.e., the sum of the - /// lengths of the IPv4 ranges. Unlike IPv6 capacity, can be a 32-bit - /// integer because there are only 2^32 IPv4 addresses. - pub capacity: u32, -} - -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] -pub struct Ipv6Utilization { - /// The number of IPv6 addresses allocated from this pool. A 128-bit integer - /// string to match the capacity field. - #[serde(with = "U128String")] - pub allocated: u128, - - /// The total number of IPv6 addresses in the pool, i.e., the sum of the - /// lengths of the IPv6 ranges. An IPv6 range can contain up to 2^128 - /// addresses, so we represent this value in JSON as a numeric string with a - /// custom "uint128" format. - #[serde(with = "U128String")] - pub capacity: u128, -} - +/// The utilization of IP addresses in a pool. +/// +/// Note that both the count of remaining addresses and the total capacity are +/// integers, reported as floating point numbers. This accommodates allocations +/// larger than a 64-bit integer, which is common with IPv6 address spaces. With +/// very large IP Pools (> 2**53 addresses), integer precision will be lost, in +/// exchange for representing the entire range. In such a case the pool still +/// has many available addresses. #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct IpPoolUtilization { - /// Number of allocated and total available IPv4 addresses in pool - pub ipv4: Ipv4Utilization, - /// Number of allocated and total available IPv6 addresses in pool - pub ipv6: Ipv6Utilization, -} - -// Custom struct for serializing/deserializing u128 as a string. The serde -// docs will suggest using a module (or serialize_with and deserialize_with -// functions), but as discussed in the comments on the UserData de/serializer, -// schemars wants this to be a type, so it has to be a struct. 
-struct U128String; -impl U128String { - pub fn serialize(value: &u128, serializer: S) -> Result - where - S: serde::Serializer, - { - serializer.serialize_str(&value.to_string()) - } - - pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - let s = String::deserialize(deserializer)?; - s.parse().map_err(serde::de::Error::custom) - } -} - -impl JsonSchema for U128String { - fn schema_name() -> String { - "String".to_string() - } - - fn json_schema( - _: &mut schemars::gen::SchemaGenerator, - ) -> schemars::schema::Schema { - schemars::schema::SchemaObject { - instance_type: Some(schemars::schema::InstanceType::String.into()), - format: Some("uint128".to_string()), - ..Default::default() - } - .into() - } - - fn is_referenceable() -> bool { - false - } + /// The number of remaining addresses in the pool. + pub remaining: f64, + /// The total number of addresses in the pool. + pub capacity: f64, } /// An IP pool in the context of a silo diff --git a/openapi/nexus.json b/openapi/nexus.json index 8933823f9e4..01b45952a29 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -21389,28 +21389,23 @@ } }, "IpPoolUtilization": { + "description": "The utilization of IP addresses in a pool.\n\nNote that both the count of remaining addresses and the total capacity are integers, reported as floating point numbers. This accommodates allocations larger than a 64-bit integer, which is common with IPv6 address spaces. With very large IP Pools (> 2**53 addresses), integer precision will be lost, in exchange for representing the entire range. In such a case the pool still has many available addresses.", "type": "object", "properties": { - "ipv4": { - "description": "Number of allocated and total available IPv4 addresses in pool", - "allOf": [ - { - "$ref": "#/components/schemas/Ipv4Utilization" - } - ] + "capacity": { + "description": "The total number of addresses in the pool.", + "type": "number", + "format": "double" }, - "ipv6": { - "description": "Number of allocated and total available IPv6 addresses in pool", - "allOf": [ - { - "$ref": "#/components/schemas/Ipv6Utilization" - } - ] + "remaining": { + "description": "The number of remaining addresses in the pool.", + "type": "number", + "format": "double" } }, "required": [ - "ipv4", - "ipv6" + "capacity", + "remaining" ] }, "IpRange": { @@ -21463,27 +21458,6 @@ "last" ] }, - "Ipv4Utilization": { - "type": "object", - "properties": { - "allocated": { - "description": "The number of IPv4 addresses allocated from this pool", - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "capacity": { - "description": "The total number of IPv4 addresses in the pool, i.e., the sum of the lengths of the IPv4 ranges. Unlike IPv6 capacity, can be a 32-bit integer because there are only 2^32 IPv4 addresses.", - "type": "integer", - "format": "uint32", - "minimum": 0 - } - }, - "required": [ - "allocated", - "capacity" - ] - }, "Ipv6Net": { "example": "fd12:3456::/64", "title": "An IPv6 subnet", @@ -21514,25 +21488,6 @@ "last" ] }, - "Ipv6Utilization": { - "type": "object", - "properties": { - "allocated": { - "description": "The number of IPv6 addresses allocated from this pool. A 128-bit integer string to match the capacity field.", - "type": "string", - "format": "uint128" - }, - "capacity": { - "description": "The total number of IPv6 addresses in the pool, i.e., the sum of the lengths of the IPv6 ranges. 
An IPv6 range can contain up to 2^128 addresses, so we represent this value in JSON as a numeric string with a custom \"uint128\" format.", - "type": "string", - "format": "uint128" - } - }, - "required": [ - "allocated", - "capacity" - ] - }, "L4PortRange": { "example": "22", "title": "A range of IP ports", From 6d42a023447d4d6c36a92118f4378c2247899ae2 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 29 Aug 2025 16:56:34 -0700 Subject: [PATCH 21/38] (1/N) db_metadata_nexus schema changes, db queries. Populate the tables (#8924) Split off of https://github.com/oxidecomputer/omicron/pull/8845 Creates the schema, ensures it stays up-to-date. Does not attempt to read it. First part of #8501: adding schema for records, writing them. Not yet reading these records. --- nexus/db-model/src/db_metadata.rs | 50 ++ nexus/db-model/src/schema_versions.rs | 6 +- .../src/db/datastore/db_metadata.rs | 635 +++++++++++++++++- nexus/db-queries/src/db/datastore/rack.rs | 17 + nexus/db-schema/src/enums.rs | 1 + nexus/db-schema/src/schema.rs | 8 + .../reconfigurator/execution/src/database.rs | 24 + nexus/reconfigurator/execution/src/lib.rs | 35 + .../execution/src/omicron_zones.rs | 9 +- nexus/tests/integration_tests/schema.rs | 156 ++++- nexus/types/src/deployment/execution/spec.rs | 1 + schema/crdb/dbinit.sql | 82 ++- .../crdb/populate-db-metadata-nexus/up01.sql | 6 + .../crdb/populate-db-metadata-nexus/up02.sql | 11 + .../crdb/populate-db-metadata-nexus/up03.sql | 4 + .../crdb/populate-db-metadata-nexus/up04.sql | 16 + 16 files changed, 1027 insertions(+), 34 deletions(-) create mode 100644 nexus/reconfigurator/execution/src/database.rs create mode 100644 schema/crdb/populate-db-metadata-nexus/up01.sql create mode 100644 schema/crdb/populate-db-metadata-nexus/up02.sql create mode 100644 schema/crdb/populate-db-metadata-nexus/up03.sql create mode 100644 schema/crdb/populate-db-metadata-nexus/up04.sql diff --git a/nexus/db-model/src/db_metadata.rs b/nexus/db-model/src/db_metadata.rs index de7e2862eb7..080da4d423c 100644 --- a/nexus/db-model/src/db_metadata.rs +++ b/nexus/db-model/src/db_metadata.rs @@ -3,8 +3,14 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
use crate::SemverVersion; +use crate::impl_enum_type; +use crate::typed_uuid::DbTypedUuid; use chrono::{DateTime, Utc}; use nexus_db_schema::schema::db_metadata; +use nexus_db_schema::schema::db_metadata_nexus; +use omicron_uuid_kinds::{ + BlueprintKind, BlueprintUuid, OmicronZoneKind, OmicronZoneUuid, +}; use serde::{Deserialize, Serialize}; /// Internal database metadata @@ -33,3 +39,47 @@ impl DbMetadata { &self.version } } + +impl_enum_type!( + DbMetadataNexusStateEnum: + + #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, PartialEq, Serialize, Deserialize)] + pub enum DbMetadataNexusState; + + // Enum values + Active => b"active" + NotYet => b"not_yet" + Quiesced => b"quiesced" +); + +#[derive( + Queryable, Insertable, Debug, Clone, Selectable, Serialize, Deserialize, +)] +#[diesel(table_name = db_metadata_nexus)] +pub struct DbMetadataNexus { + nexus_id: DbTypedUuid, + last_drained_blueprint_id: Option>, + state: DbMetadataNexusState, +} + +impl DbMetadataNexus { + pub fn new(nexus_id: OmicronZoneUuid, state: DbMetadataNexusState) -> Self { + Self { + nexus_id: nexus_id.into(), + last_drained_blueprint_id: None, + state, + } + } + + pub fn state(&self) -> DbMetadataNexusState { + self.state + } + + pub fn nexus_id(&self) -> OmicronZoneUuid { + self.nexus_id.into() + } + + pub fn last_drained_blueprint_id(&self) -> Option { + self.last_drained_blueprint_id.map(|id| id.into()) + } +} diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs index 80df59b44f4..c471d07e50d 100644 --- a/nexus/db-model/src/schema_versions.rs +++ b/nexus/db-model/src/schema_versions.rs @@ -16,7 +16,7 @@ use std::{collections::BTreeMap, sync::LazyLock}; /// /// This must be updated when you change the database schema. Refer to /// schema/crdb/README.adoc in the root of this repository for details. -pub const SCHEMA_VERSION: Version = Version::new(184, 0, 0); +pub const SCHEMA_VERSION: Version = Version::new(185, 0, 0); /// List of all past database schema versions, in *reverse* order /// @@ -28,6 +28,7 @@ static KNOWN_VERSIONS: LazyLock> = LazyLock::new(|| { // | leaving the first copy as an example for the next person. // v // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"), + KnownVersion::new(185, "populate-db-metadata-nexus"), KnownVersion::new(184, "store-silo-admin-group-name"), KnownVersion::new(183, "add-ip-version-to-pools"), KnownVersion::new(182, "add-tuf-artifact-board"), @@ -228,6 +229,9 @@ static KNOWN_VERSIONS: LazyLock> = LazyLock::new(|| { /// The earliest supported schema version. pub const EARLIEST_SUPPORTED_VERSION: Version = Version::new(1, 0, 0); +/// The version where "db_metadata_nexus" was added. +pub const DB_METADATA_NEXUS_SCHEMA_VERSION: Version = Version::new(185, 0, 0); + /// Describes one version of the database schema #[derive(Debug, Clone)] struct KnownVersion { diff --git a/nexus/db-queries/src/db/datastore/db_metadata.rs b/nexus/db-queries/src/db/datastore/db_metadata.rs index dbc1de58571..43a60817848 100644 --- a/nexus/db-queries/src/db/datastore/db_metadata.rs +++ b/nexus/db-queries/src/db/datastore/db_metadata.rs @@ -4,18 +4,27 @@ //! [`DataStore`] methods on Database Metadata. 
-use super::DataStore; +use super::{DataStore, DbConnection}; +use crate::authz; +use crate::context::OpContext; + use anyhow::{Context, bail, ensure}; use async_bb8_diesel::{AsyncRunQueryDsl, AsyncSimpleConnection}; use chrono::Utc; use diesel::prelude::*; +use futures::FutureExt; use nexus_db_errors::ErrorHandler; use nexus_db_errors::public_error_from_diesel; use nexus_db_model::AllSchemaVersions; +use nexus_db_model::DbMetadataNexus; +use nexus_db_model::DbMetadataNexusState; use nexus_db_model::EARLIEST_SUPPORTED_VERSION; use nexus_db_model::SchemaUpgradeStep; use nexus_db_model::SchemaVersion; +use nexus_types::deployment::BlueprintZoneDisposition; use omicron_common::api::external::Error; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::OmicronZoneUuid; use semver::Version; use slog::{Logger, error, info, o}; use std::ops::Bound; @@ -340,6 +349,183 @@ impl DataStore { Ok(()) } + // Returns the access this Nexus has to the database + #[cfg(test)] + async fn database_nexus_access( + &self, + nexus_id: OmicronZoneUuid, + ) -> Result, Error> { + use nexus_db_schema::schema::db_metadata_nexus::dsl; + + let nexus_access: Option = dsl::db_metadata_nexus + .filter( + dsl::nexus_id.eq(nexus_db_model::to_db_typed_uuid(nexus_id)), + ) + .first_async(&*self.pool_connection_unauthorized().await?) + .await + .optional() + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + Ok(nexus_access) + } + + // Checks if any db_metadata_nexus records exist in the database using an + // existing connection + async fn database_nexus_access_any_exist_on_connection( + conn: &async_bb8_diesel::Connection, + ) -> Result { + use nexus_db_schema::schema::db_metadata_nexus::dsl; + + let exists: bool = diesel::select(diesel::dsl::exists( + dsl::db_metadata_nexus.select(dsl::nexus_id), + )) + .get_result_async(conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + Ok(exists) + } + + /// Deletes the "db_metadata_nexus" record for a Nexus ID, if it exists. + pub async fn database_nexus_access_delete( + &self, + opctx: &OpContext, + nexus_id: OmicronZoneUuid, + ) -> Result<(), Error> { + use nexus_db_schema::schema::db_metadata_nexus::dsl; + + opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + let conn = &*self.pool_connection_authorized(&opctx).await?; + + diesel::delete( + dsl::db_metadata_nexus + .filter(dsl::nexus_id.eq(nexus_id.into_untyped_uuid())), + ) + .execute_async(conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + Ok(()) + } + + /// Propagate the nexus records to the database if and only if + /// the blueprint is the current target. + /// + /// If any of these records already exist, they are unmodified. + pub async fn database_nexus_access_create( + &self, + opctx: &OpContext, + blueprint: &nexus_types::deployment::Blueprint, + ) -> Result<(), Error> { + opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + + // TODO: Without https://github.com/oxidecomputer/omicron/pull/8863, we + // treat all Nexuses as active. Some will become "not_yet", depending on + // the Nexus Generation, once it exists. 
+ let active_nexus_zones = blueprint + .all_omicron_zones(BlueprintZoneDisposition::is_in_service) + .filter_map(|(_sled, zone_cfg)| { + if zone_cfg.zone_type.is_nexus() { + Some(zone_cfg) + } else { + None + } + }); + let new_nexuses = active_nexus_zones + .map(|z| DbMetadataNexus::new(z.id, DbMetadataNexusState::Active)) + .collect::>(); + + let conn = &*self.pool_connection_authorized(&opctx).await?; + self.transaction_if_current_blueprint_is( + &conn, + "database_nexus_access_create", + opctx, + blueprint.id, + |conn| { + let new_nexuses = new_nexuses.clone(); + async move { + use nexus_db_schema::schema::db_metadata_nexus::dsl; + + diesel::insert_into(dsl::db_metadata_nexus) + .values(new_nexuses) + .on_conflict(dsl::nexus_id) + .do_nothing() + .execute_async(conn) + .await?; + Ok(()) + } + .boxed() + }, + ) + .await + } + + // Registers a Nexus instance as having active access to the database + #[cfg(test)] + async fn database_nexus_access_insert( + &self, + nexus_id: OmicronZoneUuid, + state: DbMetadataNexusState, + ) -> Result<(), Error> { + use nexus_db_schema::schema::db_metadata_nexus::dsl; + + let new_nexus = DbMetadataNexus::new(nexus_id, state); + + diesel::insert_into(dsl::db_metadata_nexus) + .values(new_nexus) + .on_conflict(dsl::nexus_id) + .do_update() + .set(dsl::state.eq(diesel::upsert::excluded(dsl::state))) + .execute_async(&*self.pool_connection_unauthorized().await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + Ok(()) + } + + /// Initializes Nexus database access records from a blueprint using an + /// existing connection + /// + /// This function finds all Nexus zones in the given blueprint and creates + /// active database access records for them. Used during RSS rack setup. + /// + /// Returns an error if: + /// - Any db_metadata_nexus records already exist (should only be called + /// during initial setup) + pub async fn initialize_nexus_access_from_blueprint_on_connection( + &self, + conn: &async_bb8_diesel::Connection, + nexus_zone_ids: Vec, + ) -> Result<(), Error> { + use nexus_db_schema::schema::db_metadata_nexus::dsl; + + // Ensure no db_metadata_nexus records already exist + let any_records_exist = + Self::database_nexus_access_any_exist_on_connection(conn).await?; + if any_records_exist { + return Err(Error::conflict( + "Cannot initialize Nexus access from blueprint: \ + db_metadata_nexus records already exist. 
This function should \ + only be called during initial rack setup.", + )); + } + + // Create db_metadata_nexus records for all Nexus zones + let new_nexuses: Vec = nexus_zone_ids + .iter() + .map(|&nexus_id| { + DbMetadataNexus::new(nexus_id, DbMetadataNexusState::Active) + }) + .collect(); + + diesel::insert_into(dsl::db_metadata_nexus) + .values(new_nexuses) + .execute_async(conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + Ok(()) + } pub async fn database_schema_version( &self, ) -> Result<(Version, Option), Error> { @@ -497,8 +683,35 @@ mod test { use crate::db::pub_test_utils::TestDatabase; use camino::Utf8Path; use camino_tempfile::Utf8TempDir; + use id_map::IdMap; use nexus_db_model::SCHEMA_VERSION; + use nexus_inventory::now_db_precision; + use nexus_types::deployment::Blueprint; + use nexus_types::deployment::BlueprintHostPhase2DesiredSlots; + use nexus_types::deployment::BlueprintSledConfig; + use nexus_types::deployment::BlueprintTarget; + use nexus_types::deployment::BlueprintZoneConfig; + use nexus_types::deployment::BlueprintZoneDisposition; + use nexus_types::deployment::BlueprintZoneImageSource; + use nexus_types::deployment::BlueprintZoneType; + use nexus_types::deployment::CockroachDbPreserveDowngrade; + use nexus_types::deployment::OximeterReadMode; + use nexus_types::deployment::PendingMgsUpdates; + use nexus_types::deployment::PlanningReport; + use nexus_types::deployment::blueprint_zone_type; + use nexus_types::external_api::views::SledState; + use nexus_types::inventory::NetworkInterface; + use nexus_types::inventory::NetworkInterfaceKind; + use omicron_common::api::external::Generation; + use omicron_common::api::external::MacAddr; + use omicron_common::api::external::Vni; + use omicron_common::zpool_name::ZpoolName; use omicron_test_utils::dev; + use omicron_uuid_kinds::BlueprintUuid; + use omicron_uuid_kinds::ExternalIpUuid; + use omicron_uuid_kinds::SledUuid; + use omicron_uuid_kinds::ZpoolUuid; + use std::collections::BTreeMap; // Confirms that calling the internal "ensure_schema" function can succeed // when the database is already at that version. 
@@ -768,6 +981,426 @@ mod test { .expect("Failed to get data"); assert_eq!(data, "abcd"); + db.terminate().await; + logctx.cleanup_successful(); + } + fn create_test_blueprint( + nexus_zones: Vec<(OmicronZoneUuid, BlueprintZoneDisposition)>, + ) -> Blueprint { + let blueprint_id = BlueprintUuid::new_v4(); + let sled_id = SledUuid::new_v4(); + + let zones: IdMap = nexus_zones + .into_iter() + .map(|(zone_id, disposition)| BlueprintZoneConfig { + disposition, + id: zone_id, + filesystem_pool: ZpoolName::new_external(ZpoolUuid::new_v4()), + zone_type: BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { + internal_address: "[::1]:0".parse().unwrap(), + external_dns_servers: Vec::new(), + external_ip: nexus_types::deployment::OmicronZoneExternalFloatingIp { + id: ExternalIpUuid::new_v4(), + ip: std::net::IpAddr::V6(std::net::Ipv6Addr::LOCALHOST), + }, + external_tls: true, + nic: NetworkInterface { + id: uuid::Uuid::new_v4(), + kind: NetworkInterfaceKind::Service { + id: zone_id.into_untyped_uuid(), + }, + name: "test-nic".parse().unwrap(), + ip: "192.168.1.1".parse().unwrap(), + mac: MacAddr::random_system(), + subnet: ipnetwork::IpNetwork::V4( + "192.168.1.0/24".parse().unwrap() + ).into(), + vni: Vni::try_from(100).unwrap(), + primary: true, + slot: 0, + transit_ips: Vec::new(), + }, + }), + image_source: BlueprintZoneImageSource::InstallDataset, + }) + .collect(); + + let mut sleds = BTreeMap::new(); + sleds.insert( + sled_id, + BlueprintSledConfig { + state: SledState::Active, + sled_agent_generation: Generation::new(), + zones, + disks: IdMap::new(), + datasets: IdMap::new(), + remove_mupdate_override: None, + host_phase_2: BlueprintHostPhase2DesiredSlots::current_contents( + ), + }, + ); + + Blueprint { + id: blueprint_id, + sleds, + pending_mgs_updates: PendingMgsUpdates::new(), + parent_blueprint_id: None, + internal_dns_version: Generation::new(), + external_dns_version: Generation::new(), + target_release_minimum_generation: Generation::new(), + cockroachdb_fingerprint: String::new(), + cockroachdb_setting_preserve_downgrade: + CockroachDbPreserveDowngrade::DoNotModify, + clickhouse_cluster_config: None, + oximeter_read_mode: OximeterReadMode::SingleNode, + oximeter_read_version: Generation::new(), + time_created: now_db_precision(), + creator: "test suite".to_string(), + comment: "test blueprint".to_string(), + report: PlanningReport::new(blueprint_id), + } + } + + #[tokio::test] + async fn test_database_nexus_access_create() { + let logctx = dev::test_setup_log("test_database_nexus_access_create"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let datastore = db.datastore(); + let opctx = db.opctx(); + + // Create a blueprint with two in-service Nexus zones, + // and one expunged Nexus. 
+ let nexus1_id = OmicronZoneUuid::new_v4(); + let nexus2_id = OmicronZoneUuid::new_v4(); + let expunged_nexus = OmicronZoneUuid::new_v4(); + let blueprint = create_test_blueprint(vec![ + (nexus1_id, BlueprintZoneDisposition::InService), + (nexus2_id, BlueprintZoneDisposition::InService), + ( + expunged_nexus, + BlueprintZoneDisposition::Expunged { + as_of_generation: Generation::new(), + ready_for_cleanup: true, + }, + ), + ]); + + // Insert the blueprint and make it the target + datastore + .blueprint_insert(&opctx, &blueprint) + .await + .expect("Failed to insert blueprint"); + datastore + .blueprint_target_set_current( + &opctx, + BlueprintTarget { + target_id: blueprint.id, + enabled: false, + time_made_target: chrono::Utc::now(), + }, + ) + .await + .expect("Failed to set blueprint target"); + + // Create nexus access records + datastore + .database_nexus_access_create(&opctx, &blueprint) + .await + .expect("Failed to create nexus access"); + + // Verify records were created with Active state + let nexus1_access = datastore + .database_nexus_access(nexus1_id) + .await + .expect("Failed to get nexus1 access"); + let nexus2_access = datastore + .database_nexus_access(nexus2_id) + .await + .expect("Failed to get nexus2 access"); + let expunged_access = datastore + .database_nexus_access(expunged_nexus) + .await + .expect("Failed to get expunged access"); + + assert!(nexus1_access.is_some(), "nexus1 should have access record"); + assert!(nexus2_access.is_some(), "nexus2 should have access record"); + assert!( + expunged_access.is_none(), + "expunged nexus should not have access record" + ); + + let nexus1_record = nexus1_access.unwrap(); + let nexus2_record = nexus2_access.unwrap(); + assert_eq!(nexus1_record.state(), DbMetadataNexusState::Active); + assert_eq!(nexus2_record.state(), DbMetadataNexusState::Active); + + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_database_nexus_access_create_idempotent() { + let logctx = + dev::test_setup_log("test_database_nexus_access_create_idempotent"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let datastore = db.datastore(); + let opctx = db.opctx(); + + // Create a blueprint with one Nexus zone + let nexus_id = OmicronZoneUuid::new_v4(); + let blueprint = create_test_blueprint(vec![( + nexus_id, + BlueprintZoneDisposition::InService, + )]); + + // Insert the blueprint and make it the target + datastore + .blueprint_insert(&opctx, &blueprint) + .await + .expect("Failed to insert blueprint"); + datastore + .blueprint_target_set_current( + &opctx, + BlueprintTarget { + target_id: blueprint.id, + enabled: false, + time_made_target: chrono::Utc::now(), + }, + ) + .await + .expect("Failed to set blueprint target"); + + // Create nexus access records (first time) + datastore + .database_nexus_access_create(&opctx, &blueprint) + .await + .expect("Failed to create nexus access (first time)"); + + // Verify record was created + async fn confirm_state( + datastore: &DataStore, + nexus_id: OmicronZoneUuid, + expected_state: DbMetadataNexusState, + ) { + let state = datastore + .database_nexus_access(nexus_id) + .await + .expect("Failed to get nexus access after first create") + .expect("Entry for Nexus should have been inserted"); + assert_eq!(state.state(), expected_state); + } + + confirm_state(datastore, nexus_id, DbMetadataNexusState::Active).await; + + // Creating the record again: not an error. 
+ datastore + .database_nexus_access_create(&opctx, &blueprint) + .await + .expect("Failed to create nexus access (first time)"); + confirm_state(datastore, nexus_id, DbMetadataNexusState::Active).await; + + // Manually make the record "Quiesced". + use nexus_db_schema::schema::db_metadata_nexus::dsl; + diesel::update(dsl::db_metadata_nexus) + .filter(dsl::nexus_id.eq(nexus_id.into_untyped_uuid())) + .set(dsl::state.eq(DbMetadataNexusState::Quiesced)) + .execute_async( + &*datastore.pool_connection_unauthorized().await.unwrap(), + ) + .await + .expect("Failed to update record"); + confirm_state(datastore, nexus_id, DbMetadataNexusState::Quiesced) + .await; + + // Create nexus access records another time - should be idempotent, + // but should be "on-conflict, ignore". + datastore + .database_nexus_access_create(&opctx, &blueprint) + .await + .expect("Failed to create nexus access (second time)"); + confirm_state(datastore, nexus_id, DbMetadataNexusState::Quiesced) + .await; + + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_database_nexus_access_create_fails_wrong_target_blueprint() { + let logctx = dev::test_setup_log( + "test_database_nexus_access_create_fails_wrong_target_blueprint", + ); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let datastore = db.datastore(); + let opctx = db.opctx(); + + // Create two different blueprints + let nexus_id = OmicronZoneUuid::new_v4(); + let target_blueprint = create_test_blueprint(vec![( + nexus_id, + BlueprintZoneDisposition::InService, + )]); + let non_target_blueprint = create_test_blueprint(vec![( + nexus_id, + BlueprintZoneDisposition::InService, + )]); + + // Insert both blueprints + datastore + .blueprint_insert(&opctx, &target_blueprint) + .await + .expect("Failed to insert target blueprint"); + datastore + .blueprint_insert(&opctx, &non_target_blueprint) + .await + .expect("Failed to insert non-target blueprint"); + + // Set the first blueprint as the target + datastore + .blueprint_target_set_current( + &opctx, + BlueprintTarget { + target_id: target_blueprint.id, + enabled: false, + time_made_target: chrono::Utc::now(), + }, + ) + .await + .expect("Failed to set target blueprint"); + + // Try to create nexus access records using the non-target blueprint. 
+ // This should fail because the transaction should check if the + // blueprint is the current target + let result = datastore + .database_nexus_access_create(&opctx, &non_target_blueprint) + .await; + assert!( + result.is_err(), + "Creating nexus access with wrong target blueprint should fail" + ); + + // Verify no records were created for the nexus + let access = datastore + .database_nexus_access(nexus_id) + .await + .expect("Failed to get nexus access"); + assert!( + access.is_none(), + "No access record should exist when wrong blueprint is used" + ); + + // Verify that using the correct target blueprint works + datastore + .database_nexus_access_create(&opctx, &target_blueprint) + .await + .expect( + "Creating nexus access with correct blueprint should succeed", + ); + + let access_after_correct = datastore + .database_nexus_access(nexus_id) + .await + .expect("Failed to get nexus access after correct blueprint"); + assert!( + access_after_correct.is_some(), + "Access record should exist after using correct target blueprint" + ); + + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_database_nexus_access_delete() { + let logctx = dev::test_setup_log("test_database_nexus_access_delete"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let datastore = db.datastore(); + let opctx = db.opctx(); + + // Create test nexus IDs + let nexus1_id = OmicronZoneUuid::new_v4(); + let nexus2_id = OmicronZoneUuid::new_v4(); + + // Insert records directly using the test method + datastore + .database_nexus_access_insert( + nexus1_id, + DbMetadataNexusState::Active, + ) + .await + .expect("Failed to insert nexus1 access"); + datastore + .database_nexus_access_insert( + nexus2_id, + DbMetadataNexusState::NotYet, + ) + .await + .expect("Failed to insert nexus2 access"); + + // Verify records were created + let nexus1_before = datastore + .database_nexus_access(nexus1_id) + .await + .expect("Failed to get nexus1 access"); + let nexus2_before = datastore + .database_nexus_access(nexus2_id) + .await + .expect("Failed to get nexus2 access"); + assert!(nexus1_before.is_some(), "nexus1 should have access record"); + assert!(nexus2_before.is_some(), "nexus2 should have access record"); + + // Delete nexus1 record + datastore + .database_nexus_access_delete(&opctx, nexus1_id) + .await + .expect("Failed to delete nexus1 access"); + + // Verify nexus1 record was deleted, nexus2 record remains + let nexus1_after = datastore + .database_nexus_access(nexus1_id) + .await + .expect("Failed to get nexus1 access after delete"); + let nexus2_after = datastore + .database_nexus_access(nexus2_id) + .await + .expect("Failed to get nexus2 access after delete"); + assert!( + nexus1_after.is_none(), + "nexus1 should not have access record after delete" + ); + assert!( + nexus2_after.is_some(), + "nexus2 should still have access record" + ); + + // Delete nexus2 record + datastore + .database_nexus_access_delete(&opctx, nexus2_id) + .await + .expect("Failed to delete nexus2 access"); + + // Verify nexus2 record was also deleted + let nexus2_final = datastore + .database_nexus_access(nexus2_id) + .await + .expect("Failed to get nexus2 access after final delete"); + assert!( + nexus2_final.is_none(), + "nexus2 should not have access record after delete" + ); + + // Confirm deletion is idempotent + datastore + .database_nexus_access_delete(&opctx, nexus1_id) + .await + .expect("Failed to delete nexus1 access idempotently"); + + // This also means deleting non-existent 
records should be fine + datastore + .database_nexus_access_delete(&opctx, OmicronZoneUuid::new_v4()) + .await + .expect("Failed to delete non-existent record"); + db.terminate().await; logctx.cleanup_successful(); } diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index a2fcb2c6c82..31e0ec43284 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -727,6 +727,7 @@ impl DataStore { // - Zpools // - Datasets // - A blueprint + // - Nexus database access records // // Which RSS has already allocated during bootstrapping. @@ -793,6 +794,22 @@ impl DataStore { DieselError::RollbackTransaction })?; + // Insert Nexus database access records + self.initialize_nexus_access_from_blueprint_on_connection( + &conn, + blueprint.all_omicron_zones(BlueprintZoneDisposition::is_in_service) + .filter_map(|(_sled, zone_cfg)| { + if zone_cfg.zone_type.is_nexus() { + Some(zone_cfg.id) + } else { + None + } + }).collect(), + ).await.map_err(|e| { + err.set(RackInitError::BlueprintTargetSet(e)).unwrap(); + DieselError::RollbackTransaction + })?; + // Allocate networking records for all services. for (_, zone_config) in blueprint.all_omicron_zones(BlueprintZoneDisposition::is_in_service) { self.rack_populate_service_networking_records( diff --git a/nexus/db-schema/src/enums.rs b/nexus/db-schema/src/enums.rs index 1766054c9ad..bac6d5ae3b4 100644 --- a/nexus/db-schema/src/enums.rs +++ b/nexus/db-schema/src/enums.rs @@ -37,6 +37,7 @@ define_enums! { CabooseWhichEnum => "caboose_which", ClickhouseModeEnum => "clickhouse_mode", DatasetKindEnum => "dataset_kind", + DbMetadataNexusStateEnum => "db_metadata_nexus_state", DnsGroupEnum => "dns_group", DownstairsClientStopRequestReasonEnum => "downstairs_client_stop_request_reason_type", DownstairsClientStoppedReasonEnum => "downstairs_client_stopped_reason_type", diff --git a/nexus/db-schema/src/schema.rs b/nexus/db-schema/src/schema.rs index 28a168d6f76..4f8bd54a7a2 100644 --- a/nexus/db-schema/src/schema.rs +++ b/nexus/db-schema/src/schema.rs @@ -2373,6 +2373,14 @@ table! { } } +table! { + db_metadata_nexus (nexus_id) { + nexus_id -> Uuid, + last_drained_blueprint_id -> Nullable, + state -> crate::enums::DbMetadataNexusStateEnum, + } +} + table! { migration (id) { id -> Uuid, diff --git a/nexus/reconfigurator/execution/src/database.rs b/nexus/reconfigurator/execution/src/database.rs new file mode 100644 index 00000000000..9652e53ec1a --- /dev/null +++ b/nexus/reconfigurator/execution/src/database.rs @@ -0,0 +1,24 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Manages deployment of records into the database. + +use anyhow::anyhow; +use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::DataStore; +use nexus_types::deployment::Blueprint; + +/// Idempotently ensure that the Nexus records for the zones are populated +/// in the database. 
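+///
+/// Datastore errors are returned to the caller; the execution step that
+/// drives this function reports them as step warnings rather than failing
+/// blueprint execution outright.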
+pub(crate) async fn deploy_db_metadata_nexus_records( + opctx: &OpContext, + datastore: &DataStore, + blueprint: &Blueprint, +) -> Result<(), anyhow::Error> { + datastore + .database_nexus_access_create(opctx, blueprint) + .await + .map_err(|err| anyhow!(err))?; + Ok(()) +} diff --git a/nexus/reconfigurator/execution/src/lib.rs b/nexus/reconfigurator/execution/src/lib.rs index 43c09485557..adfda2e5958 100644 --- a/nexus/reconfigurator/execution/src/lib.rs +++ b/nexus/reconfigurator/execution/src/lib.rs @@ -32,8 +32,10 @@ use tokio::sync::watch; use update_engine::StepSuccess; use update_engine::StepWarning; use update_engine::merge_anyhow_list; + mod clickhouse; mod cockroachdb; +mod database; mod dns; mod omicron_physical_disks; mod omicron_sled_config; @@ -196,6 +198,13 @@ pub async fn realize_blueprint( ) .into_shared(); + register_deploy_db_metadata_nexus_records_step( + &engine.for_component(ExecutionComponent::DeployNexusRecords), + &opctx, + datastore, + blueprint, + ); + register_deploy_sled_configs_step( &engine.for_component(ExecutionComponent::SledAgent), &opctx, @@ -390,6 +399,32 @@ fn register_sled_list_step<'a>( .register() } +fn register_deploy_db_metadata_nexus_records_step<'a>( + registrar: &ComponentRegistrar<'_, 'a>, + opctx: &'a OpContext, + datastore: &'a DataStore, + blueprint: &'a Blueprint, +) { + registrar + .new_step( + ExecutionStepId::Ensure, + "Ensure db_metadata_nexus_state records exist", + async move |_cx| match database::deploy_db_metadata_nexus_records( + opctx, &datastore, &blueprint, + ) + .await + { + Ok(()) => StepSuccess::new(()).into(), + Err(err) => StepWarning::new( + (), + err.context("ensuring db_metadata_nexus_state").to_string(), + ) + .into(), + }, + ) + .register(); +} + fn register_deploy_sled_configs_step<'a>( registrar: &ComponentRegistrar<'_, 'a>, opctx: &'a OpContext, diff --git a/nexus/reconfigurator/execution/src/omicron_zones.rs b/nexus/reconfigurator/execution/src/omicron_zones.rs index 28c981fd90e..74e4625358d 100644 --- a/nexus/reconfigurator/execution/src/omicron_zones.rs +++ b/nexus/reconfigurator/execution/src/omicron_zones.rs @@ -72,10 +72,13 @@ async fn clean_up_expunged_zones_impl( )); let result = match &config.zone_type { - // Zones which need no cleanup work after expungement. - BlueprintZoneType::Nexus(_) => None, - // Zones which need cleanup after expungement. + BlueprintZoneType::Nexus(_) => Some( + datastore + .database_nexus_access_delete(&opctx, config.id) + .await + .map_err(|err| anyhow::anyhow!(err)), + ), BlueprintZoneType::CockroachDb(_) => { if decommission_cockroach { Some( diff --git a/nexus/tests/integration_tests/schema.rs b/nexus/tests/integration_tests/schema.rs index 9111e6a0949..db6c0cb6eaf 100644 --- a/nexus/tests/integration_tests/schema.rs +++ b/nexus/tests/integration_tests/schema.rs @@ -2889,6 +2889,157 @@ fn after_171_0_0<'a>(ctx: &'a MigrationContext<'a>) -> BoxFuture<'a, ()> { }) } +const NEXUS_ID_185_0: &str = "387433f9-1473-4ca2-b156-9670452985e0"; +const EXPUNGED_NEXUS_ID_185_0: &str = "287433f9-1473-4ca2-b156-9670452985e0"; +const OLD_NEXUS_ID_185_0: &str = "187433f9-1473-4ca2-b156-9670452985e0"; + +const BP_ID_185_0: &str = "5a5ff941-3b5a-403b-9fda-db2049f4c736"; +const OLD_BP_ID_185_0: &str = "4a5ff941-3b5a-403b-9fda-db2049f4c736"; + +fn before_185_0_0<'a>(ctx: &'a MigrationContext<'a>) -> BoxFuture<'a, ()> { + Box::pin(async move { + // Create a blueprint which contains a Nexus - we'll use this for the migration. + // + // It also contains an exupnged Nexus, which should be ignored. 
+ ctx.client + .execute( + &format!( + "INSERT INTO omicron.public.bp_target + (version, blueprint_id, enabled, time_made_target) + VALUES + (1, '{BP_ID_185_0}', true, now());", + ), + &[], + ) + .await + .expect("inserted bp_target rows for 182"); + ctx.client + .execute( + &format!( + "INSERT INTO omicron.public.bp_omicron_zone ( + blueprint_id, sled_id, id, zone_type, + primary_service_ip, primary_service_port, + second_service_ip, second_service_port, + dataset_zpool_name, bp_nic_id, + dns_gz_address, dns_gz_address_index, + ntp_ntp_servers, ntp_dns_servers, ntp_domain, + nexus_external_tls, nexus_external_dns_servers, + snat_ip, snat_first_port, snat_last_port, + external_ip_id, filesystem_pool, disposition, + disposition_expunged_as_of_generation, + disposition_expunged_ready_for_cleanup, + image_source, image_artifact_sha256 + ) + VALUES ( + '{BP_ID_185_0}', gen_random_uuid(), '{NEXUS_ID_185_0}', + 'nexus', '192.168.1.10', 8080, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, false, ARRAY[]::INET[], + NULL, NULL, NULL, NULL, gen_random_uuid(), + 'in_service', NULL, false, 'install_dataset', NULL + ), + ( + '{BP_ID_185_0}', gen_random_uuid(), + '{EXPUNGED_NEXUS_ID_185_0}', 'nexus', '192.168.1.11', + 8080, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, false, ARRAY[]::INET[], NULL, NULL, NULL, NULL, + gen_random_uuid(), 'expunged', 1, false, + 'install_dataset', NULL + );" + ), + &[], + ) + .await + .expect("inserted bp_omicron_zone rows for 182"); + + // ALSO create an old blueprint, which isn't the latest target. + // + // We should ignore this one! No rows should be inserted for old data. + ctx.client + .execute( + &format!( + "INSERT INTO omicron.public.bp_target + (version, blueprint_id, enabled, time_made_target) + VALUES + (0, '{OLD_BP_ID_185_0}', true, now());", + ), + &[], + ) + .await + .expect("inserted bp_target rows for 182"); + ctx.client + .execute( + &format!( + "INSERT INTO omicron.public.bp_omicron_zone ( + blueprint_id, sled_id, id, zone_type, + primary_service_ip, primary_service_port, + second_service_ip, second_service_port, + dataset_zpool_name, bp_nic_id, + dns_gz_address, dns_gz_address_index, + ntp_ntp_servers, ntp_dns_servers, ntp_domain, + nexus_external_tls, nexus_external_dns_servers, + snat_ip, snat_first_port, snat_last_port, + external_ip_id, filesystem_pool, disposition, + disposition_expunged_as_of_generation, + disposition_expunged_ready_for_cleanup, + image_source, image_artifact_sha256 + ) + VALUES ( + '{OLD_BP_ID_185_0}', gen_random_uuid(), + '{OLD_NEXUS_ID_185_0}', 'nexus', '192.168.1.10', 8080, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, + false, ARRAY[]::INET[], NULL, NULL, NULL, + NULL, gen_random_uuid(), 'in_service', + NULL, false, 'install_dataset', NULL + );" + ), + &[], + ) + .await + .expect("inserted bp_omicron_zone rows for 182"); + }) +} + +fn after_185_0_0<'a>(ctx: &'a MigrationContext<'a>) -> BoxFuture<'a, ()> { + Box::pin(async move { + // After the migration, the new row should be created - only for Nexuses + // in the latest blueprint. + // + // Note that "OLD_NEXUS_ID_185_0" doesn't get a row - it's in an old + // blueprint. 
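+        //
+        // "EXPUNGED_NEXUS_ID_185_0" is skipped as well: the backfill only
+        // inserts rows for non-expunged Nexus zones in the target blueprint.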
+ let rows = ctx + .client + .query( + "SELECT + nexus_id, + last_drained_blueprint_id, + state + FROM omicron.public.db_metadata_nexus;", + &[], + ) + .await + .expect("queried post-migration inv_sled_config_reconciler"); + + let rows = process_rows(&rows); + assert_eq!(rows.len(), 1); + let row = &rows[0]; + + // Create a new row for the Nexuses in the target blueprint + assert_eq!( + row.values[0].expect("nexus_id").unwrap(), + &AnySqlType::Uuid(NEXUS_ID_185_0.parse().unwrap()) + ); + assert_eq!(row.values[1].expect("last_drained_blueprint_id"), None); + assert_eq!( + row.values[2].expect("state").unwrap(), + &AnySqlType::Enum(SqlEnum::from(( + "db_metadata_nexus_state", + "active" + ))) + ); + }) +} + // Lazily initializes all migration checks. The combination of Rust function // pointers and async makes defining a static table fairly painful, so we're // using lazy initialization instead. @@ -2987,7 +3138,10 @@ fn get_migration_checks() -> BTreeMap { Version::new(171, 0, 0), DataMigrationFns::new().before(before_171_0_0).after(after_171_0_0), ); - + map.insert( + Version::new(185, 0, 0), + DataMigrationFns::new().before(before_185_0_0).after(after_185_0_0), + ); map } diff --git a/nexus/types/src/deployment/execution/spec.rs b/nexus/types/src/deployment/execution/spec.rs index 482355dfee5..df02d3c58a3 100644 --- a/nexus/types/src/deployment/execution/spec.rs +++ b/nexus/types/src/deployment/execution/spec.rs @@ -30,6 +30,7 @@ pub enum ExecutionComponent { ExternalNetworking, SupportBundles, SledList, + DeployNexusRecords, SledAgent, PhysicalDisks, OmicronZones, diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index e6e2949c08b..bad381cb5c8 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -5630,29 +5630,6 @@ CREATE INDEX IF NOT EXISTS lookup_region_snapshot_replacement_step_by_state CREATE INDEX IF NOT EXISTS lookup_region_snapshot_replacement_step_by_old_volume_id on omicron.public.region_snapshot_replacement_step (old_snapshot_volume_id); -/* - * Metadata for the schema itself. This version number isn't great, as there's - * nothing to ensure it gets bumped when it should be, but it's a start. - */ -CREATE TABLE IF NOT EXISTS omicron.public.db_metadata ( - -- There should only be one row of this table for the whole DB. - -- It's a little goofy, but filter on "singleton = true" before querying - -- or applying updates, and you'll access the singleton row. - -- - -- We also add a constraint on this table to ensure it's not possible to - -- access the version of this table with "singleton = false". - singleton BOOL NOT NULL PRIMARY KEY, - time_created TIMESTAMPTZ NOT NULL, - time_modified TIMESTAMPTZ NOT NULL, - -- Semver representation of the DB version - version STRING(64) NOT NULL, - - -- (Optional) Semver representation of the DB version to which we're upgrading - target_version STRING(64), - - CHECK (singleton = true) -); - -- An allowlist of IP addresses that can make requests to user-facing services. CREATE TABLE IF NOT EXISTS omicron.public.allow_list ( id UUID PRIMARY KEY, @@ -6553,10 +6530,59 @@ ON omicron.public.host_ereport ( ) WHERE time_deleted IS NULL; -/* - * Keep this at the end of file so that the database does not contain a version - * until it is fully populated. - */ +-- Metadata for the schema itself. +-- +-- This table may be read by Nexuses with different notions of "what the schema should be". +-- Unlike other tables in the database, caution should be taken when upgrading this schema. 
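+--
+-- (The column definitions below are unchanged; the table has simply been
+-- moved later in the file, next to the new handoff-related tables.)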
+CREATE TABLE IF NOT EXISTS omicron.public.db_metadata ( + -- There should only be one row of this table for the whole DB. + -- It's a little goofy, but filter on "singleton = true" before querying + -- or applying updates, and you'll access the singleton row. + -- + -- We also add a constraint on this table to ensure it's not possible to + -- access the version of this table with "singleton = false". + singleton BOOL NOT NULL PRIMARY KEY, + time_created TIMESTAMPTZ NOT NULL, + time_modified TIMESTAMPTZ NOT NULL, + -- Semver representation of the DB version + version STRING(64) NOT NULL, + + -- (Optional) Semver representation of the DB version to which we're upgrading + target_version STRING(64), + + CHECK (singleton = true) +); + +CREATE TYPE IF NOT EXISTS omicron.public.db_metadata_nexus_state AS ENUM ( + -- This Nexus is allowed to access this database + 'active', + + -- This Nexus is not yet allowed to access the database + 'not_yet', + + -- This Nexus has committed to no longer accessing this database + 'quiesced' +); + +-- Nexuses which may be attempting to access the database, and a state +-- which identifies if they should be allowed to do so. +-- +-- This table is used during upgrade implement handoff between old and new +-- Nexus zones. It is read by all Nexuses during initialization to identify +-- if they should have access to the database. +CREATE TABLE IF NOT EXISTS omicron.public.db_metadata_nexus ( + nexus_id UUID NOT NULL PRIMARY KEY, + last_drained_blueprint_id UUID, + state omicron.public.db_metadata_nexus_state NOT NULL +); + +CREATE UNIQUE INDEX IF NOT EXISTS lookup_db_metadata_nexus_by_state on omicron.public.db_metadata_nexus ( + state, + nexus_id +); + +-- Keep this at the end of file so that the database does not contain a version +-- until it is fully populated. INSERT INTO omicron.public.db_metadata ( singleton, time_created, @@ -6564,7 +6590,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - (TRUE, NOW(), NOW(), '184.0.0', NULL) + (TRUE, NOW(), NOW(), '185.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/schema/crdb/populate-db-metadata-nexus/up01.sql b/schema/crdb/populate-db-metadata-nexus/up01.sql new file mode 100644 index 00000000000..25c42761e04 --- /dev/null +++ b/schema/crdb/populate-db-metadata-nexus/up01.sql @@ -0,0 +1,6 @@ +CREATE TYPE IF NOT EXISTS omicron.public.db_metadata_nexus_state AS ENUM ( + 'active', + 'not_yet', + 'quiesced' +); + diff --git a/schema/crdb/populate-db-metadata-nexus/up02.sql b/schema/crdb/populate-db-metadata-nexus/up02.sql new file mode 100644 index 00000000000..9fac217eec4 --- /dev/null +++ b/schema/crdb/populate-db-metadata-nexus/up02.sql @@ -0,0 +1,11 @@ +-- Nexuses which may be attempting to access the database, and a state +-- which identifies if they should be allowed to do so. +-- +-- This table is used during upgrade implement handoff between old and new +-- Nexus zones. 
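+--
+-- Rows are keyed by Nexus zone id and carry one of the 'active', 'not_yet',
+-- or 'quiesced' states defined in up01.sql.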
+CREATE TABLE IF NOT EXISTS omicron.public.db_metadata_nexus ( + nexus_id UUID NOT NULL PRIMARY KEY, + last_drained_blueprint_id UUID, + state omicron.public.db_metadata_nexus_state NOT NULL +); + diff --git a/schema/crdb/populate-db-metadata-nexus/up03.sql b/schema/crdb/populate-db-metadata-nexus/up03.sql new file mode 100644 index 00000000000..42fbf004137 --- /dev/null +++ b/schema/crdb/populate-db-metadata-nexus/up03.sql @@ -0,0 +1,4 @@ +CREATE UNIQUE INDEX IF NOT EXISTS lookup_db_metadata_nexus_by_state on omicron.public.db_metadata_nexus ( + state, + nexus_id +); diff --git a/schema/crdb/populate-db-metadata-nexus/up04.sql b/schema/crdb/populate-db-metadata-nexus/up04.sql new file mode 100644 index 00000000000..36b876b9cdd --- /dev/null +++ b/schema/crdb/populate-db-metadata-nexus/up04.sql @@ -0,0 +1,16 @@ +-- Populate db_metadata_nexus records for all Nexus zones in the current target blueprint +-- +-- This migration handles backfill for existing deployments that are upgrading +-- to include db_metadata_nexus. It finds all Nexus zones in the current +-- target blueprint and marks them as 'active' in the db_metadata_nexus table. + +SET LOCAL disallow_full_table_scans = off; + +INSERT INTO omicron.public.db_metadata_nexus (nexus_id, last_drained_blueprint_id, state) +SELECT bz.id, NULL, 'active' +FROM omicron.public.bp_target bt +JOIN omicron.public.bp_omicron_zone bz ON bt.blueprint_id = bz.blueprint_id +WHERE bz.zone_type = 'nexus' + AND bz.disposition != 'expunged' + AND bt.version = (SELECT MAX(version) FROM omicron.public.bp_target) +ON CONFLICT (nexus_id) DO NOTHING; From b1a1d91b12f10c54a283db6dbe3d0c4c2ebcc701 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karen=20C=C3=A1rcamo?= Date: Fri, 29 Aug 2025 19:01:20 -0700 Subject: [PATCH 22/38] Remove incorrect comment (#8952) In https://github.com/oxidecomputer/omicron/pull/8905 I extracted some of the functionality of the helper function `wait_for_stage0_next_image_check` into a new `wait_for_boot_info` function. A comment that made sense for `wait_for_stage0_next_image_check` does not make sense for `wait_for_boot_info` because the behaviour is different. This patch removes that comment. --- nexus/mgs-updates/src/rot_updater.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/nexus/mgs-updates/src/rot_updater.rs b/nexus/mgs-updates/src/rot_updater.rs index 7bf2ffd66c6..454a10d79e4 100644 --- a/nexus/mgs-updates/src/rot_updater.rs +++ b/nexus/mgs-updates/src/rot_updater.rs @@ -293,9 +293,6 @@ pub async fn wait_for_boot_info( .await { Ok(state) => match state.clone() { - // The minimum we will ever return is v3. - // Additionally, V2 does not report image errors, so we cannot - // know with certainty if a signature check came back with errors RotState::V2 { .. } | RotState::V3 { .. } => { debug!(log, "successfuly retrieved boot info"); return Ok(state.into_inner()); From 3070ceba43aa7e6323f9c969839b03ab7672a7c6 Mon Sep 17 00:00:00 2001 From: Benjamin Naecker Date: Fri, 29 Aug 2025 21:30:56 -0700 Subject: [PATCH 23/38] Add IP version to IP Pool API objects (#8951) - Add IP version when creating IP Pools, defaulting to IPv4. 
- Add version to views when listing / fetching pools - Flesh out some of the end-to-end tests in preparation for IPv6 pool support - Closes #8881 --- end-to-end-tests/src/bin/bootstrap.rs | 15 ++-- end-to-end-tests/src/bin/commtest.rs | 36 ++++++---- end-to-end-tests/src/helpers/mod.rs | 42 ++++++++++-- nexus/db-model/src/ip_pool.rs | 2 +- nexus/test-utils/src/resource_helpers.rs | 3 + nexus/tests/integration_tests/endpoints.rs | 2 + nexus/tests/integration_tests/ip_pools.rs | 80 +++++++++++----------- nexus/types/src/external_api/params.rs | 7 +- nexus/types/src/external_api/views.rs | 2 + openapi/nexus.json | 26 +++++++ 10 files changed, 148 insertions(+), 67 deletions(-) diff --git a/end-to-end-tests/src/bin/bootstrap.rs b/end-to-end-tests/src/bin/bootstrap.rs index d9193841bcf..26f7a30dc16 100644 --- a/end-to-end-tests/src/bin/bootstrap.rs +++ b/end-to-end-tests/src/bin/bootstrap.rs @@ -1,11 +1,13 @@ use anyhow::Result; use end_to_end_tests::helpers::ctx::{ClientParams, Context}; -use end_to_end_tests::helpers::{generate_name, get_system_ip_pool}; +use end_to_end_tests::helpers::{ + generate_name, get_system_ip_pool, try_create_ip_range, +}; use omicron_test_utils::dev::poll::{CondCheckError, wait_for_condition}; use oxide_client::types::{ ByteCount, DeviceAccessTokenRequest, DeviceAuthRequest, DeviceAuthVerify, - DiskCreate, DiskSource, IpPoolCreate, IpPoolLinkSilo, IpRange, Ipv4Range, - NameOrId, SiloQuotasUpdate, + DiskCreate, DiskSource, IpPoolCreate, IpPoolLinkSilo, IpVersion, NameOrId, + SiloQuotasUpdate, }; use oxide_client::{ ClientConsoleAuthExt, ClientDisksExt, ClientProjectsExt, @@ -41,13 +43,16 @@ async fn run_test() -> Result<()> { let (first, last) = get_system_ip_pool().await?; // ===== CREATE IP POOL ===== // - eprintln!("creating IP pool... {:?} - {:?}", first, last); + let ip_version = + if first.is_ipv4() { IpVersion::V4 } else { IpVersion::V6 }; + eprintln!("creating IP{} IP pool... {:?} - {:?}", ip_version, first, last); let pool_name = "default"; client .ip_pool_create() .body(IpPoolCreate { name: pool_name.parse().unwrap(), description: "Default IP pool".to_string(), + ip_version, }) .send() .await?; @@ -63,7 +68,7 @@ async fn run_test() -> Result<()> { client .ip_pool_range_add() .pool(pool_name) - .body(IpRange::V4(Ipv4Range { first, last })) + .body(try_create_ip_range(first, last)?) 
.send() .await?; diff --git a/end-to-end-tests/src/bin/commtest.rs b/end-to-end-tests/src/bin/commtest.rs index a2e46391025..1da1cd1c4df 100644 --- a/end-to-end-tests/src/bin/commtest.rs +++ b/end-to-end-tests/src/bin/commtest.rs @@ -1,13 +1,13 @@ use anyhow::{Result, anyhow}; use clap::{Parser, Subcommand}; -use end_to_end_tests::helpers::cli::oxide_cli_style; use end_to_end_tests::helpers::icmp::ping4_test_run; +use end_to_end_tests::helpers::{cli::oxide_cli_style, try_create_ip_range}; use oxide_client::{ ClientExperimentalExt, ClientLoginExt, ClientProjectsExt, ClientSystemHardwareExt, ClientSystemIpPoolsExt, ClientSystemStatusExt, ClientVpcsExt, types::{ - IpPoolCreate, IpPoolLinkSilo, IpRange, Ipv4Range, Name, NameOrId, + IpPoolCreate, IpPoolLinkSilo, IpRange, IpVersion, Name, NameOrId, PingStatus, ProbeCreate, ProbeInfo, ProjectCreate, UsernamePasswordCredentials, }, @@ -55,11 +55,11 @@ struct RunArgs { /// First address in the IP pool to use for testing #[arg(long)] - ip_pool_begin: Ipv4Addr, + ip_pool_begin: IpAddr, /// Last address in the IP pool to use for testing #[arg(long)] - ip_pool_end: Ipv4Addr, + ip_pool_end: IpAddr, } const API_RETRY_ATTEMPTS: usize = 15; @@ -284,11 +284,17 @@ async fn rack_prepare( if let Err(e) = oxide.ip_pool_view().pool("default").send().await { if let Some(reqwest::StatusCode::NOT_FOUND) = e.status() { print!("default ip pool does not exist, creating ..."); + let ip_version = if args.ip_pool_begin.is_ipv4() { + IpVersion::V4 + } else { + IpVersion::V6 + }; oxide .ip_pool_create() .body(IpPoolCreate { name: pool_name.parse().unwrap(), description: "Default IP pool".to_string(), + ip_version, }) .send() .await?; @@ -323,15 +329,17 @@ async fn rack_prepare( .into_inner() .items; - let range = Ipv4Range { first: args.ip_pool_begin, last: args.ip_pool_end }; - - let range_exists = pool - .iter() - .filter_map(|x| match &x.range { - IpRange::V4(r) => Some(r), - IpRange::V6(_) => None, - }) - .any(|x| x.first == range.first && x.last == range.last); + let range = try_create_ip_range(args.ip_pool_begin, args.ip_pool_end)?; + let range_exists = + pool.iter().any(|pool_range| match (&range, &pool_range.range) { + (IpRange::V4(r1), IpRange::V4(r2)) => { + r1.first == r2.first && r1.last == r2.last + } + (IpRange::V6(r1), IpRange::V6(r2)) => { + r1.first == r2.first && r1.last == r2.last + } + (_, _) => false, + }); if !range_exists { print!("ip range does not exist, creating ... 
"); @@ -339,7 +347,7 @@ async fn rack_prepare( oxide .ip_pool_range_add() .pool(Name::try_from("default").unwrap()) - .body(IpRange::V4(range.clone())) + .body(range.clone()) .send() .await )?; diff --git a/end-to-end-tests/src/helpers/mod.rs b/end-to-end-tests/src/helpers/mod.rs index 2c515561032..b24abd7a762 100644 --- a/end-to-end-tests/src/helpers/mod.rs +++ b/end-to-end-tests/src/helpers/mod.rs @@ -4,7 +4,7 @@ pub mod icmp; use self::ctx::nexus_addr; use anyhow::{Result, bail}; -use oxide_client::types::Name; +use oxide_client::types::{IpRange, Ipv4Range, Ipv6Range, Name}; use rand::Rng; use std::env; use std::net::{IpAddr, Ipv4Addr}; @@ -15,17 +15,21 @@ pub fn generate_name(prefix: &str) -> Result { .map_err(anyhow::Error::msg) } -pub async fn get_system_ip_pool() -> Result<(Ipv4Addr, Ipv4Addr)> { +pub async fn get_system_ip_pool() -> Result<(IpAddr, IpAddr)> { if let (Ok(s), Ok(e)) = (env::var("IPPOOL_START"), env::var("IPPOOL_END")) { return Ok(( - s.parse::().expect("IPPOOL_START is not an IP address"), - e.parse::().expect("IPPOOL_END is not an IP address"), + s.parse::() + .expect("IPPOOL_START is not an IP address") + .into(), + e.parse::() + .expect("IPPOOL_END is not an IP address") + .into(), )); } let nexus_addr = match nexus_addr().await? { IpAddr::V4(addr) => addr.octets(), - IpAddr::V6(_) => bail!("not sure what to do about IPv6 here"), + IpAddr::V6(_) => bail!("IPv6 IP Pools are not yet supported"), }; // HACK: we're picking a range that doesn't conflict with either iliana's @@ -36,5 +40,31 @@ pub async fn get_system_ip_pool() -> Result<(Ipv4Addr, Ipv4Addr)> { let first = [nexus_addr[0], nexus_addr[1], nexus_addr[2], 50].into(); let last = [nexus_addr[0], nexus_addr[1], nexus_addr[2], 90].into(); - Ok((first, last)) + Ok((IpAddr::V4(first), IpAddr::V4(last))) +} + +/// Try to construct an IP range from a pair of addresses. +/// +/// An error is returned if the addresses aren't the same version, or first > +/// last. 
+pub fn try_create_ip_range(first: IpAddr, last: IpAddr) -> Result { + match (first, last) { + (IpAddr::V4(first), IpAddr::V4(last)) => { + anyhow::ensure!( + first <= last, + "IP range first address must be <= last" + ); + Ok(IpRange::V4(Ipv4Range { first, last })) + } + (IpAddr::V6(first), IpAddr::V6(last)) => { + anyhow::ensure!( + first <= last, + "IP range first address must be <= last" + ); + Ok(IpRange::V6(Ipv6Range { first, last })) + } + (_, _) => anyhow::bail!( + "Invalid IP addresses for IP range: {first} and {last}" + ), + } } diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs index 68de06ad24d..21e7dd2e72d 100644 --- a/nexus/db-model/src/ip_pool.rs +++ b/nexus/db-model/src/ip_pool.rs @@ -121,7 +121,7 @@ impl IpPool { impl From for views::IpPool { fn from(pool: IpPool) -> Self { - Self { identity: pool.identity() } + Self { identity: pool.identity(), ip_version: pool.ip_version.into() } } } diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index dec5559368f..bb895d61090 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -258,6 +258,9 @@ pub async fn create_ip_pool( name: pool_name.parse().unwrap(), description: String::from("an ip pool"), }, + ip_version: ip_range + .map(|r| r.version()) + .unwrap_or_else(views::IpVersion::v4), }, ) .await; diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index 2dd82d72c14..e892802d473 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -22,6 +22,7 @@ use nexus_test_utils::resource_helpers::test_params; use nexus_types::external_api::params; use nexus_types::external_api::shared; use nexus_types::external_api::shared::IpRange; +use nexus_types::external_api::shared::IpVersion; use nexus_types::external_api::shared::Ipv4Range; use nexus_types::external_api::views::SledProvisionPolicy; use omicron_common::api::external::AddressLotKind; @@ -929,6 +930,7 @@ pub static DEMO_IP_POOL_CREATE: LazyLock = name: DEMO_IP_POOL_NAME.clone(), description: String::from("an IP pool"), }, + ip_version: IpVersion::V4, }); pub static DEMO_IP_POOL_PROJ_URL: LazyLock = LazyLock::new(|| { format!( diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index 8d55655ee71..fa6fa2839ec 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -50,6 +50,7 @@ use nexus_types::external_api::shared::SiloRole; use nexus_types::external_api::views::IpPool; use nexus_types::external_api::views::IpPoolRange; use nexus_types::external_api::views::IpPoolSiloLink; +use nexus_types::external_api::views::IpVersion; use nexus_types::external_api::views::Silo; use nexus_types::external_api::views::SiloIpPool; use nexus_types::identity::Resource; @@ -102,14 +103,16 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { // directly let params = IpPoolCreate { identity: IdentityMetadataCreateParams { - name: String::from(pool_name).parse().unwrap(), + name: pool_name.parse().unwrap(), description: String::from(description), }, + ip_version: IpVersion::V4, }; let created_pool: IpPool = object_create(client, ip_pools_url, ¶ms).await; assert_eq!(created_pool.identity.name, pool_name); assert_eq!(created_pool.identity.description, description); + assert_eq!(created_pool.ip_version, IpVersion::V4); let list = get_ip_pools(client).await; 
assert_eq!(list.len(), 1, "Expected exactly 1 IP pool"); @@ -122,7 +125,13 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { let error = object_create_error( client, ip_pools_url, - ¶ms, + ¶ms::IpPoolCreate { + identity: IdentityMetadataCreateParams { + name: pool_name.parse().unwrap(), + description: String::new(), + }, + ip_version: IpVersion::V4, + }, StatusCode::BAD_REQUEST, ) .await; @@ -445,8 +454,8 @@ async fn test_ip_pool_service_no_cud(cptestctx: &ControlPlaneTestContext) { async fn test_ip_pool_silo_link(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let p0 = create_pool(client, "p0").await; - let p1 = create_pool(client, "p1").await; + let p0 = create_ipv4_pool(client, "p0").await; + let p1 = create_ipv4_pool(client, "p1").await; // there should be no associations let assocs_p0 = silos_for_pool(client, "p0").await; @@ -547,7 +556,7 @@ async fn test_ip_pool_silo_link(cptestctx: &ControlPlaneTestContext) { assert_eq!(silo_pools[1].is_default, true); // creating a third pool and trying to link it as default: true should fail - create_pool(client, "p2").await; + create_ipv4_pool(client, "p2").await; let url = "/v1/system/ip-pools/p2/silos"; let error = object_create_error( client, @@ -583,7 +592,7 @@ async fn test_ip_pool_silo_list_only_discoverable( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - create_pool(client, "p0").await; + create_ipv4_pool(client, "p0").await; // there should be no linked silos let silos_p0 = silos_for_pool(client, "p0").await; @@ -608,8 +617,8 @@ async fn test_ip_pool_silo_list_only_discoverable( async fn test_ip_pool_update_default(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - create_pool(client, "p0").await; - create_pool(client, "p1").await; + create_ipv4_pool(client, "p0").await; + create_ipv4_pool(client, "p1").await; // there should be no linked silos let silos_p0 = silos_for_pool(client, "p0").await; @@ -715,7 +724,7 @@ async fn test_ip_pool_pagination(cptestctx: &ControlPlaneTestContext) { for i in 1..=8 { let name = format!("other-pool-{}", i); pool_names.push(name.clone()); - create_pool(client, &name).await; + create_ipv4_pool(client, &name).await; } let first_five_url = format!("{}?limit=5", base_url); @@ -739,7 +748,7 @@ async fn test_ip_pool_silos_pagination(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; // one pool, and there should be no linked silos - create_pool(client, "p0").await; + create_ipv4_pool(client, "p0").await; let silos_p0 = silos_for_pool(client, "p0").await; assert_eq!(silos_p0.items.len(), 0); @@ -803,12 +812,21 @@ async fn pools_for_silo( objects_list_page_authz::(client, &url).await.items } -async fn create_pool(client: &ClientTestContext, name: &str) -> IpPool { +async fn create_ipv4_pool(client: &ClientTestContext, name: &str) -> IpPool { + create_pool(client, name, IpVersion::V4).await +} + +async fn create_pool( + client: &ClientTestContext, + name: &str, + ip_version: IpVersion, +) -> IpPool { let params = IpPoolCreate { identity: IdentityMetadataCreateParams { name: Name::try_from(name.to_string()).unwrap(), description: "".to_string(), }, + ip_version, }; NexusRequest::objects_post(client, "/v1/system/ip-pools", ¶ms) .authn_as(AuthnMode::PrivilegedUser) @@ -828,7 +846,7 @@ async fn test_ipv4_ip_pool_utilization_total( ) { let client = &cptestctx.external_client; - let _pool = create_pool(client, "p0").await; + let _pool = create_ipv4_pool(client, 
"p0").await; assert_ip_pool_utilization(client, "p0", 0, 0.0).await; @@ -932,22 +950,16 @@ async fn test_ip_pool_range_overlapping_ranges_fails( // Create the pool, verify basic properties let params = IpPoolCreate { identity: IdentityMetadataCreateParams { - name: String::from(pool_name).parse().unwrap(), + name: pool_name.parse().unwrap(), description: String::from(description), }, - // silo: None, - // is_default: false, + ip_version: IpVersion::V4, }; let created_pool: IpPool = - NexusRequest::objects_post(client, ip_pools_url, ¶ms) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + object_create(client, ip_pools_url, ¶ms).await; assert_eq!(created_pool.identity.name, pool_name); assert_eq!(created_pool.identity.description, description); + assert_eq!(created_pool.ip_version, IpVersion::V4); // Test data for IPv4 ranges that should fail due to overlap let ipv4_range = TestRange { @@ -1097,20 +1109,16 @@ async fn test_ip_pool_range_pagination(cptestctx: &ControlPlaneTestContext) { // Create the pool, verify basic properties let params = IpPoolCreate { identity: IdentityMetadataCreateParams { - name: String::from(pool_name).parse().unwrap(), + name: pool_name.parse().unwrap(), description: String::from(description), }, + ip_version: IpVersion::V4, }; let created_pool: IpPool = - NexusRequest::objects_post(client, ip_pools_url, ¶ms) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + object_create(client, ip_pools_url, ¶ms).await; assert_eq!(created_pool.identity.name, pool_name); assert_eq!(created_pool.identity.description, description); + assert_eq!(created_pool.ip_version, IpVersion::V4); // Add some ranges, out of order. These will be paginated by their first // address, which sorts all IPv4 before IPv6, then within protocol versions @@ -1289,16 +1297,7 @@ async fn test_ip_range_delete_with_allocated_external_ip_fails( let ip_pool_rem_range_url = format!("{}/remove", ip_pool_ranges_url); // create pool - let params = IpPoolCreate { - identity: IdentityMetadataCreateParams { - name: String::from(pool_name).parse().unwrap(), - description: String::from("right on cue"), - }, - }; - NexusRequest::objects_post(client, ip_pools_url, ¶ms) - .authn_as(AuthnMode::PrivilegedUser) - .execute_and_parse_unwrap::() - .await; + let _ = create_ipv4_pool(client, pool_name).await; // associate pool with default silo, which is the privileged user's silo let params = IpPoolLinkSilo { @@ -1515,6 +1514,7 @@ async fn test_ip_pool_service(cptestctx: &ControlPlaneTestContext) { fn assert_pools_eq(first: &IpPool, second: &IpPool) { assert_eq!(first.identity, second.identity); + assert_eq!(first.ip_version, second.ip_version); } fn assert_ranges_eq(first: &IpPoolRange, second: &IpPoolRange) { diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index cee18c8a66c..1b5e1d1f365 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -13,7 +13,7 @@ use omicron_common::api::external::{ AddressLotKind, AffinityPolicy, AllowedSourceIps, BfdMode, BgpPeer, ByteCount, FailureDomain, Hostname, IdentityMetadataCreateParams, IdentityMetadataUpdateParams, InstanceAutoRestartPolicy, InstanceCpuCount, - LinkFec, LinkSpeed, Name, NameOrId, Nullable, PaginationOrder, + IpVersion, LinkFec, LinkSpeed, Name, NameOrId, Nullable, PaginationOrder, RouteDestination, RouteTarget, UserId, }; use omicron_common::disk::DiskVariant; @@ -1002,6 +1002,11 
@@ impl std::fmt::Debug for CertificateCreate { pub struct IpPoolCreate { #[serde(flatten)] pub identity: IdentityMetadataCreateParams, + /// The IP version of the pool. + /// + /// The default is IPv4. + #[serde(default = "IpVersion::v4")] + pub ip_version: IpVersion, } /// Parameters for updating an IP Pool diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index 625e4fa7753..24d61eac90b 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -396,6 +396,8 @@ pub struct InternetGatewayIpAddress { pub struct IpPool { #[serde(flatten)] pub identity: IdentityMetadata, + /// The IP version for the pool. + pub ip_version: IpVersion, } /// The utilization of IP addresses in a pool. diff --git a/openapi/nexus.json b/openapi/nexus.json index 01b45952a29..45e6c9b52fd 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -21187,6 +21187,14 @@ "type": "string", "format": "uuid" }, + "ip_version": { + "description": "The IP version for the pool.", + "allOf": [ + { + "$ref": "#/components/schemas/IpVersion" + } + ] + }, "name": { "description": "unique, mutable, user-controlled identifier for each resource", "allOf": [ @@ -21209,6 +21217,7 @@ "required": [ "description", "id", + "ip_version", "name", "time_created", "time_modified" @@ -21221,6 +21230,15 @@ "description": { "type": "string" }, + "ip_version": { + "description": "The IP version of the pool.\n\nThe default is IPv4.", + "default": "v4", + "allOf": [ + { + "$ref": "#/components/schemas/IpVersion" + } + ] + }, "name": { "$ref": "#/components/schemas/Name" } @@ -21428,6 +21446,14 @@ } ] }, + "IpVersion": { + "description": "The IP address version.", + "type": "string", + "enum": [ + "v4", + "v6" + ] + }, "Ipv4Net": { "example": "192.168.1.0/24", "title": "An IPv4 subnet", From 4da85ab3459ce619a6678b6c648a5c5bedb8ceac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karen=20C=C3=A1rcamo?= Date: Fri, 29 Aug 2025 23:24:01 -0700 Subject: [PATCH 24/38] [mgs-updates] RoT bootloader FirstPageErased error should be ignored (#8955) As per https://github.com/oxidecomputer/omicron/issues/8044#issuecomment-2828614002 , the `RotImageError::FirstPageErased` error for the RoT bootloader is not fatal, and should not be treated as such. This patch does two things: 1. Interprets the error response from the SP as an invalid caboose when retrieving caboose information 2. Carries on updating the RoT bootloader when when it finds this error during an image signature check. 
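
In short, the handling in `rot_bootloader_updater.rs` becomes roughly the
following (condensed from the hunk below):

    if let Some(e) = stage0next_error {
        // FirstPageErased only means stage0next is erased, which can happen
        // with some bootloaders straight from manufacturing; any other image
        // error is still fatal.
        if e != RotImageError::FirstPageErased {
            return Err(PostUpdateError::FatalError {
                error: InlineErrorChain::new(&e).to_string(),
            });
        }
    }
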
Closes https://github.com/oxidecomputer/omicron/issues/8045 --- nexus/mgs-updates/src/common_sp_update.rs | 1 + nexus/mgs-updates/src/rot_bootloader_updater.rs | 13 ++++++++++--- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/nexus/mgs-updates/src/common_sp_update.rs b/nexus/mgs-updates/src/common_sp_update.rs index 06ab8eefa3c..00c756e710e 100644 --- a/nexus/mgs-updates/src/common_sp_update.rs +++ b/nexus/mgs-updates/src/common_sp_update.rs @@ -322,4 +322,5 @@ pub(crate) fn error_means_caboose_is_invalid( let message = format!("{error:?}"); message.contains("the image caboose does not contain") || message.contains("the image does not include a caboose") + || message.contains("failed to read data from the caboose") } diff --git a/nexus/mgs-updates/src/rot_bootloader_updater.rs b/nexus/mgs-updates/src/rot_bootloader_updater.rs index 6a87fc3a508..3b7063db646 100644 --- a/nexus/mgs-updates/src/rot_bootloader_updater.rs +++ b/nexus/mgs-updates/src/rot_bootloader_updater.rs @@ -200,9 +200,16 @@ impl SpComponentUpdateHelperImpl for ReconfiguratorRotBootloaderUpdater { // stage0_next, the device won't let us load this image onto stage0. // We return a fatal error. if let Some(e) = stage0next_error { - return Err(PostUpdateError::FatalError { - error: InlineErrorChain::new(&e).to_string(), - }); + // With some RoT bootloaders in manufacturing, it isn't strictly + // necessary to go through our update process and stage0next will + // still be erased. This raises an FirstPageErased error, but it + // doesn't mean that we should stop the update process. We + // ignore it. + if e != RotImageError::FirstPageErased { + return Err(PostUpdateError::FatalError { + error: InlineErrorChain::new(&e).to_string(), + }); + } } // This operation is very delicate. Here, we're overwriting the From 69c5945aa6417c52ecb4912589471450506756da Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Sat, 30 Aug 2025 07:43:42 -0700 Subject: [PATCH 25/38] (2/N) Add schema for nexus generations (#8944) Adds schema for nexus generations, leaves the value at "1". 
These schemas will be used more earnestly in https://github.com/oxidecomputer/omicron/pull/8936 Fixes https://github.com/oxidecomputer/omicron/issues/8853 --- dev-tools/omdb/tests/successes.out | 3 + .../output/cmds-add-sled-no-disks-stdout | 1 + .../tests/output/cmds-example-stdout | 7 +++ ...ds-expunge-newly-added-external-dns-stdout | 6 ++ ...ds-expunge-newly-added-internal-dns-stdout | 4 ++ .../output/cmds-host-phase-2-source-stdout | 6 ++ .../output/cmds-mupdate-update-flow-stdout | 12 ++++ .../output/cmds-noop-image-source-stdout | 2 + .../tests/output/cmds-set-mgs-updates-stdout | 11 ++++ .../cmds-set-remove-mupdate-override-stdout | 4 ++ .../tests/output/cmds-set-zone-images-stdout | 5 ++ .../tests/output/cmds-target-release-stdout | 58 +++++++++++++++++++ nexus/db-model/src/deployment.rs | 11 ++++ nexus/db-model/src/schema_versions.rs | 3 +- .../src/db/datastore/db_metadata.rs | 3 + .../db-queries/src/db/datastore/deployment.rs | 4 ++ .../deployment/external_networking.rs | 2 + nexus/db-queries/src/db/datastore/rack.rs | 12 ++++ nexus/db-schema/src/schema.rs | 3 + nexus/reconfigurator/execution/src/dns.rs | 2 + .../planning/src/blueprint_builder/builder.rs | 5 ++ .../example_builder_zone_counts_blueprint.txt | 1 + .../output/planner_basic_add_sled_2_3.txt | 1 + .../output/planner_basic_add_sled_3_5.txt | 1 + ...dataset_settings_modified_in_place_1_2.txt | 1 + .../planner_decommissions_sleds_1_2.txt | 1 + .../planner_decommissions_sleds_bp2.txt | 1 + .../planner_deploy_all_keeper_nodes_1_2.txt | 1 + .../planner_deploy_all_keeper_nodes_3_4.txt | 1 + .../planner_deploy_all_keeper_nodes_4_5.txt | 1 + .../planner_deploy_all_keeper_nodes_5_6.txt | 1 + ...lanner_expunge_clickhouse_clusters_3_4.txt | 1 + ...lanner_expunge_clickhouse_clusters_5_6.txt | 1 + ...ouse_zones_after_policy_is_changed_3_4.txt | 1 + .../output/planner_nonprovisionable_1_2.txt | 1 + .../output/planner_nonprovisionable_2_2a.txt | 4 ++ .../output/planner_nonprovisionable_bp2.txt | 1 + .../output/zone_image_source_change_1.txt | 1 + .../background/tasks/blueprint_execution.rs | 1 + .../app/background/tasks/blueprint_load.rs | 1 + nexus/test-utils/src/lib.rs | 2 + nexus/types/src/deployment.rs | 14 +++++ nexus/types/src/deployment/blueprint_diff.rs | 7 +++ .../types/src/deployment/blueprint_display.rs | 1 + nexus/types/src/deployment/zone_type.rs | 5 ++ openapi/nexus-internal.json | 27 +++++++++ schema/crdb/dbinit.sql | 16 ++++- schema/crdb/nexus-generation/up01.sql | 1 + schema/crdb/nexus-generation/up02.sql | 5 ++ schema/crdb/nexus-generation/up03.sql | 1 + schema/crdb/nexus-generation/up04.sql | 1 + schema/crdb/nexus-generation/up05.sql | 5 ++ sled-agent/src/rack_setup/plan/service.rs | 3 +- sled-agent/src/rack_setup/service.rs | 1 + sled-agent/src/sim/server.rs | 1 + 55 files changed, 272 insertions(+), 4 deletions(-) create mode 100644 schema/crdb/nexus-generation/up01.sql create mode 100644 schema/crdb/nexus-generation/up02.sql create mode 100644 schema/crdb/nexus-generation/up03.sql create mode 100644 schema/crdb/nexus-generation/up04.sql create mode 100644 schema/crdb/nexus-generation/up05.sql diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index f5f91119769..f0a22c50ad4 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -1517,6 +1517,7 @@ parent: internal DNS version::: 1 external DNS version::: 2 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -1640,6 +1641,7 @@ parent: internal DNS version::: 
1 external DNS version::: 2 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -1665,6 +1667,7 @@ to: blueprint ............. internal DNS version::: 1 (unchanged) external DNS version::: 2 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout index 9591408f905..9ed40ac1396 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout @@ -274,6 +274,7 @@ parent: dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout index 92232e4b5e6..f72ba26bf9c 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout @@ -399,6 +399,7 @@ parent: 02697f74-b14a-4418-90f0-c28b2a3a6aa9 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -518,6 +519,7 @@ parent: 02697f74-b14a-4418-90f0-c28b2a3a6aa9 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -633,6 +635,7 @@ to: blueprint 86db3308-f817-4626-8838-4085949a6a41 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -710,6 +713,7 @@ to: blueprint 86db3308-f817-4626-8838-4085949a6a41 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -787,6 +791,7 @@ to: blueprint 02697f74-b14a-4418-90f0-c28b2a3a6aa9 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1023,6 +1028,7 @@ parent: 02697f74-b14a-4418-90f0-c28b2a3a6aa9 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -1661,6 +1667,7 @@ to: blueprint 86db3308-f817-4626-8838-4085949a6a41 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout index cf22a460ed9..c42a0d87b5c 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout @@ -331,6 +331,7 @@ parent: 06c88262-f435-410e-ba98-101bed41ec27 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -463,6 +464,7 @@ 
to: blueprint 366b0b68-d80e-4bc1-abd3-dc69837847e0 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -826,6 +828,7 @@ parent: 3f00b694-1b16-4aaa-8f78-e6b3a527b434 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -967,6 +970,7 @@ to: blueprint 9c998c1d-1a7b-440a-ae0c-40f781dea6e2 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1333,6 +1337,7 @@ parent: 366b0b68-d80e-4bc1-abd3-dc69837847e0 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -1467,6 +1472,7 @@ to: blueprint 2ac8c740-444d-42ff-8d66-9812a7e51288 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout index d18a5821897..fc85f987f5f 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout @@ -329,6 +329,7 @@ parent: 184f10b3-61cb-41ef-9b93-3489b2bac559 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -461,6 +462,7 @@ to: blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -624,6 +626,7 @@ to: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -775,6 +778,7 @@ to: blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-host-phase-2-source-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-host-phase-2-source-stdout index 1dd547e695d..83e4a21a79c 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-host-phase-2-source-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-host-phase-2-source-stdout @@ -150,6 +150,7 @@ to: blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -310,6 +311,7 @@ to: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -469,6 +471,7 @@ parent: 
8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -620,6 +623,7 @@ to: blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -780,6 +784,7 @@ to: blueprint df06bb57-ad42-4431-9206-abff322896c7 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -939,6 +944,7 @@ parent: af934083-59b5-4bf6-8966-6fb5292c29e1 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout index 39c1f30e8b9..74cedfee3f1 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout @@ -687,6 +687,7 @@ to: blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) * target release min gen: 1 -> 3 + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -798,6 +799,7 @@ to: blueprint 626487fa-7139-45ec-8416-902271fc730b internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 3 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1034,6 +1036,7 @@ to: blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) * target release min gen: 3 -> 4 + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1261,6 +1264,7 @@ parent: c1a0d242-9160-40f4-96ae-61f8f40a0b1b internal DNS version::: 1 external DNS version::: 1 target release min gen: 4 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -1338,6 +1342,7 @@ to: blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 4 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1578,6 +1583,7 @@ parent: afb09faf-a586-4483-9289-04d4f1d8ba23 internal DNS version::: 1 external DNS version::: 1 target release min gen: 4 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -1660,6 +1666,7 @@ to: blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 4 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1873,6 +1880,7 @@ parent: ce365dff-2cdb-4f35-a186-b15e20e1e700 internal DNS version::: 1 external DNS version::: 1 target release min gen: 4 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 1 Pending MGS-managed updates (all baseboards): @@ -1906,6 +1914,7 @@ to: blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 4 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) 
@@ -2177,6 +2186,7 @@ to: blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 4 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -2338,6 +2348,7 @@ to: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) * target release min gen: 4 -> 5 + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -2458,6 +2469,7 @@ to: blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 5 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout index 7c6ef00867e..35ab76aa8c8 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout @@ -352,6 +352,7 @@ to: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -483,6 +484,7 @@ to: blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-set-mgs-updates-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-set-mgs-updates-stdout index 06bc28648a5..c6dc1b11171 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-set-mgs-updates-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-set-mgs-updates-stdout @@ -205,6 +205,7 @@ parent: 6ccc786b-17f1-4562-958f-5a7d9a5a15fd internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -416,6 +417,7 @@ parent: ad97e762-7bf1-45a6-a98f-60afb7e491c0 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 1 Pending MGS-managed updates (all baseboards): @@ -441,6 +443,7 @@ to: blueprint cca24b71-09b5-4042-9185-b33e9f2ebba0 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -478,6 +481,7 @@ to: blueprint ad97e762-7bf1-45a6-a98f-60afb7e491c0 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -707,6 +711,7 @@ parent: cca24b71-09b5-4042-9185-b33e9f2ebba0 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 1 Pending MGS-managed updates (all baseboards): @@ -732,6 +737,7 @@ to: blueprint 5bf974f3-81f9-455b-b24e-3099f765664c internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 
(unchanged) @@ -770,6 +776,7 @@ to: blueprint cca24b71-09b5-4042-9185-b33e9f2ebba0 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1001,6 +1008,7 @@ parent: 5bf974f3-81f9-455b-b24e-3099f765664c internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 2 Pending MGS-managed updates (all baseboards): @@ -1027,6 +1035,7 @@ to: blueprint 1b837a27-3be1-4fcb-8499-a921c839e1d0 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1255,6 +1264,7 @@ parent: 1b837a27-3be1-4fcb-8499-a921c839e1d0 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 1 Pending MGS-managed updates (all baseboards): @@ -1280,6 +1290,7 @@ to: blueprint 3682a71b-c6ca-4b7e-8f84-16df80c85960 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout index e599ae2bdf7..200385eb7d5 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout @@ -274,6 +274,7 @@ parent: df06bb57-ad42-4431-9206-abff322896c7 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -397,6 +398,7 @@ to: blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -644,6 +646,7 @@ parent: afb09faf-a586-4483-9289-04d4f1d8ba23 internal DNS version::: 1 external DNS version::: 1 target release min gen: 2 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -663,6 +666,7 @@ to: blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) * target release min gen: 1 -> 2 + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-set-zone-images-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-set-zone-images-stdout index 14619df2366..97bca366c13 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-set-zone-images-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-set-zone-images-stdout @@ -107,6 +107,7 @@ parent: 1b013011-2062-4b48-b544-a32b23bce83a internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -225,6 +226,7 @@ parent: 9766ca20-38d4-4380-b005-e7c43c797e7c internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -329,6 +331,7 @@ to: blueprint f714e6ea-e85a-4d7d-93c2-a018744fe176 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) 
target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -455,6 +458,7 @@ parent: bb128f06-a2e1-44c1-8874-4f789d0ff896 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -559,6 +563,7 @@ to: blueprint d9c572a1-a68c-4945-b1ec-5389bd588fe9 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout index 7938d183877..bdfc47b82b1 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout @@ -236,6 +236,7 @@ to: blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -296,6 +297,7 @@ to: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -358,6 +360,7 @@ to: blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -429,6 +432,7 @@ to: blueprint df06bb57-ad42-4431-9206-abff322896c7 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -562,6 +566,7 @@ to: blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -622,6 +627,7 @@ to: blueprint 9034c710-3e57-45f3-99e5-4316145e87ac internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -683,6 +689,7 @@ to: blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -743,6 +750,7 @@ to: blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -805,6 +813,7 @@ to: blueprint 626487fa-7139-45ec-8416-902271fc730b internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -878,6 +887,7 @@ to: blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + 
nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -946,6 +956,7 @@ to: blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1018,6 +1029,7 @@ to: blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1086,6 +1098,7 @@ to: blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1157,6 +1170,7 @@ to: blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1290,6 +1304,7 @@ to: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1362,6 +1377,7 @@ to: blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1436,6 +1452,7 @@ to: blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1503,6 +1520,7 @@ to: blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1574,6 +1592,7 @@ to: blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1647,6 +1666,7 @@ to: blueprint b82656b0-a9be-433d-83d0-e2bdf371777a internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1717,6 +1737,7 @@ to: blueprint 31c84831-be52-4630-bc3f-128d72cd8f22 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1785,6 +1806,7 @@ to: blueprint 778e3f3a-58b1-4a5e-acff-d23c5d7124c2 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1913,6 +1935,7 @@ to: blueprint 386a7ec3-7c2e-43cf-8f00-999e91e1d5e6 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus 
gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -2049,6 +2072,7 @@ to: blueprint e54a0836-53e1-4948-a3af-0b77165289b5 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -2180,6 +2204,7 @@ to: blueprint 459a45a5-616e-421f-873b-2fb08c36205c internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -2317,6 +2342,7 @@ to: blueprint b2295597-5788-482e-acf9-1731ec63fbd2 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -2449,6 +2475,7 @@ to: blueprint 6fad8fd4-e825-433f-b76d-495484e068ce internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -2596,6 +2623,7 @@ to: blueprint 24b6e243-100c-428d-8ea6-35b504226f55 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -2743,6 +2771,7 @@ to: blueprint 79fff7a2-2495-4c75-8465-4dc01bab48ce internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -2870,6 +2899,7 @@ to: blueprint 3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -3020,6 +3050,7 @@ to: blueprint 4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -3171,6 +3202,7 @@ to: blueprint e2125c83-b255-45c9-bc9b-802cff09a812 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -3309,6 +3341,7 @@ to: blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -3447,6 +3480,7 @@ to: blueprint 834e4dbe-3b71-443d-bd4c-20e8253abc0c internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -3577,6 +3611,7 @@ to: blueprint d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -3697,6 +3732,7 @@ to: blueprint e2deb7c0-2262-49fe-855f-4250c22afb36 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 
1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -3817,6 +3853,7 @@ to: blueprint 23ce505c-8991-44a5-8863-f2b906fba9cf internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -3956,6 +3993,7 @@ to: blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -4095,6 +4133,7 @@ to: blueprint 60b55d33-5fec-4277-9864-935197eaead7 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -4216,6 +4255,7 @@ to: blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -4347,6 +4387,7 @@ to: blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -4479,6 +4520,7 @@ to: blueprint 02078c95-3d58-4b7b-a03f-9b160361c50a internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -4623,6 +4665,7 @@ to: blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -4768,6 +4811,7 @@ to: blueprint 880e2ffc-8187-4275-a2f3-1b36aa2f4482 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -4903,6 +4947,7 @@ to: blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -5038,6 +5083,7 @@ to: blueprint a2c6496d-98fc-444d-aa36-99508aa72367 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -5158,6 +5204,7 @@ to: blueprint 6ed56354-5941-40d1-a06c-b0e940701d52 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -5277,6 +5324,7 @@ to: blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -5404,6 +5452,7 @@ to: blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) 
OXIMETER SETTINGS: generation: 1 (unchanged) @@ -5531,6 +5580,7 @@ to: blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -5652,6 +5702,7 @@ to: blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -5790,6 +5841,7 @@ to: blueprint 59630e63-c953-4807-9e84-9e750a79f68e internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -5929,6 +5981,7 @@ to: blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -6062,6 +6115,7 @@ to: blueprint 90650737-8142-47a6-9a48-a10efc487e57 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -6194,6 +6248,7 @@ to: blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -6320,6 +6375,7 @@ to: blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -6463,6 +6519,7 @@ to: blueprint 810ea95a-4730-43dd-867e-1984aeb9d873 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -6727,6 +6784,7 @@ parent: e8b088a8-7da0-480b-a2dc-75ffef068ece internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 diff --git a/nexus/db-model/src/deployment.rs b/nexus/db-model/src/deployment.rs index e3e7cd50ddf..5cd9ca9e500 100644 --- a/nexus/db-model/src/deployment.rs +++ b/nexus/db-model/src/deployment.rs @@ -81,6 +81,7 @@ pub struct Blueprint { pub creator: String, pub comment: String, pub target_release_minimum_generation: Generation, + pub nexus_generation: Generation, } impl From<&'_ nexus_types::deployment::Blueprint> for Blueprint { @@ -100,6 +101,7 @@ impl From<&'_ nexus_types::deployment::Blueprint> for Blueprint { target_release_minimum_generation: Generation( bp.target_release_minimum_generation, ), + nexus_generation: Generation(bp.nexus_generation), } } } @@ -113,6 +115,7 @@ impl From for nexus_types::deployment::BlueprintMetadata { external_dns_version: *value.external_dns_version, target_release_minimum_generation: *value .target_release_minimum_generation, + nexus_generation: *value.nexus_generation, cockroachdb_fingerprint: value.cockroachdb_fingerprint, cockroachdb_setting_preserve_downgrade: CockroachDbPreserveDowngrade::from_optional_string( @@ -524,6 +527,7 @@ pub struct BpOmicronZone { pub 
image_source: DbBpZoneImageSource, pub image_artifact_sha256: Option, + pub nexus_generation: Option, } impl BpOmicronZone { @@ -585,6 +589,7 @@ impl BpOmicronZone { snat_ip: None, snat_first_port: None, snat_last_port: None, + nexus_generation: None, }; match &blueprint_zone.zone_type { @@ -716,6 +721,7 @@ impl BpOmicronZone { nic, external_tls, external_dns_servers, + nexus_generation, }) => { // Set the common fields bp_omicron_zone @@ -733,6 +739,8 @@ impl BpOmicronZone { .map(IpNetwork::from) .collect(), ); + bp_omicron_zone.nexus_generation = + Some(Generation::from(*nexus_generation)); } BlueprintZoneType::Oximeter(blueprint_zone_type::Oximeter { address, @@ -938,6 +946,9 @@ impl BpOmicronZone { .into_iter() .map(|i| i.ip()) .collect(), + nexus_generation: *self.nexus_generation.ok_or_else( + || anyhow!("expected 'nexus_generation'"), + )?, }) } ZoneType::Oximeter => { diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs index c471d07e50d..5b259c0d7e3 100644 --- a/nexus/db-model/src/schema_versions.rs +++ b/nexus/db-model/src/schema_versions.rs @@ -16,7 +16,7 @@ use std::{collections::BTreeMap, sync::LazyLock}; /// /// This must be updated when you change the database schema. Refer to /// schema/crdb/README.adoc in the root of this repository for details. -pub const SCHEMA_VERSION: Version = Version::new(185, 0, 0); +pub const SCHEMA_VERSION: Version = Version::new(186, 0, 0); /// List of all past database schema versions, in *reverse* order /// @@ -28,6 +28,7 @@ static KNOWN_VERSIONS: LazyLock> = LazyLock::new(|| { // | leaving the first copy as an example for the next person. // v // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"), + KnownVersion::new(186, "nexus-generation"), KnownVersion::new(185, "populate-db-metadata-nexus"), KnownVersion::new(184, "store-silo-admin-group-name"), KnownVersion::new(183, "add-ip-version-to-pools"), diff --git a/nexus/db-queries/src/db/datastore/db_metadata.rs b/nexus/db-queries/src/db/datastore/db_metadata.rs index 43a60817848..bea3638fd59 100644 --- a/nexus/db-queries/src/db/datastore/db_metadata.rs +++ b/nexus/db-queries/src/db/datastore/db_metadata.rs @@ -984,6 +984,7 @@ mod test { db.terminate().await; logctx.cleanup_successful(); } + fn create_test_blueprint( nexus_zones: Vec<(OmicronZoneUuid, BlueprintZoneDisposition)>, ) -> Blueprint { @@ -1020,6 +1021,7 @@ mod test { slot: 0, transit_ips: Vec::new(), }, + nexus_generation: Generation::new(), }), image_source: BlueprintZoneImageSource::InstallDataset, }) @@ -1048,6 +1050,7 @@ mod test { internal_dns_version: Generation::new(), external_dns_version: Generation::new(), target_release_minimum_generation: Generation::new(), + nexus_generation: Generation::new(), cockroachdb_fingerprint: String::new(), cockroachdb_setting_preserve_downgrade: CockroachDbPreserveDowngrade::DoNotModify, diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index 0e46092de95..6ce19a93ff7 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -548,6 +548,7 @@ impl DataStore { internal_dns_version, external_dns_version, target_release_minimum_generation, + nexus_generation, cockroachdb_fingerprint, cockroachdb_setting_preserve_downgrade, time_created, @@ -574,6 +575,7 @@ impl DataStore { *blueprint.internal_dns_version, *blueprint.external_dns_version, *blueprint.target_release_minimum_generation, + *blueprint.nexus_generation, 
blueprint.cockroachdb_fingerprint, blueprint.cockroachdb_setting_preserve_downgrade, blueprint.time_created, @@ -1325,6 +1327,7 @@ impl DataStore { internal_dns_version, external_dns_version, target_release_minimum_generation, + nexus_generation, cockroachdb_fingerprint, cockroachdb_setting_preserve_downgrade, clickhouse_cluster_config, @@ -4271,6 +4274,7 @@ mod tests { }, external_tls: false, external_dns_servers: vec![], + nexus_generation: Generation::new(), }, ), image_source: BlueprintZoneImageSource::InstallDataset, diff --git a/nexus/db-queries/src/db/datastore/deployment/external_networking.rs b/nexus/db-queries/src/db/datastore/deployment/external_networking.rs index e8cb951f85b..dd525fdbbc9 100644 --- a/nexus/db-queries/src/db/datastore/deployment/external_networking.rs +++ b/nexus/db-queries/src/db/datastore/deployment/external_networking.rs @@ -454,6 +454,7 @@ mod tests { use omicron_common::address::NEXUS_OPTE_IPV4_SUBNET; use omicron_common::address::NTP_OPTE_IPV4_SUBNET; use omicron_common::address::NUM_SOURCE_NAT_PORTS; + use omicron_common::api::external::Generation; use omicron_common::api::external::MacAddr; use omicron_common::api::external::Vni; use omicron_common::zpool_name::ZpoolName; @@ -643,6 +644,7 @@ mod tests { nic: self.nexus_nic.clone(), external_tls: false, external_dns_servers: Vec::new(), + nexus_generation: Generation::new(), }, ), image_source: BlueprintZoneImageSource::InstallDataset, diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 31e0ec43284..8b090bd2c69 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -1107,6 +1107,7 @@ mod test { internal_dns_version: *Generation::new(), external_dns_version: *Generation::new(), target_release_minimum_generation: *Generation::new(), + nexus_generation: *Generation::new(), cockroachdb_fingerprint: String::new(), clickhouse_cluster_config: None, oximeter_read_version: *Generation::new(), @@ -1531,6 +1532,7 @@ mod test { slot: 0, transit_ips: vec![], }, + nexus_generation: *Generation::new(), }, ), image_source: BlueprintZoneImageSource::InstallDataset, @@ -1601,6 +1603,7 @@ mod test { internal_dns_version: *Generation::new(), external_dns_version: *Generation::new(), target_release_minimum_generation: *Generation::new(), + nexus_generation: *Generation::new(), cockroachdb_fingerprint: String::new(), clickhouse_cluster_config: None, oximeter_read_version: *Generation::new(), @@ -1789,6 +1792,7 @@ mod test { slot: 0, transit_ips: vec![], }, + nexus_generation: *Generation::new(), }, ), image_source: BlueprintZoneImageSource::InstallDataset, @@ -1822,6 +1826,7 @@ mod test { slot: 0, transit_ips: vec![], }, + nexus_generation: *Generation::new(), }, ), image_source: BlueprintZoneImageSource::InstallDataset, @@ -1866,6 +1871,7 @@ mod test { internal_dns_version: *Generation::new(), external_dns_version: *Generation::new(), target_release_minimum_generation: *Generation::new(), + nexus_generation: *Generation::new(), cockroachdb_fingerprint: String::new(), clickhouse_cluster_config: None, oximeter_read_version: *Generation::new(), @@ -2072,6 +2078,7 @@ mod test { slot: 0, transit_ips: vec![], }, + nexus_generation: *Generation::new(), }, ), image_source: BlueprintZoneImageSource::InstallDataset, @@ -2123,6 +2130,7 @@ mod test { creator: "test suite".to_string(), comment: "test blueprint".to_string(), report: PlanningReport::new(blueprint_id), + nexus_generation: *Generation::new(), }; let rack = datastore @@ 
-2296,6 +2304,7 @@ mod test { slot: 0, transit_ips: vec![], }, + nexus_generation: *Generation::new(), }, ), image_source: BlueprintZoneImageSource::InstallDataset, @@ -2314,6 +2323,7 @@ mod test { internal_dns_version: *Generation::new(), external_dns_version: *Generation::new(), target_release_minimum_generation: *Generation::new(), + nexus_generation: *Generation::new(), cockroachdb_fingerprint: String::new(), clickhouse_cluster_config: None, oximeter_read_version: *Generation::new(), @@ -2436,6 +2446,7 @@ mod test { slot: 0, transit_ips: vec![], }, + nexus_generation: *Generation::new(), }, ), image_source: BlueprintZoneImageSource::InstallDataset, @@ -2456,6 +2467,7 @@ mod test { internal_dns_version: *Generation::new(), external_dns_version: *Generation::new(), target_release_minimum_generation: *Generation::new(), + nexus_generation: *Generation::new(), cockroachdb_fingerprint: String::new(), clickhouse_cluster_config: None, oximeter_read_version: *Generation::new(), diff --git a/nexus/db-schema/src/schema.rs b/nexus/db-schema/src/schema.rs index 4f8bd54a7a2..23d4aa089e5 100644 --- a/nexus/db-schema/src/schema.rs +++ b/nexus/db-schema/src/schema.rs @@ -1970,6 +1970,8 @@ table! { cockroachdb_setting_preserve_downgrade -> Nullable, target_release_minimum_generation -> Int8, + + nexus_generation -> Int8, } } @@ -2070,6 +2072,7 @@ table! { filesystem_pool -> Uuid, image_source -> crate::enums::BpZoneImageSourceEnum, image_artifact_sha256 -> Nullable, + nexus_generation -> Nullable, } } diff --git a/nexus/reconfigurator/execution/src/dns.rs b/nexus/reconfigurator/execution/src/dns.rs index 2ce03d66421..1e3cc26ea68 100644 --- a/nexus/reconfigurator/execution/src/dns.rs +++ b/nexus/reconfigurator/execution/src/dns.rs @@ -597,6 +597,7 @@ mod test { nic, external_tls, external_dns_servers, + nexus_generation: Generation::new(), }) } OmicronZoneType::Oximeter { address } => { @@ -720,6 +721,7 @@ mod test { internal_dns_version: initial_dns_generation, external_dns_version: Generation::new(), target_release_minimum_generation: Generation::new(), + nexus_generation: Generation::new(), cockroachdb_fingerprint: String::new(), clickhouse_cluster_config: None, oximeter_read_version: Generation::new(), diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 0601c852fa7..7cf11ac45ea 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -515,6 +515,7 @@ pub struct BlueprintBuilder<'a> { sled_editors: BTreeMap, cockroachdb_setting_preserve_downgrade: CockroachDbPreserveDowngrade, target_release_minimum_generation: Generation, + nexus_generation: Generation, report: Option, creator: String, @@ -582,6 +583,7 @@ impl<'a> BlueprintBuilder<'a> { internal_dns_version: Generation::new(), external_dns_version: Generation::new(), target_release_minimum_generation: Generation::new(), + nexus_generation: Generation::new(), cockroachdb_fingerprint: String::new(), cockroachdb_setting_preserve_downgrade: CockroachDbPreserveDowngrade::DoNotModify, @@ -663,6 +665,7 @@ impl<'a> BlueprintBuilder<'a> { pending_mgs_updates: parent_blueprint.pending_mgs_updates.clone(), target_release_minimum_generation: parent_blueprint .target_release_minimum_generation, + nexus_generation: parent_blueprint.nexus_generation, report: None, creator: creator.to_owned(), operations: Vec::new(), @@ -857,6 +860,7 @@ impl<'a> BlueprintBuilder<'a> { 
external_dns_version: self.input.external_dns_version(), target_release_minimum_generation: self .target_release_minimum_generation, + nexus_generation: self.nexus_generation, cockroachdb_fingerprint: self .input .cockroachdb_settings() @@ -1608,6 +1612,7 @@ impl<'a> BlueprintBuilder<'a> { nic, external_tls, external_dns_servers: external_dns_servers.clone(), + nexus_generation: Generation::new(), }); let filesystem_pool = self.sled_select_zpool(sled_id, zone_type.kind())?; diff --git a/nexus/reconfigurator/planning/tests/output/example_builder_zone_counts_blueprint.txt b/nexus/reconfigurator/planning/tests/output/example_builder_zone_counts_blueprint.txt index 15d2c2d6a77..6125703611f 100644 --- a/nexus/reconfigurator/planning/tests/output/example_builder_zone_counts_blueprint.txt +++ b/nexus/reconfigurator/planning/tests/output/example_builder_zone_counts_blueprint.txt @@ -531,6 +531,7 @@ parent: e35b2fdd-354d-48d9-acb5-703b2c269a54 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 diff --git a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt index 99e950f246e..98982124114 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt @@ -71,6 +71,7 @@ to: blueprint 4171ad05-89dd-474b-846b-b007e4346366 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt index 41df1375187..b724ec830ab 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt @@ -101,6 +101,7 @@ to: blueprint f432fcd5-1284-4058-8b4a-9286a3de6163 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_dataset_settings_modified_in_place_1_2.txt b/nexus/reconfigurator/planning/tests/output/planner_dataset_settings_modified_in_place_1_2.txt index 0e936766516..b619e3f7ddf 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_dataset_settings_modified_in_place_1_2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_dataset_settings_modified_in_place_1_2.txt @@ -122,6 +122,7 @@ to: blueprint fe13be30-94c2-4fa6-aad5-ae3c5028f6bb internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt index 488e3f69d00..e79bf2daf74 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt @@ -387,6 +387,7 @@ to: blueprint 1ac2d88f-27dd-4506-8585-6b2be832528e internal DNS version::: 1 (unchanged) 
external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt index 93d346b3170..ea1d786354f 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt @@ -322,6 +322,7 @@ parent: 516e80a3-b362-4fac-bd3c-4559717120dd internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 diff --git a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_1_2.txt b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_1_2.txt index 63380e2c1eb..1be47958ab0 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_1_2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_1_2.txt @@ -320,6 +320,7 @@ to: blueprint 31ef2071-2ec9-49d9-8827-fd83b17a0e3d internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_3_4.txt b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_3_4.txt index b2d2dee5588..af0649e7506 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_3_4.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_3_4.txt @@ -9,6 +9,7 @@ to: blueprint 92fa943c-7dd4-48c3-9447-c9d0665744b6 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_4_5.txt b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_4_5.txt index ea64f823b0a..0dc39530072 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_4_5.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_4_5.txt @@ -223,6 +223,7 @@ to: blueprint 2886dab5-61a2-46b4-87af-bc7aeb44cccb internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_5_6.txt b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_5_6.txt index 6b4c5f48e30..e33a4d4a9f0 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_5_6.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_5_6.txt @@ -9,6 +9,7 @@ to: blueprint cb39be9d-5476-44fa-9edf-9938376219ef internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_3_4.txt 
b/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_3_4.txt index 9394b253cc6..64f86ecfe65 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_3_4.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_3_4.txt @@ -408,6 +408,7 @@ to: blueprint 74f2e7fd-687e-4c9e-b5d8-e474a5bb8e7c internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_5_6.txt b/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_5_6.txt index 744379716fc..f5a11fe0dbc 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_5_6.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_5_6.txt @@ -9,6 +9,7 @@ to: blueprint df68d4d4-5af4-4b56-95bb-1654a6957d4f internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_zones_after_policy_is_changed_3_4.txt b/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_zones_after_policy_is_changed_3_4.txt index 5e439554691..fd72c3cda5c 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_zones_after_policy_is_changed_3_4.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_zones_after_policy_is_changed_3_4.txt @@ -338,6 +338,7 @@ to: blueprint d895ef50-9978-454c-bdfb-b8dbe2c9a918 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt index 99ccd504aaf..13f6efd866f 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt @@ -373,6 +373,7 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt index 8bd822a364c..162e4c1ad69 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt @@ -349,6 +349,9 @@ mismatched zone type: after: Nexus( }, external_tls: false, external_dns_servers: [], + nexus_generation: Generation( + 1, + ), }, ) @@ -368,6 +371,7 @@ mismatched zone type: after: InternalNtp( internal DNS version::: 1 (unchanged) * external DNS version::: 1 -> 2 target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt 
b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt index 77c19780bed..e26d517c3e6 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt @@ -510,6 +510,7 @@ parent: 4d4e6c38-cd95-4c4e-8f45-6af4d686964b internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 diff --git a/nexus/reconfigurator/planning/tests/output/zone_image_source_change_1.txt b/nexus/reconfigurator/planning/tests/output/zone_image_source_change_1.txt index 440e7e28e51..1f229c2ec10 100644 --- a/nexus/reconfigurator/planning/tests/output/zone_image_source_change_1.txt +++ b/nexus/reconfigurator/planning/tests/output/zone_image_source_change_1.txt @@ -122,6 +122,7 @@ to: blueprint 1481141d-a5cf-4103-8344-738967e0f110 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/src/app/background/tasks/blueprint_execution.rs b/nexus/src/app/background/tasks/blueprint_execution.rs index 2ac61471e7c..27315202719 100644 --- a/nexus/src/app/background/tasks/blueprint_execution.rs +++ b/nexus/src/app/background/tasks/blueprint_execution.rs @@ -278,6 +278,7 @@ mod test { internal_dns_version: dns_version, external_dns_version: dns_version, target_release_minimum_generation: Generation::new(), + nexus_generation: Generation::new(), cockroachdb_fingerprint: String::new(), clickhouse_cluster_config: None, oximeter_read_version: Generation::new(), diff --git a/nexus/src/app/background/tasks/blueprint_load.rs b/nexus/src/app/background/tasks/blueprint_load.rs index d2d9c7c380e..7b7f546388d 100644 --- a/nexus/src/app/background/tasks/blueprint_load.rs +++ b/nexus/src/app/background/tasks/blueprint_load.rs @@ -225,6 +225,7 @@ mod test { internal_dns_version: Generation::new(), external_dns_version: Generation::new(), target_release_minimum_generation: Generation::new(), + nexus_generation: Generation::new(), cockroachdb_fingerprint: String::new(), clickhouse_cluster_config: None, oximeter_read_version: Generation::new(), diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index d4655b3eede..1a5a2847cdd 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -882,6 +882,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { vni: Vni::SERVICES_VNI, transit_ips: vec![], }, + nexus_generation: Generation::new(), }), image_source: BlueprintZoneImageSource::InstallDataset, }); @@ -967,6 +968,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { internal_dns_version: dns_config.generation, external_dns_version: Generation::new(), target_release_minimum_generation: Generation::new(), + nexus_generation: Generation::new(), cockroachdb_fingerprint: String::new(), cockroachdb_setting_preserve_downgrade: CockroachDbPreserveDowngrade::DoNotModify, diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index 5928daf9363..2c66a5f8575 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -227,6 +227,13 @@ pub struct Blueprint { /// driving the system to the target release. 
pub target_release_minimum_generation: Generation, + /// The generation of the active group of Nexuses + /// + /// If a Nexus instance notices it has a nexus_generation less than + /// this value, it will start to quiesce in preparation for handing off + /// control to the newer generation (see: RFD 588). + pub nexus_generation: Generation, + /// CockroachDB state fingerprint when this blueprint was created // See `nexus/db-queries/src/db/datastore/cockroachdb_settings.rs` for more // on this. @@ -275,6 +282,7 @@ impl Blueprint { external_dns_version: self.external_dns_version, target_release_minimum_generation: self .target_release_minimum_generation, + nexus_generation: self.nexus_generation, cockroachdb_fingerprint: self.cockroachdb_fingerprint.clone(), cockroachdb_setting_preserve_downgrade: Some( self.cockroachdb_setting_preserve_downgrade, @@ -609,6 +617,7 @@ impl BlueprintDisplay<'_> { .target_release_minimum_generation .to_string(), ), + (NEXUS_GENERATION, self.blueprint.nexus_generation.to_string()), ], ) } @@ -651,6 +660,7 @@ impl fmt::Display for BlueprintDisplay<'_> { // These six fields are handled by `make_metadata_table()`, called // below. target_release_minimum_generation: _, + nexus_generation: _, internal_dns_version: _, external_dns_version: _, time_created: _, @@ -2073,6 +2083,10 @@ pub struct BlueprintMetadata { /// /// See [`Blueprint::target_release_minimum_generation`]. pub target_release_minimum_generation: Generation, + /// The Nexus generation number + /// + /// See [`Blueprint::nexus_generation`]. + pub nexus_generation: Generation, /// CockroachDB state fingerprint when this blueprint was created pub cockroachdb_fingerprint: String, /// Whether to set `cluster.preserve_downgrade_option` and what to set it to diff --git a/nexus/types/src/deployment/blueprint_diff.rs b/nexus/types/src/deployment/blueprint_diff.rs index a29cb57317f..6a1646ddd48 100644 --- a/nexus/types/src/deployment/blueprint_diff.rs +++ b/nexus/types/src/deployment/blueprint_diff.rs @@ -64,6 +64,7 @@ impl<'a> BlueprintDiffSummary<'a> { pending_mgs_updates, clickhouse_cluster_config, target_release_minimum_generation, + nexus_generation, // Metadata fields for which changes don't reflect semantic // changes from one blueprint to the next. id: _, @@ -112,6 +113,11 @@ impl<'a> BlueprintDiffSummary<'a> { return true; } + // Did the nexus generation change? + if nexus_generation.before != nexus_generation.after { + return true; + } + // All fields checked or ignored; if we get here, there are no // meaningful changes. false @@ -1834,6 +1840,7 @@ impl<'diff, 'b> BlueprintDiffDisplay<'diff, 'b> { target_release_minimum_generation, TARGET_RELEASE_MIN_GEN ), + diff_row!(nexus_generation, NEXUS_GENERATION), ], ), ] diff --git a/nexus/types/src/deployment/blueprint_display.rs b/nexus/types/src/deployment/blueprint_display.rs index e0dc0080f95..dec9ce3e699 100644 --- a/nexus/types/src/deployment/blueprint_display.rs +++ b/nexus/types/src/deployment/blueprint_display.rs @@ -44,6 +44,7 @@ pub mod constants { pub const EXTERNAL_DNS_VERSION: &str = "external DNS version"; // Keep this a bit short to not make the key column too wide. 
pub const TARGET_RELEASE_MIN_GEN: &str = "target release min gen"; + pub const NEXUS_GENERATION: &str = "nexus gen"; pub const COMMENT: &str = "comment"; pub const UNCHANGED_PARENS: &str = "(unchanged)"; diff --git a/nexus/types/src/deployment/zone_type.rs b/nexus/types/src/deployment/zone_type.rs index 31e26c3a994..79cb68fb98a 100644 --- a/nexus/types/src/deployment/zone_type.rs +++ b/nexus/types/src/deployment/zone_type.rs @@ -343,6 +343,7 @@ pub mod blueprint_zone_type { use crate::deployment::OmicronZoneExternalSnatIp; use daft::Diffable; use nexus_sled_agent_shared::inventory::OmicronZoneDataset; + use omicron_common::api::external::Generation; use omicron_common::api::internal::shared::NetworkInterface; use schemars::JsonSchema; use serde::Deserialize; @@ -566,6 +567,10 @@ pub mod blueprint_zone_type { pub external_tls: bool, /// External DNS servers Nexus can use to resolve external hosts. pub external_dns_servers: Vec, + /// Generation number for this Nexus zone. + /// This is used to coordinate handoff between old and new Nexus instances + /// during updates. See RFD 588. + pub nexus_generation: Generation, } #[derive( diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index 3d41de3fe62..bc1cd3df567 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -2568,6 +2568,14 @@ } ] }, + "nexus_generation": { + "description": "The generation of the active group of Nexuses\n\nIf a Nexus instance notices it has a nexus_generation less than this value, it will start to quiesce in preparation for handing off control to the newer generation (see: RFD 588).", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, "oximeter_read_mode": { "description": "Whether oximeter should read from a single node or a cluster", "allOf": [ @@ -2638,6 +2646,7 @@ "external_dns_version", "id", "internal_dns_version", + "nexus_generation", "oximeter_read_mode", "oximeter_read_version", "pending_mgs_updates", @@ -2862,6 +2871,14 @@ } ] }, + "nexus_generation": { + "description": "The Nexus generation number\n\nSee [`Blueprint::nexus_generation`].", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, "parent_blueprint_id": { "nullable": true, "description": "which blueprint this blueprint is based on", @@ -2892,6 +2909,7 @@ "external_dns_version", "id", "internal_dns_version", + "nexus_generation", "target_release_minimum_generation", "time_created" ] @@ -3517,6 +3535,14 @@ "description": "The address at which the internal nexus server is reachable.", "type": "string" }, + "nexus_generation": { + "description": "Generation number for this Nexus zone. This is used to coordinate handoff between old and new Nexus instances during updates. See RFD 588.", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, "nic": { "description": "The service vNIC providing external connectivity using OPTE.", "allOf": [ @@ -3537,6 +3563,7 @@ "external_ip", "external_tls", "internal_address", + "nexus_generation", "nic", "type" ] diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index bad381cb5c8..552d3cf3d0e 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -4524,7 +4524,10 @@ CREATE TABLE IF NOT EXISTS omicron.public.blueprint ( -- driving the system to the target release. -- -- This is set to 1 by default in application code. 
- target_release_minimum_generation INT8 NOT NULL + target_release_minimum_generation INT8 NOT NULL, + + -- The generation of the active group of Nexus instances + nexus_generation INT8 NOT NULL ); -- table describing both the current and historical target blueprints of the @@ -4734,6 +4737,9 @@ CREATE TABLE IF NOT EXISTS omicron.public.bp_omicron_zone ( image_source omicron.public.bp_zone_image_source NOT NULL, image_artifact_sha256 STRING(64), + -- Generation for Nexus zones + nexus_generation INT8, + PRIMARY KEY (blueprint_id, id), CONSTRAINT expunged_disposition_properties CHECK ( @@ -4751,6 +4757,12 @@ CREATE TABLE IF NOT EXISTS omicron.public.bp_omicron_zone ( OR (image_source != 'artifact' AND image_artifact_sha256 IS NULL) + ), + + CONSTRAINT nexus_generation_for_nexus_zones CHECK ( + (zone_type = 'nexus' AND nexus_generation IS NOT NULL) + OR + (zone_type != 'nexus' AND nexus_generation IS NULL) ) ); @@ -6590,7 +6602,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - (TRUE, NOW(), NOW(), '185.0.0', NULL) + (TRUE, NOW(), NOW(), '186.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/schema/crdb/nexus-generation/up01.sql b/schema/crdb/nexus-generation/up01.sql new file mode 100644 index 00000000000..42d87c2f6f7 --- /dev/null +++ b/schema/crdb/nexus-generation/up01.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.bp_omicron_zone ADD COLUMN IF NOT EXISTS nexus_generation INT8; diff --git a/schema/crdb/nexus-generation/up02.sql b/schema/crdb/nexus-generation/up02.sql new file mode 100644 index 00000000000..53429df8ebe --- /dev/null +++ b/schema/crdb/nexus-generation/up02.sql @@ -0,0 +1,5 @@ +SET LOCAL disallow_full_table_scans = off; + +UPDATE omicron.public.bp_omicron_zone +SET nexus_generation = 1 +WHERE zone_type = 'nexus'; diff --git a/schema/crdb/nexus-generation/up03.sql b/schema/crdb/nexus-generation/up03.sql new file mode 100644 index 00000000000..d7623a84c80 --- /dev/null +++ b/schema/crdb/nexus-generation/up03.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.blueprint ADD COLUMN IF NOT EXISTS nexus_generation INT8 NOT NULL DEFAULT 1; diff --git a/schema/crdb/nexus-generation/up04.sql b/schema/crdb/nexus-generation/up04.sql new file mode 100644 index 00000000000..072231d9b01 --- /dev/null +++ b/schema/crdb/nexus-generation/up04.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.blueprint ALTER COLUMN nexus_generation DROP DEFAULT; diff --git a/schema/crdb/nexus-generation/up05.sql b/schema/crdb/nexus-generation/up05.sql new file mode 100644 index 00000000000..6818b887a54 --- /dev/null +++ b/schema/crdb/nexus-generation/up05.sql @@ -0,0 +1,5 @@ +ALTER TABLE omicron.public.bp_omicron_zone ADD CONSTRAINT IF NOT EXISTS nexus_generation_for_nexus_zones CHECK ( + (zone_type = 'nexus' AND nexus_generation IS NOT NULL) + OR + (zone_type != 'nexus' AND nexus_generation IS NULL) +); diff --git a/sled-agent/src/rack_setup/plan/service.rs b/sled-agent/src/rack_setup/plan/service.rs index f7941e9724c..37ea4757d9f 100644 --- a/sled-agent/src/rack_setup/plan/service.rs +++ b/sled-agent/src/rack_setup/plan/service.rs @@ -26,7 +26,7 @@ use omicron_common::address::{ RSS_RESERVED_ADDRESSES, ReservedRackSubnet, SLED_PREFIX, get_sled_address, get_switch_zone_address, }; -use omicron_common::api::external::{MacAddr, Vni}; +use omicron_common::api::external::{Generation, MacAddr, Vni}; use omicron_common::api::internal::shared::{ NetworkInterface, NetworkInterfaceKind, SourceNatConfig, SourceNatConfigError, @@ -570,6 +570,7 @@ impl Plan { // development that it might not 
be. external_tls: !config.external_certificates.is_empty(), external_dns_servers: config.dns_servers.clone(), + nexus_generation: Generation::new(), }, ), filesystem_pool, diff --git a/sled-agent/src/rack_setup/service.rs b/sled-agent/src/rack_setup/service.rs index e2eaac58500..f18e205b6f7 100644 --- a/sled-agent/src/rack_setup/service.rs +++ b/sled-agent/src/rack_setup/service.rs @@ -1631,6 +1631,7 @@ pub(crate) fn build_initial_blueprint_from_sled_configs( // (including creating the recovery silo). external_dns_version: Generation::new(), target_release_minimum_generation: Generation::new(), + nexus_generation: Generation::new(), // Nexus will fill in the CockroachDB values during initialization. cockroachdb_fingerprint: String::new(), cockroachdb_setting_preserve_downgrade: diff --git a/sled-agent/src/sim/server.rs b/sled-agent/src/sim/server.rs index 12561713a75..b41bddf85ca 100644 --- a/sled-agent/src/sim/server.rs +++ b/sled-agent/src/sim/server.rs @@ -453,6 +453,7 @@ pub async fn run_standalone_server( }, external_tls: false, external_dns_servers: vec![], + nexus_generation: Generation::new(), }), filesystem_pool: get_random_zpool(), image_source: BlueprintZoneImageSource::InstallDataset, From cd38f25aafc93560877f7ec4aa8f21ec81488bd6 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Sun, 31 Aug 2025 12:23:43 -0700 Subject: [PATCH 26/38] Update Rust crate async-trait to 0.1.89 (#8846) --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 12857ad65d9..663fd0a10f8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -488,9 +488,9 @@ checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" -version = "0.1.88" +version = "0.1.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 495e504b205..62bcd680559 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -365,7 +365,7 @@ approx = "0.5.1" assert_matches = "1.5.0" assert_cmd = "2.0.17" async-bb8-diesel = "0.2" -async-trait = "0.1.88" +async-trait = "0.1.89" atomicwrites = "0.4.4" authz-macros = { path = "nexus/authz-macros" } backoff = { version = "0.4.0", features = [ "tokio" ] } From 6dcdd5100fbd4ff06026f1354da73707e237add0 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Wed, 20 Aug 2025 14:47:03 -0700 Subject: [PATCH 27/38] update quiesce states to reflect RFD 588 --- nexus/reconfigurator/execution/src/lib.rs | 72 ++-- nexus/src/app/background/init.rs | 5 +- .../background/tasks/blueprint_execution.rs | 66 +++- .../app/background/tasks/blueprint_planner.rs | 13 +- nexus/src/app/mod.rs | 23 +- nexus/src/app/quiesce.rs | 177 ++++++--- nexus/types/src/deployment.rs | 22 ++ nexus/types/src/internal_api/views.rs | 78 ++-- nexus/types/src/quiesce.rs | 364 +++++++++++------- 9 files changed, 522 insertions(+), 298 deletions(-) diff --git a/nexus/reconfigurator/execution/src/lib.rs b/nexus/reconfigurator/execution/src/lib.rs index adfda2e5958..76305e0d754 100644 --- a/nexus/reconfigurator/execution/src/lib.rs +++ b/nexus/reconfigurator/execution/src/lib.rs @@ -646,50 +646,36 @@ fn register_reassign_sagas_step<'a>( .into(); }; - // Re-assign sagas, but only if we're allowed to. 
If Nexus is - // quiescing, we don't want to assign any new sagas to - // ourselves. - let result = saga_quiesce.reassign_if_possible(async || { - // For any expunged Nexus zones, re-assign in-progress sagas - // to some other Nexus. If this fails for some reason, it - // doesn't affect anything else. - let sec_id = nexus_db_model::SecId::from(nexus_id); - let reassigned = sagas::reassign_sagas_from_expunged( - opctx, datastore, blueprint, sec_id, - ) - .await - .context("failed to re-assign sagas"); - match reassigned { - Ok(needs_saga_recovery) => ( - StepSuccess::new(needs_saga_recovery).build(), - needs_saga_recovery, - ), - Err(error) => { - // It's possible that we failed after having - // re-assigned sagas in the database. - let maybe_reassigned = true; - ( - StepWarning::new(false, error.to_string()) - .build(), - maybe_reassigned, - ) + // Re-assign sagas. + Ok(saga_quiesce + .reassign_sagas(async || { + // For any expunged Nexus zones, re-assign in-progress + // sagas to some other Nexus. If this fails for some + // reason, it doesn't affect anything else. + let sec_id = nexus_db_model::SecId::from(nexus_id); + let reassigned = sagas::reassign_sagas_from_expunged( + opctx, datastore, blueprint, sec_id, + ) + .await + .context("failed to re-assign sagas"); + match reassigned { + Ok(needs_saga_recovery) => ( + StepSuccess::new(needs_saga_recovery).build(), + needs_saga_recovery, + ), + Err(error) => { + // It's possible that we failed after having + // re-assigned sagas in the database. + let maybe_reassigned = true; + ( + StepWarning::new(false, error.to_string()) + .build(), + maybe_reassigned, + ) + } } - } - }); - - match result.await { - // Re-assignment is allowed, and we did try. It may or may - // not have succeeded. Either way, that's reflected in - // `step_result`. - Ok(step_result) => Ok(step_result), - // Re-assignment is disallowed. Report this step skipped - // with an explanation of why. - Err(error) => StepSkipped::new( - false, - InlineErrorChain::new(&error).to_string(), - ) - .into(), - } + }) + .await) }, ) .register() diff --git a/nexus/src/app/background/init.rs b/nexus/src/app/background/init.rs index 51b6d1d8658..afaf57f82a7 100644 --- a/nexus/src/app/background/init.rs +++ b/nexus/src/app/background/init.rs @@ -131,6 +131,7 @@ use super::tasks::vpc_routes; use super::tasks::webhook_deliverator; use crate::Nexus; use crate::app::oximeter::PRODUCER_LEASE_DURATION; +use crate::app::quiesce::NexusQuiesceHandle; use crate::app::saga::StartSaga; use nexus_background_task_interface::Activator; use nexus_background_task_interface::BackgroundTasks; @@ -437,7 +438,7 @@ impl BackgroundTasksInitializer { nexus_id, task_saga_recovery.clone(), args.mgs_updates_tx, - args.saga_recovery.quiesce.clone(), + args.nexus_quiesce, ); let rx_blueprint_exec = blueprint_executor.watcher(); driver.register(TaskDefinition { @@ -1028,6 +1029,8 @@ pub struct BackgroundTasksData { pub webhook_delivery_client: reqwest::Client, /// Channel for configuring pending MGS updates pub mgs_updates_tx: watch::Sender, + /// handle for controlling Nexus quiesce + pub nexus_quiesce: NexusQuiesceHandle, } /// Starts the three DNS-propagation-related background tasks for either diff --git a/nexus/src/app/background/tasks/blueprint_execution.rs b/nexus/src/app/background/tasks/blueprint_execution.rs index 27315202719..b930fc51079 100644 --- a/nexus/src/app/background/tasks/blueprint_execution.rs +++ b/nexus/src/app/background/tasks/blueprint_execution.rs @@ -4,7 +4,10 @@ //! 
Background task for realizing a plan blueprint -use crate::app::background::{Activator, BackgroundTask}; +use crate::app::{ + background::{Activator, BackgroundTask}, + quiesce::NexusQuiesceHandle, +}; use futures::FutureExt; use futures::future::BoxFuture; use internal_dns_resolver::Resolver; @@ -13,14 +16,12 @@ use nexus_db_queries::db::DataStore; use nexus_reconfigurator_execution::{ RealizeBlueprintOutput, RequiredRealizeArgs, }; -use nexus_types::{ - deployment::{ - Blueprint, BlueprintTarget, PendingMgsUpdates, execution::EventBuffer, - }, - quiesce::SagaQuiesceHandle, +use nexus_types::deployment::{ + Blueprint, BlueprintTarget, PendingMgsUpdates, execution::EventBuffer, }; use omicron_uuid_kinds::OmicronZoneUuid; use serde_json::json; +use slog_error_chain::InlineErrorChain; use std::sync::Arc; use tokio::sync::watch; use update_engine::NestedError; @@ -35,7 +36,7 @@ pub struct BlueprintExecutor { tx: watch::Sender, saga_recovery: Activator, mgs_update_tx: watch::Sender, - saga_quiesce: SagaQuiesceHandle, + nexus_quiesce: NexusQuiesceHandle, } impl BlueprintExecutor { @@ -48,7 +49,7 @@ impl BlueprintExecutor { nexus_id: OmicronZoneUuid, saga_recovery: Activator, mgs_update_tx: watch::Sender, - saga_quiesce: SagaQuiesceHandle, + nexus_quiesce: NexusQuiesceHandle, ) -> BlueprintExecutor { let (tx, _) = watch::channel(0); BlueprintExecutor { @@ -59,7 +60,7 @@ impl BlueprintExecutor { tx, saga_recovery, mgs_update_tx, - saga_quiesce, + nexus_quiesce, } } @@ -87,6 +88,47 @@ impl BlueprintExecutor { }; let (bp_target, blueprint) = &*update; + + // Regardless of anything else: propagate whatever this blueprint + // says about our quiescing state. + // + // During startup under normal operation, the blueprint will reflect + // that we're not quiescing. Propagating this will enable sagas to + // be created elsewhere in Nexus. + // + // At some point during an upgrade, we'll encounter a blueprint that + // reflects that we are quiescing. Propagating this will disable sagas + // from being created. + // + // In all other cases, this will have no effect. + // + // We do this now, before doing anything else, for two reasons: (1) + // during startup, we want to do this ASAP to minimize unnecessary saga + // creation failures (i.e., don't wait until we try to execute the + // blueprint before enabling sagas, since we already know if we're + // quiescing or not); and (2) because we want to do it even if blueprint + // execution is disabled. + match blueprint.nexus_quiescing(self.nexus_id) { + Ok(quiescing) => { + debug!( + &opctx.log, + "blueprint execution: quiesce check"; + "quiescing" => quiescing + ); + self.nexus_quiesce.set_quiescing(quiescing); + } + Err(error) => { + // This should be impossible. But it doesn't really affect + // anything else so there's no reason to stop execution. 
+ error!( + &opctx.log, + "blueprint execution: failed to determine if this Nexus \ + is quiescing"; + InlineErrorChain::new(&*error) + ); + } + }; + if !bp_target.enabled { warn!(&opctx.log, "Blueprint execution: skipped"; @@ -119,7 +161,7 @@ impl BlueprintExecutor { blueprint, sender, mgs_updates: self.mgs_update_tx.clone(), - saga_quiesce: self.saga_quiesce.clone(), + saga_quiesce: self.nexus_quiesce.sagas(), } .as_nexus(self.nexus_id), ) @@ -181,6 +223,7 @@ impl BackgroundTask for BlueprintExecutor { mod test { use super::BlueprintExecutor; use crate::app::background::{Activator, BackgroundTask}; + use crate::app::quiesce::NexusQuiesceHandle; use httptest::Expectation; use httptest::matchers::{not, request}; use httptest::responders::status_code; @@ -207,7 +250,6 @@ mod test { PlanningReport, blueprint_zone_type, }; use nexus_types::external_api::views::SledState; - use nexus_types::quiesce::SagaQuiesceHandle; use omicron_common::api::external; use omicron_common::api::external::Generation; use omicron_common::zpool_name::ZpoolName; @@ -390,7 +432,7 @@ mod test { OmicronZoneUuid::new_v4(), Activator::new(), dummy_tx, - SagaQuiesceHandle::new(opctx.log.clone()), + NexusQuiesceHandle::new(&opctx.log, datastore.clone()), ); // Now we're ready. diff --git a/nexus/src/app/background/tasks/blueprint_planner.rs b/nexus/src/app/background/tasks/blueprint_planner.rs index d6519fdad3b..c33766e1efb 100644 --- a/nexus/src/app/background/tasks/blueprint_planner.rs +++ b/nexus/src/app/background/tasks/blueprint_planner.rs @@ -273,18 +273,15 @@ impl BackgroundTask for BlueprintPlanner { #[cfg(test)] mod test { use super::*; - use crate::app::background::Activator; use crate::app::background::tasks::blueprint_execution::BlueprintExecutor; use crate::app::background::tasks::blueprint_load::TargetBlueprintLoader; use crate::app::background::tasks::inventory_collection::InventoryCollector; + use crate::app::{background::Activator, quiesce::NexusQuiesceHandle}; use nexus_inventory::now_db_precision; use nexus_test_utils_macros::nexus_test; - use nexus_types::{ - deployment::{ - PendingMgsUpdates, PlannerChickenSwitches, - ReconfiguratorChickenSwitches, - }, - quiesce::SagaQuiesceHandle, + use nexus_types::deployment::{ + PendingMgsUpdates, PlannerChickenSwitches, + ReconfiguratorChickenSwitches, }; use omicron_uuid_kinds::OmicronZoneUuid; @@ -423,7 +420,7 @@ mod test { OmicronZoneUuid::new_v4(), Activator::new(), dummy_tx, - SagaQuiesceHandle::new(opctx.log.clone()), + NexusQuiesceHandle::new(&opctx.log, datastore.clone()), ); let value = executor.activate(&opctx).await; let value = value.as_object().expect("response is not a JSON object"); diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index cb3ca045cf9..c8abeac6e05 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -27,7 +27,6 @@ use nexus_db_queries::db; use nexus_mgs_updates::ArtifactCache; use nexus_mgs_updates::MgsUpdateDriver; use nexus_types::deployment::PendingMgsUpdates; -use nexus_types::quiesce::SagaQuiesceHandle; use omicron_common::address::DENDRITE_PORT; use omicron_common::address::MGD_PORT; use omicron_common::address::MGS_PORT; @@ -111,11 +110,11 @@ pub(crate) mod sagas; // TODO: When referring to API types, we should try to include // the prefix unless it is unambiguous. 
+use crate::app::quiesce::NexusQuiesceHandle; pub(crate) use nexus_db_model::MAX_NICS_PER_INSTANCE; pub(crate) use nexus_db_queries::db::queries::disk::MAX_DISKS_PER_INSTANCE; use nexus_mgs_updates::DEFAULT_RETRY_TIMEOUT; use nexus_types::internal_api::views::MgsUpdateDriverStatus; -use nexus_types::internal_api::views::QuiesceState; use sagas::demo::CompletingDemoSagas; // XXX: Might want to recast as max *floating* IPs, we have at most one @@ -280,11 +279,8 @@ pub struct Nexus { #[allow(dead_code)] repo_depot_resolver: Box, - /// whether Nexus is quiescing, and how far it's gotten - quiesce: watch::Sender, - - /// details about saga quiescing - saga_quiesce: SagaQuiesceHandle, + /// state of overall Nexus quiesce activity + quiesce: NexusQuiesceHandle, } impl Nexus { @@ -336,6 +332,8 @@ impl Nexus { sec_store, )); + let quiesce = NexusQuiesceHandle::new(&log, db_datastore.clone()); + // It's a bit of a red flag to use an unbounded channel. // // This particular channel is used to send a Uuid from the saga executor @@ -360,14 +358,11 @@ impl Nexus { // task. If someone changed the config, they'd have to remember to // update this here. This doesn't seem worth it. let (saga_create_tx, saga_recovery_rx) = mpsc::unbounded_channel(); - let saga_quiesce = SagaQuiesceHandle::new( - log.new(o!("component" => "SagaQuiesceHandle")), - ); let sagas = Arc::new(SagaExecutor::new( Arc::clone(&sec_client), log.new(o!("component" => "SagaExecutor")), saga_create_tx, - saga_quiesce.clone(), + quiesce.sagas(), )); // Create a channel for replicating repository artifacts. 16 is a @@ -465,8 +460,6 @@ impl Nexus { let mgs_update_status_rx = mgs_update_driver.status_rx(); let _mgs_driver_task = tokio::spawn(mgs_update_driver.run()); - let (quiesce, _) = watch::channel(QuiesceState::running()); - let nexus = Nexus { id: config.deployment.id, rack_id, @@ -520,7 +513,6 @@ impl Nexus { mgs_resolver, repo_depot_resolver, quiesce, - saga_quiesce, }; // TODO-cleanup all the extra Arcs here seems wrong @@ -570,6 +562,7 @@ impl Nexus { webhook_delivery_client: task_nexus .webhook_delivery_client .clone(), + nexus_quiesce: task_nexus.quiesce.clone(), saga_recovery: SagaRecoveryHelpers { recovery_opctx: saga_recovery_opctx, @@ -577,7 +570,7 @@ impl Nexus { sec_client: sec_client.clone(), registry: sagas::ACTION_REGISTRY.clone(), sagas_started_rx: saga_recovery_rx, - quiesce: task_nexus.saga_quiesce.clone(), + quiesce: task_nexus.quiesce.sagas(), }, tuf_artifact_replication_rx, mgs_updates_tx, diff --git a/nexus/src/app/quiesce.rs b/nexus/src/app/quiesce.rs index 6c8fe05decd..a4f6e18fdfc 100644 --- a/nexus/src/app/quiesce.rs +++ b/nexus/src/app/quiesce.rs @@ -14,6 +14,7 @@ use nexus_types::internal_api::views::QuiesceStatus; use nexus_types::quiesce::SagaQuiesceHandle; use omicron_common::api::external::LookupResult; use omicron_common::api::external::UpdateResult; +use slog::Logger; use std::sync::Arc; use std::time::Instant; use tokio::sync::watch; @@ -21,26 +22,7 @@ use tokio::sync::watch; impl super::Nexus { pub async fn quiesce_start(&self, opctx: &OpContext) -> UpdateResult<()> { opctx.authorize(authz::Action::Modify, &authz::QUIESCE_STATE).await?; - let started = self.quiesce.send_if_modified(|q| { - if let QuiesceState::Running = q { - let time_requested = Utc::now(); - let time_waiting_for_sagas = Instant::now(); - *q = QuiesceState::WaitingForSagas { - time_requested, - time_waiting_for_sagas, - }; - true - } else { - false - } - }); - if started { - tokio::spawn(do_quiesce( - self.quiesce.clone(), - 
self.saga_quiesce.clone(), - self.datastore().clone(), - )); - } + self.quiesce.set_quiescing(true); Ok(()) } @@ -49,56 +31,163 @@ impl super::Nexus { opctx: &OpContext, ) -> LookupResult { opctx.authorize(authz::Action::Read, &authz::QUIESCE_STATE).await?; - let state = self.quiesce.borrow().clone(); - let sagas_pending = self.saga_quiesce.sagas_pending(); + let state = self.quiesce.state(); + let sagas_pending = self.quiesce.sagas().sagas_pending(); let db_claims = self.datastore().claims_held(); Ok(QuiesceStatus { state, sagas_pending, db_claims }) } } -async fn do_quiesce( - quiesce: watch::Sender, - saga_quiesce: SagaQuiesceHandle, +/// Describes the configuration and state around quiescing Nexus +#[derive(Clone)] +pub struct NexusQuiesceHandle { + log: Logger, datastore: Arc, -) { - assert_matches!(*quiesce.borrow(), QuiesceState::WaitingForSagas { .. }); - saga_quiesce.quiesce(); - saga_quiesce.wait_for_quiesced().await; - quiesce.send_modify(|q| { - let QuiesceState::WaitingForSagas { + sagas: SagaQuiesceHandle, + state: watch::Sender, +} + +impl NexusQuiesceHandle { + pub fn new(log: &Logger, datastore: Arc) -> NexusQuiesceHandle { + let my_log = log.new(o!("component" => "NexusQuiesceHandle")); + let saga_quiesce_log = log.new(o!("component" => "SagaQuiesceHandle")); + let sagas = SagaQuiesceHandle::new(saga_quiesce_log); + let (state, _) = watch::channel(QuiesceState::Undetermined); + NexusQuiesceHandle { log: my_log, datastore, sagas, state } + } + + pub fn sagas(&self) -> SagaQuiesceHandle { + self.sagas.clone() + } + + pub fn state(&self) -> QuiesceState { + self.state.borrow().clone() + } + + pub fn set_quiescing(&self, quiescing: bool) { + let new_state = if quiescing { + let time_requested = Utc::now(); + let time_draining_sagas = Instant::now(); + QuiesceState::DrainingSagas { time_requested, time_draining_sagas } + } else { + QuiesceState::Running + }; + + let changed = self.state.send_if_modified(|q| { + match q { + QuiesceState::Undetermined => { + info!(&self.log, "initial state"; "state" => ?new_state); + *q = new_state; + true + } + QuiesceState::Running if quiescing => { + info!(&self.log, "quiesce starting"); + *q = new_state; + true + } + _ => { + // All other cases are either impossible or no-ops. + false + } + } + }); + + if changed && quiescing { + // Immediately quiesce sagas. + self.sagas.set_quiescing(quiescing); + // Asynchronously complete the rest of the quiesce process. + if quiescing { + tokio::spawn(do_quiesce(self.clone())); + } + } + } +} + +async fn do_quiesce(quiesce: NexusQuiesceHandle) { + let saga_quiesce = quiesce.sagas.clone(); + let datastore = quiesce.datastore.clone(); + + // NOTE: This sequence will change as we implement RFD 588. + // We will need to use the datastore to report our saga drain status and + // also to see when other Nexus instances have finished draining their + // sagas. For now, this implementation begins quiescing its database as + // soon as its sagas are locally drained. + assert_matches!( + *quiesce.state.borrow(), + QuiesceState::DrainingSagas { .. } + ); + + // TODO per RFD 588, this is where we will enter a loop, pausing either on + // timeout or when our local quiesce state changes. At each pause: if we + // need to update our db_metadata_nexus record, do so. Then load the + // current blueprint and check the records for all nexus instances. + // + // For now, we skip the cross-Nexus coordination and simply wait for our own + // Nexus to finish what it's doing. 
+ saga_quiesce.wait_for_drained().await; + + quiesce.state.send_modify(|q| { + let QuiesceState::DrainingSagas { time_requested, - time_waiting_for_sagas, + time_draining_sagas, } = *q else { panic!("wrong state in do_quiesce(): {:?}", q); }; - *q = QuiesceState::WaitingForDb { + let time_draining_db = Instant::now(); + *q = QuiesceState::DrainingDb { time_requested, - time_waiting_for_sagas, - duration_waiting_for_sagas: time_waiting_for_sagas.elapsed(), - time_waiting_for_db: Instant::now(), + time_draining_sagas, + duration_draining_sagas: time_draining_db - time_draining_sagas, + time_draining_db, }; }); datastore.quiesce(); datastore.wait_for_quiesced().await; - quiesce.send_modify(|q| { - let QuiesceState::WaitingForDb { + quiesce.state.send_modify(|q| { + let QuiesceState::DrainingDb { time_requested, - time_waiting_for_sagas, - duration_waiting_for_sagas, - time_waiting_for_db, + time_draining_sagas, + duration_draining_sagas, + time_draining_db, + } = *q + else { + panic!("wrong state in do_quiesce(): {:?}", q); + }; + let time_recording_quiesce = Instant::now(); + *q = QuiesceState::RecordingQuiesce { + time_requested, + time_draining_sagas, + duration_draining_sagas, + duration_draining_db: time_recording_quiesce - time_draining_db, + time_recording_quiesce, + }; + }); + + // TODO per RFD 588, this is where we will enter a loop trying to update our + // database record for the last time. + + quiesce.state.send_modify(|q| { + let QuiesceState::RecordingQuiesce { + time_requested, + time_draining_sagas, + duration_draining_sagas, + duration_draining_db, + time_recording_quiesce, } = *q else { panic!("wrong state in do_quiesce(): {:?}", q); }; + let finished = Instant::now(); *q = QuiesceState::Quiesced { time_requested, - duration_waiting_for_sagas, - duration_waiting_for_db: finished - time_waiting_for_db, - duration_total: finished - time_waiting_for_sagas, time_quiesced: Utc::now(), + duration_draining_sagas, + duration_draining_db, + duration_recording_quiesce: finished - time_recording_quiesce, + duration_total: finished - time_draining_sagas, }; }); } diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index 2c66a5f8575..35cb0692304 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -76,6 +76,8 @@ mod planning_report; mod zone_type; use crate::inventory::BaseboardId; +use anyhow::anyhow; +use anyhow::bail; pub use blueprint_diff::BlueprintDiffSummary; use blueprint_display::BpPendingMgsUpdates; pub use chicken_switches::PlannerChickenSwitches; @@ -383,6 +385,26 @@ impl Blueprint { pub fn display(&self) -> BlueprintDisplay<'_> { BlueprintDisplay { blueprint: self } } + + /// Returns whether the given Nexus instance should be quiescing or quiesced + /// in preparation for handoff to the next generation + pub fn nexus_quiescing( + &self, + nexus_id: OmicronZoneUuid, + ) -> Result { + let zone = self + .all_omicron_zones(|_z| true) + .find(|(_sled_id, zone_config)| zone_config.id == nexus_id) + .ok_or_else(|| { + anyhow!("zone {} does not exist in blueprint", nexus_id) + })? 
+ .1; + let BlueprintZoneType::Nexus(zone_config) = &zone.zone_type else { + bail!("zone {} is not a Nexus zone", nexus_id); + }; + + Ok(zone_config.nexus_generation < self.nexus_generation) + } } /// Wrapper to display a table of a `BlueprintSledConfig`'s host phase 2 diff --git a/nexus/types/src/internal_api/views.rs b/nexus/types/src/internal_api/views.rs index 49d744fc715..ab9a5922df8 100644 --- a/nexus/types/src/internal_api/views.rs +++ b/nexus/types/src/internal_api/views.rs @@ -996,19 +996,28 @@ pub struct QuiesceStatus { /// At any given time, Nexus is always in one of these states: /// /// ```text +/// Undetermined (have not loaded persistent state; don't know yet) +/// | +/// | load persistent state and find we're not quiescing +/// v /// Running (normal operation) /// | /// | quiesce starts /// v -/// WaitingForSagas (no new sagas are allowed, but some are still running) +/// DrainingSagas (no new sagas are allowed, but some are still running) /// | /// | no more sagas running /// v -/// WaitingForDb (no sagas running; no new db connections may be -/// acquired by Nexus at-large, but some are still held) +/// DrainingDb (no sagas running; no new db connections may be +/// | acquired by Nexus at-large, but some are still held) /// | /// | no more database connections held /// v +/// RecordingQuiesce (everything is quiesced aside from one connection being +/// | used to record our final quiesced state) +/// | +/// | finish recording quiesce state in database +/// v /// Quiesced (no sagas running, no database connections in use) /// ``` /// @@ -1019,58 +1028,51 @@ pub struct QuiesceStatus { #[serde(rename_all = "snake_case")] #[serde(tag = "state", content = "quiesce_details")] pub enum QuiesceState { + /// We have not yet determined based on persistent state if we're supposed + /// to be quiesced or not + Undetermined, /// Normal operation Running, - /// New sagas disallowed, but some are still running. - WaitingForSagas { + /// New sagas disallowed, but some are still running on some Nexus instances + DrainingSagas { + time_requested: DateTime, + #[serde(skip)] + time_draining_sagas: Instant, + }, + /// No sagas running on any Nexus instances + /// + /// No new database connections may be claimed, but some database + /// connections are still held. + DrainingDb { time_requested: DateTime, #[serde(skip)] - time_waiting_for_sagas: Instant, + time_draining_sagas: Instant, + duration_draining_sagas: Duration, + #[serde(skip)] + time_draining_db: Instant, }, - /// No sagas running, no new database connections may be claimed, but some - /// database connections are still held. 
- WaitingForDb { + /// No database connections in use except to record the final "quiesced" + /// state + RecordingQuiesce { time_requested: DateTime, #[serde(skip)] - time_waiting_for_sagas: Instant, - duration_waiting_for_sagas: Duration, + time_draining_sagas: Instant, + duration_draining_sagas: Duration, + duration_draining_db: Duration, #[serde(skip)] - time_waiting_for_db: Instant, + time_recording_quiesce: Instant, }, /// Nexus has no sagas running and is not using the database Quiesced { time_requested: DateTime, time_quiesced: DateTime, - duration_waiting_for_sagas: Duration, - duration_waiting_for_db: Duration, + duration_draining_sagas: Duration, + duration_draining_db: Duration, + duration_recording_quiesce: Duration, duration_total: Duration, }, } -impl QuiesceState { - pub fn running() -> QuiesceState { - QuiesceState::Running - } - - pub fn quiescing(&self) -> bool { - match self { - QuiesceState::Running => false, - QuiesceState::WaitingForSagas { .. } - | QuiesceState::WaitingForDb { .. } - | QuiesceState::Quiesced { .. } => true, - } - } - - pub fn fully_quiesced(&self) -> bool { - match self { - QuiesceState::Running - | QuiesceState::WaitingForSagas { .. } - | QuiesceState::WaitingForDb { .. } => false, - QuiesceState::Quiesced { .. } => true, - } - } -} - /// Describes a pending saga (for debugging why quiesce is stuck) #[derive(Debug, Clone, Serialize, JsonSchema)] pub struct PendingSagaInfo { diff --git a/nexus/types/src/quiesce.rs b/nexus/types/src/quiesce.rs index 76df318d80d..378011be2cf 100644 --- a/nexus/types/src/quiesce.rs +++ b/nexus/types/src/quiesce.rs @@ -12,6 +12,7 @@ use iddqd::IdOrdMap; use omicron_common::api::external::Error; use omicron_common::api::external::Generation; use slog::Logger; +use slog::error; use slog::info; use slog::o; use slog_error_chain::InlineErrorChain; @@ -27,15 +28,20 @@ use tokio::sync::watch; enum SagasAllowed { /// New sagas may be started (normal condition) Allowed, - /// New sagas may not be started (happens during quiesce) - Disallowed, + /// New sagas may not be started because we're quiescing or quiesced + DisallowedQuiesce, + /// New sagas may not be started because we just started up and haven't + /// determined if we're quiescing yet + DisallowedUnknown, } #[derive(Debug, Error)] -#[error( - "saga creation and reassignment are disallowed (Nexus quiescing/quiesced)" -)] -pub struct NoSagasAllowedError; +pub enum NoSagasAllowedError { + #[error("saga creation is disallowed (quiescing/quiesced)")] + Quiescing, + #[error("saga creation is disallowed (unknown yet if we're quiescing)")] + Unknown, +} impl From for Error { fn from(value: NoSagasAllowedError) -> Self { Error::unavail(&value.to_string()) @@ -80,7 +86,7 @@ pub struct SagaQuiesceHandle { // mutate the data, using it to protect data and not code. // // (2) `watch::Receiver` provides a really handy `wait_for()` method` that - // we use in `wait_for_quiesced()`. Besides being convenient, this + // we use in `wait_for_drained()`. Besides being convenient, this // would be surprisingly hard for us to implement ourselves with a // `Mutex`. Traditionally, you'd use a combination Mutex/Condvar for // this. 
But we'd want to use a `std` Mutex (since tokio Mutex's @@ -140,7 +146,7 @@ struct SagaQuiesceInner { impl SagaQuiesceHandle { pub fn new(log: Logger) -> SagaQuiesceHandle { let (inner, _) = watch::channel(SagaQuiesceInner { - new_sagas_allowed: SagasAllowed::Allowed, + new_sagas_allowed: SagasAllowed::DisallowedUnknown, sagas_pending: IdOrdMap::new(), first_recovery_complete: false, reassignment_generation: Generation::new(), @@ -151,26 +157,65 @@ impl SagaQuiesceHandle { SagaQuiesceHandle { log, inner } } - /// Disallow new sagas from being started or re-assigned to this Nexus + /// Set the intended quiescing state /// - /// This is currently a one-way trip. Sagas cannot be un-quiesced. - pub fn quiesce(&self) { - // Log this before changing the config to make sure this message - // appears before messages from code paths that saw this change. - info!(&self.log, "starting saga quiesce"); - self.inner - .send_modify(|q| q.new_sagas_allowed = SagasAllowed::Disallowed); + /// Quiescing is currently a one-way trip. Once we start quiescing, we + /// cannot then re-enable sagas. + pub fn set_quiescing(&self, quiescing: bool) { + self.inner.send_if_modified(|q| { + let new_state = if quiescing { + SagasAllowed::DisallowedQuiesce + } else { + SagasAllowed::Allowed + }; + + match q.new_sagas_allowed { + SagasAllowed::DisallowedUnknown => { + info!( + &self.log, + "initial quiesce state"; + "initial_state" => ?new_state + ); + q.new_sagas_allowed = new_state; + true + } + SagasAllowed::Allowed if quiescing => { + info!(&self.log, "saga quiesce starting"); + q.new_sagas_allowed = SagasAllowed::DisallowedQuiesce; + true + } + SagasAllowed::DisallowedQuiesce if !quiescing => { + // This should be impossible. Report a problem. + error!( + &self.log, + "asked to stop quiescing after previously quiescing" + ); + false + } + _ => { + // There's no transition happening in these cases: + // - SagasAllowed::Allowed and we're not quiescing + // - SagasAllowed::DisallowedQuiesce and we're now quiescing + false + } + } + }); } - /// Returns whether sagas are fully quiesced - pub fn is_fully_quiesced(&self) -> bool { - self.inner.borrow().is_fully_quiesced() + /// Returns whether sagas are fully drained + /// + /// Note that this state can change later if new sagas get assigned to this + /// Nexus. + pub fn is_fully_drained(&self) -> bool { + self.inner.borrow().is_fully_drained() } - /// Wait for sagas to be quiesced - pub async fn wait_for_quiesced(&self) { - let _ = - self.inner.subscribe().wait_for(|q| q.is_fully_quiesced()).await; + /// Wait for sagas to become drained + /// + /// Note that new sagas can still be assigned to this Nexus, resulting in it + /// no longer being fully drained. + pub async fn wait_for_drained(&self) { + let _ = self.inner.subscribe().wait_for(|q| q.is_fully_drained()).await; } /// Returns information about running sagas (involves a clone) @@ -180,13 +225,10 @@ impl SagaQuiesceHandle { /// Record an operation that might assign sagas to this Nexus /// - /// If reassignment is currently allowed, `f` will be invoked to potentially - /// re-assign sagas. `f` returns `(T, bool)`, where `T` is whatever value - /// you want and is returned back from this function. The boolean indicates - /// whether any sagas may have been assigned to the current Nexus. - /// - /// If reassignment is currently disallowed (because Nexus is quiescing), - /// `f` is not invoked and an error describing this condition is returned. + /// `f` will be invoked to potentially re-assign sagas. 
`f` returns `(T, + /// bool)`, where `T` is whatever value you want and is returned back from + /// this function. The boolean indicates whether any sagas may have been + /// assigned to the current Nexus. /// /// Only one of these may be outstanding at a time. It should not be called /// concurrently. This is easy today because this is only invoked by a few @@ -204,27 +246,22 @@ impl SagaQuiesceHandle { // mis-use (e.g., by forgetting to call `reassignment_done()`). But we keep // the other two functions around because it's easier to write tests against // those. - pub async fn reassign_if_possible( - &self, - f: F, - ) -> Result + pub async fn reassign_sagas(&self, f: F) -> T where F: AsyncFnOnce() -> (T, bool), { - let in_progress = self.reassignment_start()?; + let in_progress = self.reassignment_start(); let (result, maybe_reassigned) = f().await; in_progress.reassignment_done(maybe_reassigned); - Ok(result) + result } /// Record that we've begun a re-assignment operation. /// /// Only one of these may be outstanding at a time. The caller must call /// `reassignment_done()` before starting another one of these. - fn reassignment_start( - &self, - ) -> Result { - let okay = self.inner.send_if_modified(|q| { + fn reassignment_start(&self) -> SagaReassignmentInProgress { + self.inner.send_modify(|q| { assert!( !q.reassignment_pending, "two calls to reassignment_start() without intervening call \ @@ -232,21 +269,11 @@ impl SagaQuiesceHandle { reassign_if_possible()?)" ); - if q.new_sagas_allowed != SagasAllowed::Allowed { - return false; - } - q.reassignment_pending = true; - true }); - if okay { - info!(&self.log, "allowing saga re-assignment pass"); - Ok(SagaReassignmentInProgress { q: self.clone() }) - } else { - info!(&self.log, "disallowing saga re-assignment pass"); - Err(NoSagasAllowedError) - } + info!(&self.log, "starting saga re-assignment pass"); + SagaReassignmentInProgress { q: self.clone() } } /// Record that we've finished an operation that might assign new sagas to @@ -262,10 +289,10 @@ impl SagaQuiesceHandle { q.reassignment_pending = false; // If we may have assigned new sagas to ourselves, bump the - // generation number. We won't quiesce until a recovery pass has - // finished that *started* with this generation number. So this - // ensures that we won't quiesce until any sagas that may have been - // assigned to us have been recovered. + // generation number. We won't report being drained until a + // recovery pass has finished that *started* with this generation + // number. So this ensures that we won't report being drained until + // any sagas that may have been assigned to us have been recovered. if maybe_reassigned { q.reassignment_generation = q.reassignment_generation.next(); } @@ -344,7 +371,7 @@ impl SagaQuiesceHandle { /// Report that a saga has started running /// - /// This fails if sagas are quiesced. + /// This fails if sagas are quiescing or quiesced. /// /// Callers must also call `saga_completion_future()` to make sure it's /// recorded when this saga finishes. 
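For context on the `(T, bool)` contract described above: the blueprint-execution step earlier in this series is the one current caller, and it passes back `true` whenever sagas may have been re-assigned (including on error, to be conservative). A minimal sketch of such a caller follows; `reassignment_pass` and `find_and_reassign` are hypothetical names standing in for the real datastore operation (`sagas::reassign_sagas_from_expunged`).

```rust
use nexus_types::quiesce::SagaQuiesceHandle;

/// Minimal sketch of a reassignment pass driven through the quiesce handle.
/// `find_and_reassign` is a hypothetical stand-in for the real datastore call.
async fn reassignment_pass(
    quiesce: &SagaQuiesceHandle,
) -> anyhow::Result<bool> {
    async fn find_and_reassign() -> anyhow::Result<bool> {
        // Pretend nothing needed to be re-assigned.
        Ok(false)
    }

    quiesce
        .reassign_sagas(async || {
            match find_and_reassign().await {
                // The second element tells the handle whether sagas may now
                // belong to this Nexus; if so, it will not report itself
                // drained until a recovery pass started afterwards succeeds.
                Ok(reassigned) => (Ok(reassigned), reassigned),
                // On failure, conservatively assume something may have been
                // re-assigned before the error.
                Err(error) => (Err(error), true),
            }
        })
        .await
}
```

The boolean is deliberately decoupled from success or failure so that a partially completed pass still forces a recovery pass before the handle can report itself drained.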
@@ -353,9 +380,18 @@ impl SagaQuiesceHandle { saga_id: steno::SagaId, saga_name: &steno::SagaName, ) -> Result { + let mut error: Option = None; let okay = self.inner.send_if_modified(|q| { - if q.new_sagas_allowed != SagasAllowed::Allowed { - return false; + match q.new_sagas_allowed { + SagasAllowed::Allowed => (), + SagasAllowed::DisallowedQuiesce => { + error = Some(NoSagasAllowedError::Quiescing); + return false; + } + SagasAllowed::DisallowedUnknown => { + error = Some(NoSagasAllowedError::Unknown); + return false; + } } q.sagas_pending @@ -379,12 +415,15 @@ impl SagaQuiesceHandle { init_finished: false, }) } else { + let error = + error.expect("error is always set when disallowing sagas"); info!( &self.log, "disallowing saga creation"; - "saga_id" => saga_id.to_string() + "saga_id" => saga_id.to_string(), + InlineErrorChain::new(&error), ); - Err(NoSagasAllowedError) + Err(error) } } @@ -403,8 +442,8 @@ impl SagaQuiesceHandle { /// sagas that might possibly have finished already.) /// /// Unlike `saga_created()`, this cannot fail as a result of sagas being - /// quiesced. That's because a saga that *needs* to be recovered is a - /// blocker for quiesce, whether it's running or not. So we need to + /// quiescing/quiesced. That's because a saga that *needs* to be recovered + /// is a blocker for quiesce, whether it's running or not. So we need to /// actually run and finish it. We do still want to prevent ourselves from /// taking on sagas needing recovery -- that's why we fail /// `reassign_if_possible()` when saga creation is disallowed. @@ -438,10 +477,13 @@ impl SagaQuiesceHandle { } impl SagaQuiesceInner { - /// Returns whether sagas are fully and permanently quiesced - pub fn is_fully_quiesced(&self) -> bool { + /// Returns whether sagas are fully drained + /// + /// This condition is not permanent. New sagas can be re-assigned to this + /// Nexus. + pub fn is_fully_drained(&self) -> bool { // No new sagas may be created - self.new_sagas_allowed == SagasAllowed::Disallowed + self.new_sagas_allowed == SagasAllowed::DisallowedQuiesce // and there are none currently running && self.sagas_pending.is_empty() // and there are none from a previous lifetime that still need to be @@ -640,32 +682,30 @@ mod test { // Set up a new handle. Complete the first saga recovery immediately so // that that doesn't block quiescing. let qq = SagaQuiesceHandle::new(log.clone()); + qq.set_quiescing(false); let recovery = qq.recovery_start(); recovery.recovery_done(true); - // It's still not fully quiesced because we haven't asked it to quiesce + // It's still not fully drained because we haven't asked it to quiesce // yet. assert!(qq.sagas_pending().is_empty()); - assert!(!qq.is_fully_quiesced()); + assert!(!qq.is_fully_drained()); - // Now start quiescing. It should immediately report itself as - // quiesced. There's nothing asynchronous in this path. (It would be - // okay if there were.) - qq.quiesce(); - assert!(qq.is_fully_quiesced()); + // Now start quiescing. It should immediately report itself as drained. + // There's nothing asynchronous in this path. (It would be okay if + // there were.) + qq.set_quiescing(true); + assert!(qq.is_fully_drained()); - // It's not allowed to create sagas or begin re-assignment after - // quiescing has started, let alone finished. + // It's not allowed to create sagas after quiescing has started, let + // alone finished. 
let _ = qq .saga_create(*SAGA_ID, &SAGA_NAME) .expect_err("cannot create saga after quiescing started"); - let _ = qq - .reassignment_start() - .expect_err("cannot start re-assignment after quiescing started"); - // Waiting for quiesce should complete immediately. - qq.wait_for_quiesced().await; - assert!(qq.is_fully_quiesced()); + // Waiting for drain should complete immediately. + qq.wait_for_drained().await; + assert!(qq.is_fully_drained()); logctx.cleanup_successful(); } @@ -680,6 +720,7 @@ mod test { // Set up a new handle. Complete the first saga recovery immediately so // that that doesn't block quiescing. let qq = SagaQuiesceHandle::new(log.clone()); + qq.set_quiescing(false); let recovery = qq.recovery_start(); recovery.recovery_done(true); @@ -690,15 +731,15 @@ mod test { assert!(!qq.sagas_pending().is_empty()); // Start quiescing. - qq.quiesce(); - assert!(!qq.is_fully_quiesced()); + qq.set_quiescing(true); + assert!(!qq.is_fully_drained()); // Dropping the returned handle is as good as completing the saga. drop(started); assert!(qq.sagas_pending().is_empty()); - assert!(qq.is_fully_quiesced()); - qq.wait_for_quiesced().await; - assert!(qq.is_fully_quiesced()); + assert!(qq.is_fully_drained()); + qq.wait_for_drained().await; + assert!(qq.is_fully_drained()); logctx.cleanup_successful(); } @@ -715,6 +756,7 @@ mod test { // Set up a new handle. Complete the first saga recovery immediately so // that that doesn't block quiescing. let qq = SagaQuiesceHandle::new(log.clone()); + qq.set_quiescing(false); let recovery = qq.recovery_start(); recovery.recovery_done(true); @@ -730,8 +772,8 @@ mod test { assert!(!qq.sagas_pending().is_empty()); // Quiesce should block on the saga finishing. - qq.quiesce(); - assert!(!qq.is_fully_quiesced()); + qq.set_quiescing(true); + assert!(!qq.is_fully_drained()); // "Finish" the saga. tx.send(saga_result()).unwrap(); @@ -740,15 +782,15 @@ mod test { // able to notice that the saga finished yet. It's not that important // to assert this but it emphasizes that it really is waiting for // something to happen. - assert!(!qq.is_fully_quiesced()); + assert!(!qq.is_fully_drained()); // The consumer's completion future ought to be unblocked now. let _ = consumer_completion.await; // Wait for quiescing to finish. This should be immediate. - qq.wait_for_quiesced().await; + qq.wait_for_drained().await; assert!(qq.sagas_pending().is_empty()); - assert!(qq.is_fully_quiesced()); + assert!(qq.is_fully_drained()); logctx.cleanup_successful(); } @@ -761,24 +803,25 @@ mod test { // Set up a new handle. let qq = SagaQuiesceHandle::new(log.clone()); + qq.set_quiescing(false); - // Quiesce should block on recovery having completed successfully once. - qq.quiesce(); - assert!(!qq.is_fully_quiesced()); + // Drain should block on recovery having completed successfully once. + qq.set_quiescing(true); + assert!(!qq.is_fully_drained()); // Act like the first recovery failed. Quiescing should still be // blocked. let recovery = qq.recovery_start(); recovery.recovery_done(false); - assert!(!qq.is_fully_quiesced()); + assert!(!qq.is_fully_drained()); // Finish a normal saga recovery. Quiescing should proceed. // This happens synchronously (though it doesn't have to). 
let recovery = qq.recovery_start(); recovery.recovery_done(true); - assert!(qq.is_fully_quiesced()); - qq.wait_for_quiesced().await; - assert!(qq.is_fully_quiesced()); + assert!(qq.is_fully_drained()); + qq.wait_for_drained().await; + assert!(qq.is_fully_drained()); logctx.cleanup_successful(); } @@ -792,25 +835,25 @@ mod test { // Set up a new handle. Complete the first saga recovery immediately so // that that doesn't block quiescing. let qq = SagaQuiesceHandle::new(log.clone()); + qq.set_quiescing(false); let recovery = qq.recovery_start(); recovery.recovery_done(true); // Begin saga re-assignment. - let reassignment = - qq.reassignment_start().expect("can re-assign when not quiescing"); + let reassignment = qq.reassignment_start(); // Begin quiescing. - qq.quiesce(); + qq.set_quiescing(true); // Quiescing is blocked. - assert!(!qq.is_fully_quiesced()); + assert!(!qq.is_fully_drained()); // When re-assignment finishes *without* having re-assigned anything, // then we're immediately all set. reassignment.reassignment_done(false); - assert!(qq.is_fully_quiesced()); - qq.wait_for_quiesced().await; - assert!(qq.is_fully_quiesced()); + assert!(qq.is_fully_drained()); + qq.wait_for_drained().await; + assert!(qq.is_fully_drained()); logctx.cleanup_successful(); } @@ -826,36 +869,36 @@ mod test { // Set up a new handle. Complete the first saga recovery immediately so // that that doesn't block quiescing. let qq = SagaQuiesceHandle::new(log.clone()); + qq.set_quiescing(false); let recovery = qq.recovery_start(); recovery.recovery_done(true); // Begin saga re-assignment. - let reassignment = - qq.reassignment_start().expect("can re-assign when not quiescing"); + let reassignment = qq.reassignment_start(); // Begin quiescing. - qq.quiesce(); + qq.set_quiescing(true); // Quiescing is blocked. - assert!(!qq.is_fully_quiesced()); + assert!(!qq.is_fully_drained()); // When re-assignment finishes and re-assigned sagas, we're still // blocked. reassignment.reassignment_done(true); - assert!(!qq.is_fully_quiesced()); + assert!(!qq.is_fully_drained()); // If the next recovery pass fails, we're still blocked. let recovery = qq.recovery_start(); recovery.recovery_done(false); - assert!(!qq.is_fully_quiesced()); + assert!(!qq.is_fully_drained()); // Once a recovery pass succeeds, we're good. let recovery = qq.recovery_start(); recovery.recovery_done(true); - assert!(qq.is_fully_quiesced()); + assert!(qq.is_fully_drained()); - qq.wait_for_quiesced().await; - assert!(qq.is_fully_quiesced()); + qq.wait_for_drained().await; + assert!(qq.is_fully_drained()); logctx.cleanup_successful(); } @@ -874,18 +917,18 @@ mod test { // Set up a new handle. Complete the first saga recovery immediately so // that that doesn't block quiescing. let qq = SagaQuiesceHandle::new(log.clone()); + qq.set_quiescing(false); let recovery = qq.recovery_start(); recovery.recovery_done(true); // Begin saga re-assignment. - let reassignment = - qq.reassignment_start().expect("can re-assign when not quiescing"); + let reassignment = qq.reassignment_start(); // Begin quiescing. - qq.quiesce(); + qq.set_quiescing(true); // Quiescing is blocked. - assert!(!qq.is_fully_quiesced()); + assert!(!qq.is_fully_drained()); // Start a recovery pass. let recovery = qq.recovery_start(); @@ -893,25 +936,25 @@ mod test { // When re-assignment finishes and re-assigned sagas, we're still // blocked. 
reassignment.reassignment_done(true); - assert!(!qq.is_fully_quiesced()); + assert!(!qq.is_fully_drained()); // Even if this recovery pass succeeds, we're still blocked, because it // started before re-assignment finished and so isn't guaranteed to have // seen all the re-assigned sagas. recovery.recovery_done(true); - assert!(!qq.is_fully_quiesced()); + assert!(!qq.is_fully_drained()); // If the next pass fails, we're still blocked. let recovery = qq.recovery_start(); recovery.recovery_done(false); - assert!(!qq.is_fully_quiesced()); + assert!(!qq.is_fully_drained()); // Finally, we have a successful pass that unblocks us. let recovery = qq.recovery_start(); recovery.recovery_done(true); - assert!(qq.is_fully_quiesced()); - qq.wait_for_quiesced().await; - assert!(qq.is_fully_quiesced()); + assert!(qq.is_fully_drained()); + qq.wait_for_drained().await; + assert!(qq.is_fully_drained()); logctx.cleanup_successful(); } @@ -930,23 +973,23 @@ mod test { // Set up a new handle. Complete the first saga recovery immediately so // that that doesn't block quiescing. let qq = SagaQuiesceHandle::new(log.clone()); + qq.set_quiescing(false); let recovery = qq.recovery_start(); recovery.recovery_done(true); // Begin saga re-assignment. - let reassignment = - qq.reassignment_start().expect("can re-assign when not quiescing"); + let reassignment = qq.reassignment_start(); // Begin quiescing. - qq.quiesce(); + qq.set_quiescing(true); // Quiescing is blocked. - assert!(!qq.is_fully_quiesced()); + assert!(!qq.is_fully_drained()); // When re-assignment finishes and re-assigned sagas, we're still // blocked because we haven't run recovery. reassignment.reassignment_done(true); - assert!(!qq.is_fully_quiesced()); + assert!(!qq.is_fully_drained()); // Start a recovery pass. Pretend like we found something. let recovery = qq.recovery_start(); @@ -958,7 +1001,7 @@ mod test { recovery.recovery_done(true); // We're still not quiesced because that saga is still running. - assert!(!qq.is_fully_quiesced()); + assert!(!qq.is_fully_drained()); // Finish the recovered saga. That should unblock quiesce. tx.send(saga_result()).unwrap(); @@ -966,8 +1009,8 @@ mod test { // The consumer's completion future ought to be unblocked now. let _ = consumer_completion.await; - qq.wait_for_quiesced().await; - assert!(qq.is_fully_quiesced()); + qq.wait_for_drained().await; + assert!(qq.is_fully_drained()); logctx.cleanup_successful(); } @@ -983,6 +1026,7 @@ mod test { // Set up a new handle. let qq = SagaQuiesceHandle::new(log.clone()); + qq.set_quiescing(false); // Start a recovery pass. Pretend like we found something. let recovery = qq.recovery_start(); let pending = recovery.record_saga_recovery(*SAGA_ID, &SAGA_NAME); @@ -993,20 +1037,66 @@ mod test { recovery.recovery_done(true); // Begin quiescing. - qq.quiesce(); + qq.set_quiescing(true); // Quiescing is blocked. - assert!(!qq.is_fully_quiesced()); + assert!(!qq.is_fully_drained()); - // Finish the recovered saga. That should unblock quiesce. + // Finish the recovered saga. That should unblock drain. tx.send(saga_result()).unwrap(); - qq.wait_for_quiesced().await; - assert!(qq.is_fully_quiesced()); + qq.wait_for_drained().await; + assert!(qq.is_fully_drained()); // The consumer's completion future ought to be unblocked now. 
let _ = consumer_completion.await; logctx.cleanup_successful(); } + + /// Tests that sagas are disabled at the start + #[tokio::test] + async fn test_quiesce_sagas_disabled_on_startup() { + let logctx = test_setup_log("test_quiesce_block_on_recovered_sagas"); + let log = &logctx.log; + + let qq = SagaQuiesceHandle::new(log.clone()); + assert!(!qq.is_fully_drained()); + let _ = qq + .saga_create(*SAGA_ID, &SAGA_NAME) + .expect_err("cannot create saga in initial state"); + qq.recovery_start().recovery_done(true); + qq.set_quiescing(true); + assert!(qq.is_fully_drained()); + let _ = qq + .saga_create(*SAGA_ID, &SAGA_NAME) + .expect_err("cannot create saga after quiescing"); + + // It's allowed to start a new re-assignment pass. That prevents us + // from being drained. + let reassignment = qq.reassignment_start(); + assert!(!qq.is_fully_drained()); + reassignment.reassignment_done(false); + // We're fully drained as soon as this one is done, since we know we + // didn't assign any sagas. + assert!(qq.is_fully_drained()); + + // Try again. This time, we'll act like we did reassign sagas. + let reassignment = qq.reassignment_start(); + assert!(!qq.is_fully_drained()); + reassignment.reassignment_done(true); + assert!(!qq.is_fully_drained()); + // Do a failed recovery pass. We still won't be fully drained. + let recovery = qq.recovery_start(); + assert!(!qq.is_fully_drained()); + recovery.recovery_done(false); + assert!(!qq.is_fully_drained()); + // Do a successful recovery pass. We'll be drained again. + let recovery = qq.recovery_start(); + assert!(!qq.is_fully_drained()); + recovery.recovery_done(true); + assert!(qq.is_fully_drained()); + + logctx.cleanup_successful(); + } } From 330c72102541b0c367acb46c906dbe656eae8a92 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Wed, 20 Aug 2025 16:07:49 -0700 Subject: [PATCH 28/38] self-review + regenerate API spec --- dev-tools/omdb/src/bin/omdb/nexus/quiesce.rs | 44 +++++++++-- .../background/tasks/blueprint_execution.rs | 2 +- nexus/src/app/quiesce.rs | 21 ++--- nexus/types/src/deployment.rs | 2 +- openapi/nexus-internal.json | 79 ++++++++++++++++--- 5 files changed, 117 insertions(+), 31 deletions(-) diff --git a/dev-tools/omdb/src/bin/omdb/nexus/quiesce.rs b/dev-tools/omdb/src/bin/omdb/nexus/quiesce.rs index b27c2c22fb1..76c0a229c3c 100644 --- a/dev-tools/omdb/src/bin/omdb/nexus/quiesce.rs +++ b/dev-tools/omdb/src/bin/omdb/nexus/quiesce.rs @@ -61,10 +61,13 @@ async fn quiesce_show( .context("fetching quiesce state")? .into_inner(); match quiesce.state { + QuiesceState::Undetermined => { + println!("has not yet determined if it is quiescing"); + } QuiesceState::Running => { println!("running normally (not quiesced, not quiescing)"); } - QuiesceState::WaitingForSagas { time_requested } => { + QuiesceState::DrainingSagas { time_requested } => { println!( "quiescing since {} ({} ago)", humantime::format_rfc3339_millis(time_requested.into()), @@ -72,9 +75,9 @@ async fn quiesce_show( ); println!("details: waiting for running sagas to finish"); } - QuiesceState::WaitingForDb { + QuiesceState::DrainingDb { time_requested, - duration_waiting_for_sagas, + duration_draining_sagas, .. } => { println!( @@ -87,13 +90,34 @@ async fn quiesce_show( ); println!( " previously: waiting for sagas took {}", - format_duration_ms(duration_waiting_for_sagas.into()), + format_duration_ms(duration_draining_sagas.into()), + ); + } + QuiesceState::RecordingQuiesce { + time_requested, + duration_draining_sagas, + duration_draining_db, + .. 
+ } => { + println!( + "quiescing since {} ({} ago)", + humantime::format_rfc3339_millis(time_requested.into()), + format_time_delta(now - time_requested), + ); + println!( + " waiting for sagas took {}", + format_duration_ms(duration_draining_sagas.into()), + ); + println!( + " waiting for db quiesce took {}", + format_duration_ms(duration_draining_db.into()), ); } QuiesceState::Quiesced { time_quiesced, - duration_waiting_for_sagas, - duration_waiting_for_db, + duration_draining_sagas, + duration_draining_db, + duration_recording_quiesce, duration_total, .. } => { @@ -104,11 +128,15 @@ async fn quiesce_show( ); println!( " waiting for sagas took {}", - format_duration_ms(duration_waiting_for_sagas.into()), + format_duration_ms(duration_draining_sagas.into()), ); println!( " waiting for db quiesce took {}", - format_duration_ms(duration_waiting_for_db.into()), + format_duration_ms(duration_draining_db.into()), + ); + println!( + " recording quiesce took {}", + format_duration_ms(duration_recording_quiesce.into()), ); println!( " total quiesce time: {}", diff --git a/nexus/src/app/background/tasks/blueprint_execution.rs b/nexus/src/app/background/tasks/blueprint_execution.rs index b930fc51079..443ec5eec18 100644 --- a/nexus/src/app/background/tasks/blueprint_execution.rs +++ b/nexus/src/app/background/tasks/blueprint_execution.rs @@ -108,7 +108,7 @@ impl BlueprintExecutor { // blueprint before enabling sagas, since we already know if we're // quiescing or not); and (2) because we want to do it even if blueprint // execution is disabled. - match blueprint.nexus_quiescing(self.nexus_id) { + match blueprint.is_nexus_quiescing(self.nexus_id) { Ok(quiescing) => { debug!( &opctx.log, diff --git a/nexus/src/app/quiesce.rs b/nexus/src/app/quiesce.rs index a4f6e18fdfc..17b28e7fc9b 100644 --- a/nexus/src/app/quiesce.rs +++ b/nexus/src/app/quiesce.rs @@ -250,24 +250,27 @@ mod test { let QuiesceState::Quiesced { time_requested, time_quiesced, - duration_waiting_for_sagas, - duration_waiting_for_db, + duration_draining_sagas, + duration_draining_db, + duration_recording_quiesce, duration_total, } = status.state else { panic!("not quiesced"); }; let duration_total = Duration::from(duration_total); - let duration_waiting_for_sagas = - Duration::from(duration_waiting_for_sagas); - let duration_waiting_for_db = Duration::from(duration_waiting_for_db); + let duration_draining_sagas = Duration::from(duration_draining_sagas); + let duration_draining_db = Duration::from(duration_draining_db); + let duration_recording_quiesce = + Duration::from(duration_recording_quiesce); assert!(time_requested >= before); assert!(time_requested <= after); assert!(time_quiesced >= before); assert!(time_quiesced <= after); assert!(time_quiesced >= time_requested); - assert!(duration_total >= duration_waiting_for_sagas); - assert!(duration_total >= duration_waiting_for_db); + assert!(duration_total >= duration_draining_sagas); + assert!(duration_total >= duration_draining_db); + assert!(duration_total >= duration_recording_quiesce); assert!(duration_total <= (after - before).to_std().unwrap()); assert!(status.sagas_pending.is_empty()); assert!(status.db_claims.is_empty()); @@ -341,7 +344,7 @@ mod test { debug!(log, "found quiesce status"; "status" => ?quiesce_status); assert_matches!( quiesce_status.state, - QuiesceState::WaitingForSagas { .. } + QuiesceState::DrainingSagas { .. 
} ); assert!(quiesce_status.sagas_pending.contains_key(&demo_saga.saga_id)); // We should see at least one held database claim from the one we took @@ -404,7 +407,7 @@ mod test { .map_err(|e| CondCheckError::Failed(e))? .into_inner(); debug!(log, "found quiesce state"; "state" => ?rv); - if !matches!(rv.state, QuiesceState::WaitingForDb { .. }) { + if !matches!(rv.state, QuiesceState::DrainingDb { .. }) { return Err(CondCheckError::::NotYet); } assert!(rv.sagas_pending.is_empty()); diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index 35cb0692304..7d9c40b645a 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -388,7 +388,7 @@ impl Blueprint { /// Returns whether the given Nexus instance should be quiescing or quiesced /// in preparation for handoff to the next generation - pub fn nexus_quiescing( + pub fn is_nexus_quiescing( &self, nexus_id: OmicronZoneUuid, ) -> Result { diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index bc1cd3df567..caac2042246 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -7576,8 +7576,23 @@ ] }, "QuiesceState": { - "description": "See [`QuiesceStatus`] for more on Nexus quiescing.\n\nAt any given time, Nexus is always in one of these states:\n\n```text Running (normal operation) | | quiesce starts v WaitingForSagas (no new sagas are allowed, but some are still running) | | no more sagas running v WaitingForDb (no sagas running; no new db connections may be acquired by Nexus at-large, but some are still held) | | no more database connections held v Quiesced (no sagas running, no database connections in use) ```\n\nQuiescing is (currently) a one-way trip: once a Nexus process starts quiescing, it will never go back to normal operation. It will never go back to an earlier stage, either.", + "description": "See [`QuiesceStatus`] for more on Nexus quiescing.\n\nAt any given time, Nexus is always in one of these states:\n\n```text Undetermined (have not loaded persistent state; don't know yet) | | load persistent state and find we're not quiescing v Running (normal operation) | | quiesce starts v DrainingSagas (no new sagas are allowed, but some are still running) | | no more sagas running v DrainingDb (no sagas running; no new db connections may be | acquired by Nexus at-large, but some are still held) | | no more database connections held v RecordingQuiesce (everything is quiesced aside from one connection being | used to record our final quiesced state) | | finish recording quiesce state in database v Quiesced (no sagas running, no database connections in use) ```\n\nQuiescing is (currently) a one-way trip: once a Nexus process starts quiescing, it will never go back to normal operation. 
It will never go back to an earlier stage, either.", "oneOf": [ + { + "description": "We have not yet determined based on persistent state if we're supposed to be quiesced or not", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "undetermined" + ] + } + }, + "required": [ + "state" + ] + }, { "description": "Normal operation", "type": "object", @@ -7594,7 +7609,7 @@ ] }, { - "description": "New sagas disallowed, but some are still running.", + "description": "New sagas disallowed, but some are still running on some Nexus instances", "type": "object", "properties": { "quiesce_details": { @@ -7612,7 +7627,7 @@ "state": { "type": "string", "enum": [ - "waiting_for_sagas" + "draining_sagas" ] } }, @@ -7622,13 +7637,13 @@ ] }, { - "description": "No sagas running, no new database connections may be claimed, but some database connections are still held.", + "description": "No sagas running on any Nexus instances\n\nNo new database connections may be claimed, but some database connections are still held.", "type": "object", "properties": { "quiesce_details": { "type": "object", "properties": { - "duration_waiting_for_sagas": { + "duration_draining_sagas": { "$ref": "#/components/schemas/Duration" }, "time_requested": { @@ -7637,14 +7652,50 @@ } }, "required": [ - "duration_waiting_for_sagas", + "duration_draining_sagas", "time_requested" ] }, "state": { "type": "string", "enum": [ - "waiting_for_db" + "draining_db" + ] + } + }, + "required": [ + "quiesce_details", + "state" + ] + }, + { + "description": "No database connections in use except to record the final \"quiesced\" state", + "type": "object", + "properties": { + "quiesce_details": { + "type": "object", + "properties": { + "duration_draining_db": { + "$ref": "#/components/schemas/Duration" + }, + "duration_draining_sagas": { + "$ref": "#/components/schemas/Duration" + }, + "time_requested": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "duration_draining_db", + "duration_draining_sagas", + "time_requested" + ] + }, + "state": { + "type": "string", + "enum": [ + "recording_quiesce" ] } }, @@ -7660,13 +7711,16 @@ "quiesce_details": { "type": "object", "properties": { - "duration_total": { + "duration_draining_db": { "$ref": "#/components/schemas/Duration" }, - "duration_waiting_for_db": { + "duration_draining_sagas": { "$ref": "#/components/schemas/Duration" }, - "duration_waiting_for_sagas": { + "duration_recording_quiesce": { + "$ref": "#/components/schemas/Duration" + }, + "duration_total": { "$ref": "#/components/schemas/Duration" }, "time_quiesced": { @@ -7679,9 +7733,10 @@ } }, "required": [ + "duration_draining_db", + "duration_draining_sagas", + "duration_recording_quiesce", "duration_total", - "duration_waiting_for_db", - "duration_waiting_for_sagas", "time_quiesced", "time_requested" ] From b273427ca3152785fc81d72e263ea6dd204a1003 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Wed, 20 Aug 2025 16:29:50 -0700 Subject: [PATCH 29/38] tests need to wait for sagas to be enabled --- nexus/src/app/mod.rs | 8 ++++++++ nexus/src/app/quiesce.rs | 10 +++++----- nexus/src/lib.rs | 19 +++++++++++++++++++ nexus/types/src/quiesce.rs | 12 ++++++++++++ 4 files changed, 44 insertions(+), 5 deletions(-) diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index c8abeac6e05..c9c9e151614 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -621,6 +621,14 @@ impl Nexus { } } + // Waits for Nexus to determine whether sagas are supposed to be quiesced + // + // This is 
used by the test suite because most tests assume that sagas are + // operational as soon as they start. + pub(crate) async fn wait_for_saga_determination(&self) { + self.quiesce.sagas().wait_for_determination().await; + } + pub(crate) async fn external_tls_config( &self, tls_enabled: bool, diff --git a/nexus/src/app/quiesce.rs b/nexus/src/app/quiesce.rs index 17b28e7fc9b..cd5e3242ce5 100644 --- a/nexus/src/app/quiesce.rs +++ b/nexus/src/app/quiesce.rs @@ -92,13 +92,13 @@ impl NexusQuiesceHandle { } }); + // Immediately (synchronously) update the saga quiesce status. It's + // okay to do this even if there wasn't a change. + self.sagas.set_quiescing(quiescing); + if changed && quiescing { - // Immediately quiesce sagas. - self.sagas.set_quiescing(quiescing); // Asynchronously complete the rest of the quiesce process. - if quiescing { - tokio::spawn(do_quiesce(self.clone())); - } + tokio::spawn(do_quiesce(self.clone())); } } } diff --git a/nexus/src/lib.rs b/nexus/src/lib.rs index fc32a4824f5..49010a87bfb 100644 --- a/nexus/src/lib.rs +++ b/nexus/src/lib.rs @@ -138,6 +138,15 @@ impl Server { // the external server we're about to start. apictx.context.nexus.await_ip_allowlist_plumbing().await; + // Wait until Nexus has determined if sagas are supposed to be quiesced. + // This is not strictly necessary. The goal here is to prevent 503 + // errors to clients that reach this Nexus while it's starting up and + // before it's figured out that it doesn't need to quiesce. The risk of + // doing this is that Nexus gets stuck here, but that should only happen + // if it's unable to load the current blueprint, in which case + // something's pretty wrong and it's likely pretty stuck anyway. + apictx.context.nexus.wait_for_saga_determination().await; + // Launch the external server. let tls_config = apictx .context @@ -332,6 +341,16 @@ impl nexus_test_interface::NexusServer for Server { .await .expect("Could not initialize rack"); + // Now that we have a blueprint, determination of whether sagas are + // quiesced can complete. Wait for that so that tests can assume they + // can immediately kick off sagas. + internal_server + .apictx + .context + .nexus + .wait_for_saga_determination() + .await; + // Start the Nexus external API. Server::start(internal_server).await.unwrap() } diff --git a/nexus/types/src/quiesce.rs b/nexus/types/src/quiesce.rs index 378011be2cf..072a4ca3b1d 100644 --- a/nexus/types/src/quiesce.rs +++ b/nexus/types/src/quiesce.rs @@ -218,6 +218,18 @@ impl SagaQuiesceHandle { let _ = self.inner.subscribe().wait_for(|q| q.is_fully_drained()).await; } + /// Wait for the initial determination to be made about whether sagas are + /// allowed or not. 
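For reference, the method whose signature follows is a thin wrapper over tokio's `watch` channel: it subscribes and blocks until the observed value is no longer `SagasAllowed::DisallowedUnknown`. A minimal standalone sketch of that pattern, with simplified stand-in types and illustrative names rather than the real `SagaQuiesceHandle` internals:

```rust
use tokio::sync::watch;

// Simplified stand-in for the real state enum; names are illustrative only.
#[allow(dead_code)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum SagasAllowed {
    DisallowedUnknown, // startup: blueprint not loaded yet, so we don't know
    Allowed,
    DisallowedQuiesce,
}

// Block until the initial determination has been made, i.e. until the
// observed value is anything other than `DisallowedUnknown`.
async fn wait_for_determination(mut rx: watch::Receiver<SagasAllowed>) {
    // `wait_for` returns immediately if the current value already satisfies
    // the predicate; otherwise it waits for a new value that does.
    let _ = rx.wait_for(|s| *s != SagasAllowed::DisallowedUnknown).await;
}

#[tokio::main]
async fn main() {
    let (tx, rx) = watch::channel(SagasAllowed::DisallowedUnknown);
    let waiter = tokio::spawn(wait_for_determination(rx.clone()));

    // Simulate the blueprint loader concluding that sagas may run.
    tx.send(SagasAllowed::Allowed).unwrap();
    waiter.await.unwrap();
}
```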
+ pub async fn wait_for_determination(&self) { + let _ = self + .inner + .subscribe() + .wait_for(|q| { + q.new_sagas_allowed != SagasAllowed::DisallowedUnknown + }) + .await; + } + /// Returns information about running sagas (involves a clone) pub fn sagas_pending(&self) -> IdOrdMap { self.inner.borrow().sagas_pending.clone() From 845d371e2c09ab9ab12d549e32d76a6fddcae78b Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Thu, 21 Aug 2025 11:47:43 -0700 Subject: [PATCH 30/38] need to activate blueprint loader after inserting initial blueprint --- nexus/src/app/rack.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nexus/src/app/rack.rs b/nexus/src/app/rack.rs index 0c2e25bd0c6..2c5b21c0617 100644 --- a/nexus/src/app/rack.rs +++ b/nexus/src/app/rack.rs @@ -740,7 +740,8 @@ impl super::Nexus { // We've potentially updated the list of DNS servers and the DNS // configuration for both internal and external DNS, plus the Silo - // certificates. Activate the relevant background tasks. + // certificates and target blueprint. Activate the relevant background + // tasks. for task in &[ &self.background_tasks.task_internal_dns_config, &self.background_tasks.task_internal_dns_servers, @@ -748,6 +749,7 @@ impl super::Nexus { &self.background_tasks.task_external_dns_servers, &self.background_tasks.task_external_endpoints, &self.background_tasks.task_inventory_collection, + &self.background_tasks.task_blueprint_loader, ] { self.background_tasks.activate(task); } From 78ee24ff0411cb639c8784cabbdfc063e52a0047 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Tue, 2 Sep 2025 08:50:30 -0700 Subject: [PATCH 31/38] add the "second" Nexus to the test suite blueprint; fix omdb tests --- common/src/address.rs | 26 ++- dev-tools/omdb/src/bin/omdb/db.rs | 12 +- dev-tools/omdb/tests/successes.out | 224 +++++++++++++++++------- dev-tools/omdb/tests/test_all_output.rs | 42 ++--- nexus/test-utils/src/lib.rs | 99 ++++++++--- 5 files changed, 288 insertions(+), 115 deletions(-) diff --git a/common/src/address.rs b/common/src/address.rs index 9e8dd798293..08fc5cb9c0d 100644 --- a/common/src/address.rs +++ b/common/src/address.rs @@ -396,7 +396,9 @@ impl std::fmt::Display for IpVersion { /// /// The first address in the range is guaranteed to be no greater than the last /// address. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] +#[derive( + Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize, Ord, PartialOrd, +)] #[serde(untagged)] pub enum IpRange { V4(Ipv4Range), @@ -548,7 +550,16 @@ impl From for IpRange { /// /// The first address must be less than or equal to the last address. #[derive( - Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize, JsonSchema, + Clone, + Copy, + Debug, + PartialEq, + Eq, + Deserialize, + Serialize, + JsonSchema, + PartialOrd, + Ord, )] #[serde(try_from = "AnyIpv4Range")] pub struct Ipv4Range { @@ -612,7 +623,16 @@ impl TryFrom for Ipv4Range { /// /// The first address must be less than or equal to the last address. 
#[derive( - Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize, JsonSchema, + PartialOrd, + Ord, + Clone, + Copy, + Debug, + PartialEq, + Eq, + Deserialize, + Serialize, + JsonSchema, )] #[serde(try_from = "AnyIpv6Range")] pub struct Ipv6Range { diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index 8290739a734..1ca878908bd 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -5016,7 +5016,7 @@ async fn cmd_db_dns_diff( // Load the added and removed items. use nexus_db_schema::schema::dns_name::dsl; - let added = dsl::dns_name + let mut added = dsl::dns_name .filter(dsl::dns_zone_id.eq(zone.id)) .filter(dsl::version_added.eq(version.version)) .limit(i64::from(u32::from(limit))) @@ -5026,7 +5026,7 @@ async fn cmd_db_dns_diff( .context("loading added names")?; check_limit(&added, limit, || "loading added names"); - let removed = dsl::dns_name + let mut removed = dsl::dns_name .filter(dsl::dns_zone_id.eq(zone.id)) .filter(dsl::version_removed.eq(version.version)) .limit(i64::from(u32::from(limit))) @@ -5042,6 +5042,11 @@ async fn cmd_db_dns_diff( ); println!(""); + // This is kind of stupid-expensive, but there aren't a lot of records + // here and it's helpful for this output to be stable. + added.sort_by_cached_key(|k| format!("{} {:?}", k.name, k.records())); + removed.sort_by_cached_key(|k| format!("{} {:?}", k.name, k.records())); + for a in added { print_name("+", &a.name, a.records().context("parsing records")); } @@ -5097,7 +5102,8 @@ async fn cmd_db_dns_names( } }); - for (name, records) in names { + for (name, mut records) in names { + records.sort(); print_name("", &name, Ok(records)); } } diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index f0a22c50ad4..4695c1c3ade 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -32,7 +32,9 @@ changes: names added: 3, names removed: 0 + @ NS ns1.oxide-dev.test + ns1 AAAA ::1 -+ test-suite-silo.sys A 127.0.0.1 ++ test-suite-silo.sys (records: 2) ++ A 127.0.0.1 ++ AAAA 100::1 --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable @@ -46,7 +48,9 @@ External zone: oxide-dev.test NAME RECORDS @ NS ns1.oxide-dev.test ns1 AAAA ::1 - test-suite-silo.sys A 127.0.0.1 + test-suite-silo.sys (records: 2) + A 127.0.0.1 + AAAA 100::1 --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable @@ -489,15 +493,33 @@ task: "nat_garbage_collector" task: "blueprint_loader" configured period: every m s +<<<<<<< HEAD last completed activation: , triggered by +||||||| parent of 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) + last completed activation: , triggered by a periodic timer firing +======= + last completed activation: , triggered by an explicit signal +>>>>>>> 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) started at (s ago) and ran for ms - last completion reported error: failed to read target blueprint: Internal Error: no target blueprint set + target blueprint: ............. 
+ execution: disabled + created at: + status: first target blueprint task: "blueprint_executor" configured period: every m +<<<<<<< HEAD last completed activation: , triggered by +||||||| parent of 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) + last completed activation: , triggered by a periodic timer firing +======= + last completed activation: , triggered by a dependent task completing +>>>>>>> 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) started at (s ago) and ran for ms - last completion reported error: no blueprint + target blueprint: ............. + execution: disabled + status: (no event report found) + error: (none) task: "abandoned_vmm_reaper" configured period: every m @@ -531,7 +553,18 @@ task: "blueprint_rendezvous" configured period: every m last completed activation: , triggered by started at (s ago) and ran for ms - last completion reported error: no blueprint + target blueprint: ............. + inventory collection: ..................... + debug_dataset rendezvous counts: + num_inserted: 0 + num_already_exist: 0 + num_not_in_inventory: 0 + num_tombstoned: 0 + num_already_tombstoned: 0 + crucible_dataset rendezvous counts: + num_inserted: 0 + num_already_exist: 0 + num_not_in_inventory: 0 task: "chicken_switches_watcher" configured period: every s @@ -541,9 +574,15 @@ warning: unknown background task: "chicken_switches_watcher" (don't know how to task: "crdb_node_id_collector" configured period: every m +<<<<<<< HEAD last completed activation: , triggered by +||||||| parent of 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) + last completed activation: , triggered by a periodic timer firing +======= + last completed activation: , triggered by a dependent task completing +>>>>>>> 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) started at (s ago) and ran for ms - last completion reported error: no blueprint +warning: unknown background task: "crdb_node_id_collector" (don't know how to interpret details: Object {"errors": Array [Object {"err": String("failed to fetch node ID for zone ..................... at http://[::1]:REDACTED_PORT: Communication Error: error sending request for url (http://[::1]:REDACTED_PORT/node/id): error sending request for url (http://[::1]:REDACTED_PORT/node/id): client error (Connect): tcp connect error: Connection refused (os error 146)"), "zone_id": String(".....................")}], "nsuccess": Number(0)}) task: "decommissioned_disk_cleaner" configured period: every m @@ -842,15 +881,37 @@ termination: Exited(0) stdout: task: "blueprint_loader" configured period: every m s +<<<<<<< HEAD last completed activation: , triggered by +||||||| parent of 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) + currently executing: no + last completed activation: , triggered by a periodic timer firing +======= + currently executing: no + last completed activation: , triggered by an explicit signal +>>>>>>> 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) started at (s ago) and ran for ms - last completion reported error: failed to read target blueprint: Internal Error: no target blueprint set + target blueprint: ............. 
+ execution: disabled + created at: + status: first target blueprint task: "blueprint_executor" configured period: every m +<<<<<<< HEAD last completed activation: , triggered by +||||||| parent of 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) + currently executing: no + last completed activation: , triggered by a periodic timer firing +======= + currently executing: no + last completed activation: , triggered by a dependent task completing +>>>>>>> 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) started at (s ago) and ran for ms - last completion reported error: no blueprint + target blueprint: ............. + execution: disabled + status: (no event report found) + error: (none) --------------------------------------------- stderr: @@ -984,15 +1045,37 @@ task: "nat_garbage_collector" task: "blueprint_loader" configured period: every m s +<<<<<<< HEAD last completed activation: , triggered by +||||||| parent of 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) + currently executing: no + last completed activation: , triggered by a periodic timer firing +======= + currently executing: no + last completed activation: , triggered by an explicit signal +>>>>>>> 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) started at (s ago) and ran for ms - last completion reported error: failed to read target blueprint: Internal Error: no target blueprint set + target blueprint: ............. + execution: disabled + created at: + status: first target blueprint task: "blueprint_executor" configured period: every m +<<<<<<< HEAD last completed activation: , triggered by +||||||| parent of 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) + currently executing: no + last completed activation: , triggered by a periodic timer firing +======= + currently executing: no + last completed activation: , triggered by a dependent task completing +>>>>>>> 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) started at (s ago) and ran for ms - last completion reported error: no blueprint + target blueprint: ............. + execution: disabled + status: (no event report found) + error: (none) task: "abandoned_vmm_reaper" configured period: every m @@ -1026,7 +1109,18 @@ task: "blueprint_rendezvous" configured period: every m last completed activation: , triggered by started at (s ago) and ran for ms - last completion reported error: no blueprint + target blueprint: ............. + inventory collection: ..................... 
+ debug_dataset rendezvous counts: + num_inserted: 0 + num_already_exist: 0 + num_not_in_inventory: 0 + num_tombstoned: 0 + num_already_tombstoned: 0 + crucible_dataset rendezvous counts: + num_inserted: 0 + num_already_exist: 0 + num_not_in_inventory: 0 task: "chicken_switches_watcher" configured period: every s @@ -1036,9 +1130,17 @@ warning: unknown background task: "chicken_switches_watcher" (don't know how to task: "crdb_node_id_collector" configured period: every m +<<<<<<< HEAD last completed activation: , triggered by +||||||| parent of 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) + currently executing: no + last completed activation: , triggered by a periodic timer firing +======= + currently executing: no + last completed activation: , triggered by a dependent task completing +>>>>>>> 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) started at (s ago) and ran for ms - last completion reported error: no blueprint +warning: unknown background task: "crdb_node_id_collector" (don't know how to interpret details: Object {"errors": Array [Object {"err": String("failed to fetch node ID for zone ..................... at http://[::1]:REDACTED_PORT: Communication Error: error sending request for url (http://[::1]:REDACTED_PORT/node/id): error sending request for url (http://[::1]:REDACTED_PORT/node/id): client error (Connect): tcp connect error: Connection refused (os error 146)"), "zone_id": String(".....................")}], "nsuccess": Number(0)}) task: "decommissioned_disk_cleaner" configured period: every m @@ -1303,53 +1405,6 @@ task: "webhook_deliverator" stderr: note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ ============================================= -EXECUTING COMMAND: omdb ["nexus", "chicken-switches", "show", "current"] -termination: Exited(0) ---------------------------------------------- -stdout: -No chicken switches enabled ---------------------------------------------- -stderr: -note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ -============================================= -EXECUTING COMMAND: omdb ["-w", "nexus", "chicken-switches", "set", "--planner-enabled", "true"] -termination: Exited(0) ---------------------------------------------- -stdout: -chicken switches updated to version 1: - planner enabled: true - planner switches: - add zones with mupdate override: true ---------------------------------------------- -stderr: -note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ -============================================= -EXECUTING COMMAND: omdb ["-w", "nexus", "chicken-switches", "set", "--add-zones-with-mupdate-override", "false"] -termination: Exited(0) ---------------------------------------------- -stdout: -chicken switches updated to version 2: - planner enabled: true (unchanged) - planner switches: - * add zones with mupdate override: true -> false ---------------------------------------------- -stderr: -note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ -============================================= -EXECUTING COMMAND: omdb ["nexus", "chicken-switches", "show", "current"] -termination: Exited(0) ---------------------------------------------- -stdout: -Reconfigurator chicken switches: - version: 2 - modified time: - planner enabled: true - planner switches: - add zones with mupdate override: false ---------------------------------------------- -stderr: -note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ -============================================= EXECUTING COMMAND: omdb ["nexus", 
"sagas", "list"] termination: Exited(0) --------------------------------------------- @@ -1486,6 +1541,7 @@ parent: oxp_...................../crypt/zone/oxz_external_dns_..................... ..................... in service none none off oxp_...................../crypt/zone/oxz_internal_dns_..................... ..................... in service none none off oxp_...................../crypt/zone/oxz_nexus_..................... ..................... in service none none off + oxp_...................../crypt/zone/oxz_nexus_..................... ..................... in service none none off oxp_...................../crypt/zone/oxz_ntp_..................... ..................... in service none none off @@ -1500,6 +1556,7 @@ parent: external_dns ..................... install dataset in service ::1 internal_dns ..................... install dataset in service ::1 nexus ..................... install dataset in service ::ffff:127.0.0.1 + nexus ..................... install dataset in service ::1 COCKROACHDB SETTINGS: @@ -1610,6 +1667,7 @@ parent: oxp_...................../crypt/zone/oxz_external_dns_..................... ..................... in service none none off oxp_...................../crypt/zone/oxz_internal_dns_..................... ..................... in service none none off oxp_...................../crypt/zone/oxz_nexus_..................... ..................... in service none none off + oxp_...................../crypt/zone/oxz_nexus_..................... ..................... in service none none off oxp_...................../crypt/zone/oxz_ntp_..................... ..................... in service none none off @@ -1624,6 +1682,7 @@ parent: external_dns ..................... install dataset in service ::1 internal_dns ..................... install dataset in service ::1 nexus ..................... install dataset in service ::ffff:127.0.0.1 + nexus ..................... 
install dataset in service ::1 COCKROACHDB SETTINGS: @@ -1687,6 +1746,53 @@ stderr: note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ Error: `blueprint2_id` was not specified and blueprint1 has no parent ============================================= +EXECUTING COMMAND: omdb ["nexus", "chicken-switches", "show", "current"] +termination: Exited(0) +--------------------------------------------- +stdout: +No chicken switches enabled +--------------------------------------------- +stderr: +note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ +============================================= +EXECUTING COMMAND: omdb ["-w", "nexus", "chicken-switches", "set", "--planner-enabled", "true"] +termination: Exited(0) +--------------------------------------------- +stdout: +chicken switches updated to version 1: + planner enabled: true + planner switches: + add zones with mupdate override: true +--------------------------------------------- +stderr: +note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ +============================================= +EXECUTING COMMAND: omdb ["-w", "nexus", "chicken-switches", "set", "--add-zones-with-mupdate-override", "false"] +termination: Exited(0) +--------------------------------------------- +stdout: +chicken switches updated to version 2: + planner enabled: true (unchanged) + planner switches: + * add zones with mupdate override: true -> false +--------------------------------------------- +stderr: +note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ +============================================= +EXECUTING COMMAND: omdb ["nexus", "chicken-switches", "show", "current"] +termination: Exited(0) +--------------------------------------------- +stdout: +Reconfigurator chicken switches: + version: 2 + modified time: + planner enabled: true + planner switches: + add zones with mupdate override: false +--------------------------------------------- +stderr: +note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ +============================================= EXECUTING COMMAND: omdb ["reconfigurator", "export", ""] termination: Exited(0) --------------------------------------------- diff --git a/dev-tools/omdb/tests/test_all_output.rs b/dev-tools/omdb/tests/test_all_output.rs index 8dcadc1cc30..3d7b01896f3 100644 --- a/dev-tools/omdb/tests/test_all_output.rs +++ b/dev-tools/omdb/tests/test_all_output.rs @@ -224,27 +224,6 @@ async fn test_omdb_success_cases(cptestctx: &ControlPlaneTestContext) { "--no-executing-info", ], &["nexus", "background-tasks", "show", "all", "--no-executing-info"], - // chicken switches: show and set - &["nexus", "chicken-switches", "show", "current"], - &[ - "-w", - "nexus", - "chicken-switches", - "set", - "--planner-enabled", - "true", - ], - &[ - "-w", - "nexus", - "chicken-switches", - "set", - "--add-zones-with-mupdate-override", - "false", - ], - // After the set commands above, we should see chicken switches - // populated. - &["nexus", "chicken-switches", "show", "current"], &["nexus", "sagas", "list"], &["--destructive", "nexus", "sagas", "demo-create"], &["nexus", "sagas", "list"], @@ -267,6 +246,27 @@ async fn test_omdb_success_cases(cptestctx: &ControlPlaneTestContext) { ], // This one should fail because it has no parent. 
&["nexus", "blueprints", "diff", &initial_blueprint_id], + // chicken switches: show and set + &["nexus", "chicken-switches", "show", "current"], + &[ + "-w", + "nexus", + "chicken-switches", + "set", + "--planner-enabled", + "true", + ], + &[ + "-w", + "nexus", + "chicken-switches", + "set", + "--add-zones-with-mupdate-override", + "false", + ], + // After the set commands above, we should see chicken switches + // populated. + &["nexus", "chicken-switches", "show", "current"], &["reconfigurator", "export", tmppath.as_str()], // We can't easily test the sled agent output because that's only // provided by a real sled agent, which is not available in the diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index 1a5a2847cdd..724e9e2181d 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -305,8 +305,6 @@ pub fn load_test_config() -> NexusConfig { // - the CockroachDB TCP listen port be 0, and // - if the log will go to a file then the path must be the sentinel value // "UNUSED". - // - each Nexus created for testing gets its own id so they don't see each - // others sagas and try to recover them // // (See LogContext::new() for details.) Given these restrictions, it may // seem barely worth reading a config file at all. However, developers can @@ -314,10 +312,8 @@ pub fn load_test_config() -> NexusConfig { // configuration options, we expect many of those can be usefully configured // (and reconfigured) for the test suite. let config_file_path = Utf8Path::new("tests/config.test.toml"); - let mut config = NexusConfig::from_file(config_file_path) - .expect("failed to load config.test.toml"); - config.deployment.id = OmicronZoneUuid::new_v4(); - config + NexusConfig::from_file(config_file_path) + .expect("failed to load config.test.toml") } pub async fn test_setup( @@ -835,47 +831,97 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { 0, ); - let mac = self - .rack_init_builder - .mac_addrs - .next() - .expect("ran out of MAC addresses"); - let external_address = - self.config.deployment.dropshot_external.dropshot.bind_address.ip(); - let nexus_id = self.config.deployment.id; self.rack_init_builder.add_service_to_dns( - nexus_id, + self.config.deployment.id, address, ServiceName::Nexus, ); + self.record_nexus_zone(self.config.clone(), address, 0); + self.nexus_internal = Some(nexus_internal); + self.nexus_internal_addr = Some(nexus_internal_addr); + + // Besides the Nexus that we just started, add an entry in the blueprint + // for the Nexus that developers can start using + // nexus/examples/config-second.toml. + // + // The details in its BlueprintZoneType mostly don't matter because + // those are mostly used for DNS (which we don't usually need here) and + // to tell sled agent how to start the zone (which isn't what's going on + // here). But it does need to be present for it to be able to determine + // on startup if it needs to quiesce. + let second_nexus_config_path = + Utf8Path::new(env!("CARGO_MANIFEST_DIR")) + .join("../examples/config-second.toml"); + let mut second_nexus_config = + NexusConfig::from_file(&second_nexus_config_path).unwrap(); + // Okay, this is particularly awful. The system does not allow multiple + // zones to use the same external IP -- makes sense. But it actually is + // fine here because the IP is localhost and we're using host + // networking, and we've already ensured that the ports will be unique. + // Avoid tripping up the validation by using some other IP. This won't + // be used for anything. 
Pick something that's not in use anywhere + // else. This range is guaranteed by RFC 6666 to discard traffic. + second_nexus_config + .deployment + .dropshot_external + .dropshot + .bind_address + .set_ip("100::1".parse().unwrap()); + let SocketAddr::V6(second_internal_address) = + second_nexus_config.deployment.dropshot_internal.bind_address + else { + panic!( + "expected IPv6 address for dropshot_internal in \ + nexus/examples/config-second.toml" + ); + }; + self.record_nexus_zone(second_nexus_config, second_internal_address, 1); + Ok(()) + } + fn record_nexus_zone( + &mut self, + config: NexusConfig, + internal_address: SocketAddrV6, + which: usize, + ) { + let id = config.deployment.id; + let mac = self + .rack_init_builder + .mac_addrs + .next() + .expect("ran out of MAC addresses"); self.blueprint_zones.push(BlueprintZoneConfig { disposition: BlueprintZoneDisposition::InService, - id: nexus_id, + id, filesystem_pool: ZpoolName::new_external(ZpoolUuid::new_v4()), zone_type: BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { - external_dns_servers: self - .config + external_dns_servers: config .deployment .external_dns_servers .clone(), external_ip: OmicronZoneExternalFloatingIp { id: ExternalIpUuid::new_v4(), - ip: external_address, + ip: config + .deployment + .dropshot_external + .dropshot + .bind_address + .ip(), }, - external_tls: self.config.deployment.dropshot_external.tls, - internal_address: address, + external_tls: config.deployment.dropshot_external.tls, + internal_address, nic: NetworkInterface { id: Uuid::new_v4(), ip: NEXUS_OPTE_IPV4_SUBNET - .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES + 1) + .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES + 1 + which) .unwrap() .into(), kind: NetworkInterfaceKind::Service { - id: nexus_id.into_untyped_uuid(), + id: id.into_untyped_uuid(), }, mac, - name: format!("nexus-{}", nexus_id).parse().unwrap(), + name: format!("nexus-{}", id).parse().unwrap(), primary: true, slot: 0, subnet: (*NEXUS_OPTE_IPV4_SUBNET).into(), @@ -886,11 +932,6 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { }), image_source: BlueprintZoneImageSource::InstallDataset, }); - - self.nexus_internal = Some(nexus_internal); - self.nexus_internal_addr = Some(nexus_internal_addr); - - Ok(()) } pub async fn populate_internal_dns(&mut self) { From 8af16b6984889a0271df67013c7d6ce973f2d94d Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Fri, 22 Aug 2025 16:27:02 -0700 Subject: [PATCH 32/38] review feedback --- nexus/reconfigurator/execution/src/lib.rs | 5 +- nexus/src/app/quiesce.rs | 21 +++++-- nexus/types/src/quiesce.rs | 67 +++++++++++------------ 3 files changed, 51 insertions(+), 42 deletions(-) diff --git a/nexus/reconfigurator/execution/src/lib.rs b/nexus/reconfigurator/execution/src/lib.rs index 76305e0d754..e099f0c461b 100644 --- a/nexus/reconfigurator/execution/src/lib.rs +++ b/nexus/reconfigurator/execution/src/lib.rs @@ -650,8 +650,9 @@ fn register_reassign_sagas_step<'a>( Ok(saga_quiesce .reassign_sagas(async || { // For any expunged Nexus zones, re-assign in-progress - // sagas to some other Nexus. If this fails for some - // reason, it doesn't affect anything else. + // sagas to `nexus_id` (which, in practice, is + // ourselves). If this fails for some reason, it + // doesn't affect anything else. 
let sec_id = nexus_db_model::SecId::from(nexus_id); let reassigned = sagas::reassign_sagas_from_expunged( opctx, datastore, blueprint, sec_id, diff --git a/nexus/src/app/quiesce.rs b/nexus/src/app/quiesce.rs index cd5e3242ce5..3b36ec97991 100644 --- a/nexus/src/app/quiesce.rs +++ b/nexus/src/app/quiesce.rs @@ -80,13 +80,22 @@ impl NexusQuiesceHandle { *q = new_state; true } - QuiesceState::Running if quiescing => { - info!(&self.log, "quiesce starting"); - *q = new_state; - true + QuiesceState::Running => { + if quiescing { + info!(&self.log, "quiesce starting"); + *q = new_state; + true + } else { + // We're not quiescing and not being asked to quiesce. + // Nothing to do. + false + } } - _ => { - // All other cases are either impossible or no-ops. + QuiesceState::DrainingSagas { .. } + | QuiesceState::DrainingDb { .. } + | QuiesceState::RecordingQuiesce { .. } + | QuiesceState::Quiesced { .. } => { + // Once we start quiescing, we never go back. false } } diff --git a/nexus/types/src/quiesce.rs b/nexus/types/src/quiesce.rs index 072a4ca3b1d..7c2b3ad42dd 100644 --- a/nexus/types/src/quiesce.rs +++ b/nexus/types/src/quiesce.rs @@ -163,14 +163,13 @@ impl SagaQuiesceHandle { /// cannot then re-enable sagas. pub fn set_quiescing(&self, quiescing: bool) { self.inner.send_if_modified(|q| { - let new_state = if quiescing { - SagasAllowed::DisallowedQuiesce - } else { - SagasAllowed::Allowed - }; - match q.new_sagas_allowed { SagasAllowed::DisallowedUnknown => { + let new_state = if quiescing { + SagasAllowed::DisallowedQuiesce + } else { + SagasAllowed::Allowed + }; info!( &self.log, "initial quiesce state"; @@ -179,23 +178,25 @@ impl SagaQuiesceHandle { q.new_sagas_allowed = new_state; true } - SagasAllowed::Allowed if quiescing => { - info!(&self.log, "saga quiesce starting"); - q.new_sagas_allowed = SagasAllowed::DisallowedQuiesce; - true + SagasAllowed::Allowed => { + if quiescing { + info!(&self.log, "saga quiesce starting"); + q.new_sagas_allowed = SagasAllowed::DisallowedQuiesce; + true + } else { + false + } } - SagasAllowed::DisallowedQuiesce if !quiescing => { - // This should be impossible. Report a problem. - error!( - &self.log, - "asked to stop quiescing after previously quiescing" - ); - false - } - _ => { - // There's no transition happening in these cases: - // - SagasAllowed::Allowed and we're not quiescing - // - SagasAllowed::DisallowedQuiesce and we're now quiescing + SagasAllowed::DisallowedQuiesce => { + if !quiescing { + // This should be impossible. Report a problem. + error!( + &self.log, + "asked to stop quiescing after previously quiescing" + ); + } + + // Either way, we're not changing anything. 
false } } @@ -393,7 +394,7 @@ impl SagaQuiesceHandle { saga_name: &steno::SagaName, ) -> Result { let mut error: Option = None; - let okay = self.inner.send_if_modified(|q| { + self.inner.send_if_modified(|q| { match q.new_sagas_allowed { SagasAllowed::Allowed => (), SagasAllowed::DisallowedQuiesce => { @@ -417,7 +418,15 @@ impl SagaQuiesceHandle { true }); - if okay { + if let Some(error) = error { + info!( + &self.log, + "disallowing saga creation"; + "saga_id" => saga_id.to_string(), + InlineErrorChain::new(&error), + ); + Err(error) + } else { let log = self.log.new(o!("saga_id" => saga_id.to_string())); info!(&log, "tracking newly created saga"); Ok(NewlyPendingSagaRef { @@ -426,16 +435,6 @@ impl SagaQuiesceHandle { saga_id, init_finished: false, }) - } else { - let error = - error.expect("error is always set when disallowing sagas"); - info!( - &self.log, - "disallowing saga creation"; - "saga_id" => saga_id.to_string(), - InlineErrorChain::new(&error), - ); - Err(error) } } From 48483d05a69b7847c8163a7aa0a632cc72fd4ec6 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Fri, 22 Aug 2025 17:03:18 -0700 Subject: [PATCH 33/38] fix tests on GNU/Linux --- dev-tools/omdb/tests/successes.out | 4 ++-- dev-tools/omdb/tests/test_all_output.rs | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index 4695c1c3ade..39ebd4d2696 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -582,7 +582,7 @@ task: "crdb_node_id_collector" last completed activation: , triggered by a dependent task completing >>>>>>> 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) started at (s ago) and ran for ms -warning: unknown background task: "crdb_node_id_collector" (don't know how to interpret details: Object {"errors": Array [Object {"err": String("failed to fetch node ID for zone ..................... at http://[::1]:REDACTED_PORT: Communication Error: error sending request for url (http://[::1]:REDACTED_PORT/node/id): error sending request for url (http://[::1]:REDACTED_PORT/node/id): client error (Connect): tcp connect error: Connection refused (os error 146)"), "zone_id": String(".....................")}], "nsuccess": Number(0)}) +warning: unknown background task: "crdb_node_id_collector" (don't know how to interpret details: Object {"errors": Array [Object {"err": String("failed to fetch node ID for zone ..................... at http://[::1]:REDACTED_PORT: Communication Error: error sending request for url (http://[::1]:REDACTED_PORT/node/id): error sending request for url (http://[::1]:REDACTED_PORT/node/id): client error (Connect): tcp connect error: Connection refused (os error )"), "zone_id": String(".....................")}], "nsuccess": Number(0)}) task: "decommissioned_disk_cleaner" configured period: every m @@ -1140,7 +1140,7 @@ task: "crdb_node_id_collector" last completed activation: , triggered by a dependent task completing >>>>>>> 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) started at (s ago) and ran for ms -warning: unknown background task: "crdb_node_id_collector" (don't know how to interpret details: Object {"errors": Array [Object {"err": String("failed to fetch node ID for zone ..................... 
at http://[::1]:REDACTED_PORT: Communication Error: error sending request for url (http://[::1]:REDACTED_PORT/node/id): error sending request for url (http://[::1]:REDACTED_PORT/node/id): client error (Connect): tcp connect error: Connection refused (os error 146)"), "zone_id": String(".....................")}], "nsuccess": Number(0)}) +warning: unknown background task: "crdb_node_id_collector" (don't know how to interpret details: Object {"errors": Array [Object {"err": String("failed to fetch node ID for zone ..................... at http://[::1]:REDACTED_PORT: Communication Error: error sending request for url (http://[::1]:REDACTED_PORT/node/id): error sending request for url (http://[::1]:REDACTED_PORT/node/id): client error (Connect): tcp connect error: Connection refused (os error )"), "zone_id": String(".....................")}], "nsuccess": Number(0)}) task: "decommissioned_disk_cleaner" configured period: every m diff --git a/dev-tools/omdb/tests/test_all_output.rs b/dev-tools/omdb/tests/test_all_output.rs index 3d7b01896f3..03b2f79a391 100644 --- a/dev-tools/omdb/tests/test_all_output.rs +++ b/dev-tools/omdb/tests/test_all_output.rs @@ -280,7 +280,9 @@ async fn test_omdb_success_cases(cptestctx: &ControlPlaneTestContext) { .extra_variable_length( "cockroachdb_fingerprint", &initial_blueprint.cockroachdb_fingerprint, - ); + ) + // Error numbers vary between operating systems. + .field("os error", r"\d+"); let crdb_version = initial_blueprint.cockroachdb_setting_preserve_downgrade.to_string(); From cdfafb0b1dbd668e32d37b917b40bf58caa23dad Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Tue, 2 Sep 2025 08:51:04 -0700 Subject: [PATCH 34/38] fix end to end dns test --- dev-tools/omicron-dev/src/main.rs | 4 ++-- nexus/test-utils/src/lib.rs | 23 ++++++++++++++++++++++- 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/dev-tools/omicron-dev/src/main.rs b/dev-tools/omicron-dev/src/main.rs index a7207488759..fb62f1dd49f 100644 --- a/dev-tools/omicron-dev/src/main.rs +++ b/dev-tools/omicron-dev/src/main.rs @@ -100,8 +100,8 @@ impl RunAllArgs { println!("omicron-dev: services are running."); // Print out basic information about what was started. - // NOTE: The stdout strings here are not intended to be stable, but they are - // used by the test suite. + // NOTE: The stdout strings here are not intended to be stable, but they + // are used by the test suite. let addr = cptestctx.external_client.bind_address; println!("omicron-dev: nexus external API: {:?}", addr); println!( diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index 724e9e2181d..84b2bce4576 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -839,7 +839,12 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { self.record_nexus_zone(self.config.clone(), address, 0); self.nexus_internal = Some(nexus_internal); self.nexus_internal_addr = Some(nexus_internal_addr); + Ok(()) + } + pub async fn configure_second_nexus(&mut self) { + let log = &self.logctx.log; + debug!(log, "Configuring second Nexus (not to run)"); // Besides the Nexus that we just started, add an entry in the blueprint // for the Nexus that developers can start using // nexus/examples/config-second.toml. 
@@ -876,7 +881,6 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { ); }; self.record_nexus_zone(second_nexus_config, second_internal_address, 1); - Ok(()) } fn record_nexus_zone( @@ -1658,6 +1662,7 @@ pub async fn omicron_dev_setup_with_config( None, extra_sled_agents, gateway_config_file, + true, ) .await) } @@ -1679,6 +1684,7 @@ pub async fn test_setup_with_config( initial_cert, extra_sled_agents, gateway_config_file, + false, ) .await } @@ -1690,6 +1696,7 @@ async fn setup_with_config_impl( initial_cert: Option, extra_sled_agents: u16, gateway_config_file: Utf8PathBuf, + second_nexus: bool, ) -> ControlPlaneTestContext { const STEP_TIMEOUT: Duration = Duration::from_secs(60); @@ -1832,6 +1839,20 @@ async fn setup_with_config_impl( ) .await; + if second_nexus { + builder + .init_with_steps( + vec![( + "configure_second_nexus", + Box::new(|builder| { + builder.configure_second_nexus().boxed() + }), + )], + STEP_TIMEOUT, + ) + .await; + } + // The first and second sled agents have special UUIDs, and any extra ones // after that are random. From f0a31b8dc6c65e49bb3fb7170e00af8a9ff6f1cf Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Sat, 23 Aug 2025 14:38:37 -0700 Subject: [PATCH 35/38] fix omdb test --- dev-tools/omdb/tests/successes.out | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index 39ebd4d2696..7327069161f 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -32,9 +32,7 @@ changes: names added: 3, names removed: 0 + @ NS ns1.oxide-dev.test + ns1 AAAA ::1 -+ test-suite-silo.sys (records: 2) -+ A 127.0.0.1 -+ AAAA 100::1 ++ test-suite-silo.sys A 127.0.0.1 --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable @@ -48,9 +46,7 @@ External zone: oxide-dev.test NAME RECORDS @ NS ns1.oxide-dev.test ns1 AAAA ::1 - test-suite-silo.sys (records: 2) - A 127.0.0.1 - AAAA 100::1 + test-suite-silo.sys A 127.0.0.1 --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable @@ -1541,7 +1537,6 @@ parent: oxp_...................../crypt/zone/oxz_external_dns_..................... ..................... in service none none off oxp_...................../crypt/zone/oxz_internal_dns_..................... ..................... in service none none off oxp_...................../crypt/zone/oxz_nexus_..................... ..................... in service none none off - oxp_...................../crypt/zone/oxz_nexus_..................... ..................... in service none none off oxp_...................../crypt/zone/oxz_ntp_..................... ..................... in service none none off @@ -1556,7 +1551,6 @@ parent: external_dns ..................... install dataset in service ::1 internal_dns ..................... install dataset in service ::1 nexus ..................... install dataset in service ::ffff:127.0.0.1 - nexus ..................... install dataset in service ::1 COCKROACHDB SETTINGS: @@ -1667,7 +1661,6 @@ parent: oxp_...................../crypt/zone/oxz_external_dns_..................... ..................... in service none none off oxp_...................../crypt/zone/oxz_internal_dns_..................... ..................... in service none none off oxp_...................../crypt/zone/oxz_nexus_..................... ..................... 
in service none none off - oxp_...................../crypt/zone/oxz_nexus_..................... ..................... in service none none off oxp_...................../crypt/zone/oxz_ntp_..................... ..................... in service none none off @@ -1682,7 +1675,6 @@ parent: external_dns ..................... install dataset in service ::1 internal_dns ..................... install dataset in service ::1 nexus ..................... install dataset in service ::ffff:127.0.0.1 - nexus ..................... install dataset in service ::1 COCKROACHDB SETTINGS: From d2b1f6878ed2e5e49d80dcc63fa0b8bc3a63c821 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Mon, 25 Aug 2025 10:53:24 -0700 Subject: [PATCH 36/38] add test that Nexus quiesces when reading a blueprint saying so --- nexus/tests/integration_tests/mod.rs | 1 + nexus/tests/integration_tests/quiesce.rs | 139 +++++++++++++++++++++++ 2 files changed, 140 insertions(+) create mode 100644 nexus/tests/integration_tests/quiesce.rs diff --git a/nexus/tests/integration_tests/mod.rs b/nexus/tests/integration_tests/mod.rs index 497585cceb8..c0ea06dcb7d 100644 --- a/nexus/tests/integration_tests/mod.rs +++ b/nexus/tests/integration_tests/mod.rs @@ -35,6 +35,7 @@ mod pantry; mod password_login; mod probe; mod projects; +mod quiesce; mod quotas; mod rack; mod role_assignments; diff --git a/nexus/tests/integration_tests/quiesce.rs b/nexus/tests/integration_tests/quiesce.rs new file mode 100644 index 00000000000..d5ef6bb7e1c --- /dev/null +++ b/nexus/tests/integration_tests/quiesce.rs @@ -0,0 +1,139 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use anyhow::{Context, anyhow}; +use nexus_auth::context::OpContext; +use nexus_client::types::QuiesceState; +use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; +use nexus_reconfigurator_planning::planner::PlannerRng; +use nexus_reconfigurator_preparation::PlanningInputFromDb; +use nexus_test_interface::NexusServer; +use nexus_test_utils_macros::nexus_test; +use nexus_types::deployment::BlueprintTargetSet; +use nexus_types::deployment::PlannerChickenSwitches; +use omicron_common::api::external::Error; +use omicron_test_utils::dev::poll::CondCheckError; +use omicron_test_utils::dev::poll::wait_for_condition; +use omicron_uuid_kinds::GenericUuid; +use std::time::Duration; + +type ControlPlaneTestContext = + nexus_test_utils::ControlPlaneTestContext; + +/// Tests that Nexus quiesces when the blueprint says that it should +#[nexus_test] +async fn test_quiesce(cptestctx: &ControlPlaneTestContext) { + let log = &cptestctx.logctx.log; + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let opctx = OpContext::for_tests(log.clone(), datastore.clone()); + let nexus_internal_url = format!( + "http://{}", + cptestctx.server.get_http_server_internal_address().await + ); + let nexus_client = + nexus_client::Client::new(&nexus_internal_url, log.clone()); + + // Collect what we need to modify the blueprint. 
+ let collection = wait_for_condition( + || async { + let collection = datastore + .inventory_get_latest_collection(&opctx) + .await + .map_err(CondCheckError::Failed)?; + match collection { + Some(s) => Ok(s), + None => Err(CondCheckError::::NotYet), + } + }, + &Duration::from_secs(1), + &Duration::from_secs(60), + ) + .await + .expect("initial inventory collection"); + + let chicken_switches = datastore + .reconfigurator_chicken_switches_get_latest(&opctx) + .await + .expect("obtained latest chicken switches") + .map_or_else(PlannerChickenSwitches::default, |cs| { + cs.switches.planner_switches + }); + let planning_input = PlanningInputFromDb::assemble( + &opctx, + &datastore, + chicken_switches, + None, + ) + .await + .expect("planning input"); + let target_blueprint = nexus + .blueprint_target_view(&opctx) + .await + .expect("fetch current target config"); + let blueprint1 = nexus + .blueprint_view(&opctx, *target_blueprint.target_id.as_untyped_uuid()) + .await + .expect("fetch current target blueprint"); + + // Now, update the target blueprint to reflect that Nexus should quiesce. + // We don't need it to be enabled to still reflect quiescing. + let mut builder = BlueprintBuilder::new_based_on( + log, + &blueprint1, + &planning_input, + &collection, + "test-suite", + PlannerRng::from_entropy(), + ) + .expect("creating BlueprintBuilder"); + builder + .set_nexus_generation( + blueprint1.nexus_generation, + blueprint1.nexus_generation.next(), + ) + .expect("failed to set blueprint's Nexus generation"); + let blueprint2 = builder.build(); + nexus + .blueprint_import(&opctx, blueprint2.clone()) + .await + .expect("importing new blueprint"); + nexus + .blueprint_target_set( + &opctx, + BlueprintTargetSet { enabled: false, target_id: blueprint2.id }, + ) + .await + .expect("setting new target"); + + // Wait for Nexus to quiesce. + let _ = wait_for_condition( + || async { + let quiesce = nexus_client + .quiesce_get() + .await + .context("fetching quiesce state") + .map_err(CondCheckError::Failed)? + .into_inner(); + eprintln!("quiesce state: {:#?}\n", quiesce); + match quiesce.state { + QuiesceState::Undetermined => { + Err(CondCheckError::Failed(anyhow!( + "quiesce state should have been determined before \ + test started" + ))) + } + QuiesceState::Running => Err(CondCheckError::NotYet), + QuiesceState::DrainingSagas { .. } + | QuiesceState::DrainingDb { .. } + | QuiesceState::RecordingQuiesce { .. } + | QuiesceState::Quiesced { .. 
} => Ok(()), + } + }, + &Duration::from_millis(50), + &Duration::from_secs(30), + ) + .await + .expect("Nexus should have quiesced"); +} From 3ce8d59b0ca9cdd0723d135d8e764a82a3695f1a Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Tue, 2 Sep 2025 08:58:17 -0700 Subject: [PATCH 37/38] fixup conflict --- dev-tools/omdb/tests/successes.out | 58 ------------------------------ 1 file changed, 58 deletions(-) diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index 7327069161f..2bd070b5d8b 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -489,13 +489,7 @@ task: "nat_garbage_collector" task: "blueprint_loader" configured period: every m s -<<<<<<< HEAD last completed activation: , triggered by -||||||| parent of 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) - last completed activation: , triggered by a periodic timer firing -======= - last completed activation: , triggered by an explicit signal ->>>>>>> 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) started at (s ago) and ran for ms target blueprint: ............. execution: disabled @@ -504,13 +498,7 @@ task: "blueprint_loader" task: "blueprint_executor" configured period: every m -<<<<<<< HEAD last completed activation: , triggered by -||||||| parent of 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) - last completed activation: , triggered by a periodic timer firing -======= - last completed activation: , triggered by a dependent task completing ->>>>>>> 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) started at (s ago) and ran for ms target blueprint: ............. execution: disabled @@ -570,13 +558,7 @@ warning: unknown background task: "chicken_switches_watcher" (don't know how to task: "crdb_node_id_collector" configured period: every m -<<<<<<< HEAD last completed activation: , triggered by -||||||| parent of 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) - last completed activation: , triggered by a periodic timer firing -======= - last completed activation: , triggered by a dependent task completing ->>>>>>> 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) started at (s ago) and ran for ms warning: unknown background task: "crdb_node_id_collector" (don't know how to interpret details: Object {"errors": Array [Object {"err": String("failed to fetch node ID for zone ..................... at http://[::1]:REDACTED_PORT: Communication Error: error sending request for url (http://[::1]:REDACTED_PORT/node/id): error sending request for url (http://[::1]:REDACTED_PORT/node/id): client error (Connect): tcp connect error: Connection refused (os error )"), "zone_id": String(".....................")}], "nsuccess": Number(0)}) @@ -877,15 +859,7 @@ termination: Exited(0) stdout: task: "blueprint_loader" configured period: every m s -<<<<<<< HEAD last completed activation: , triggered by -||||||| parent of 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) - currently executing: no - last completed activation: , triggered by a periodic timer firing -======= - currently executing: no - last completed activation: , triggered by an explicit signal ->>>>>>> 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) started at (s ago) and ran for ms target blueprint: ............. 
execution: disabled @@ -894,15 +868,7 @@ task: "blueprint_loader" task: "blueprint_executor" configured period: every m -<<<<<<< HEAD last completed activation: , triggered by -||||||| parent of 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) - currently executing: no - last completed activation: , triggered by a periodic timer firing -======= - currently executing: no - last completed activation: , triggered by a dependent task completing ->>>>>>> 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) started at (s ago) and ran for ms target blueprint: ............. execution: disabled @@ -1041,15 +1007,7 @@ task: "nat_garbage_collector" task: "blueprint_loader" configured period: every m s -<<<<<<< HEAD last completed activation: , triggered by -||||||| parent of 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) - currently executing: no - last completed activation: , triggered by a periodic timer firing -======= - currently executing: no - last completed activation: , triggered by an explicit signal ->>>>>>> 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) started at (s ago) and ran for ms target blueprint: ............. execution: disabled @@ -1058,15 +1016,7 @@ task: "blueprint_loader" task: "blueprint_executor" configured period: every m -<<<<<<< HEAD last completed activation: , triggered by -||||||| parent of 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) - currently executing: no - last completed activation: , triggered by a periodic timer firing -======= - currently executing: no - last completed activation: , triggered by a dependent task completing ->>>>>>> 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) started at (s ago) and ran for ms target blueprint: ............. execution: disabled @@ -1126,15 +1076,7 @@ warning: unknown background task: "chicken_switches_watcher" (don't know how to task: "crdb_node_id_collector" configured period: every m -<<<<<<< HEAD last completed activation: , triggered by -||||||| parent of 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) - currently executing: no - last completed activation: , triggered by a periodic timer firing -======= - currently executing: no - last completed activation: , triggered by a dependent task completing ->>>>>>> 127d5a805 (add the "second" Nexus to the test suite blueprint; fix omdb tests) started at (s ago) and ran for ms warning: unknown background task: "crdb_node_id_collector" (don't know how to interpret details: Object {"errors": Array [Object {"err": String("failed to fetch node ID for zone ..................... 
at http://[::1]:REDACTED_PORT: Communication Error: error sending request for url (http://[::1]:REDACTED_PORT/node/id): error sending request for url (http://[::1]:REDACTED_PORT/node/id): client error (Connect): tcp connect error: Connection refused (os error )"), "zone_id": String(".....................")}], "nsuccess": Number(0)}) From 6c84ded4703ff018a01f5a019a7891a3988246c4 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Tue, 2 Sep 2025 09:20:29 -0700 Subject: [PATCH 38/38] pull in BlueprintBuilder.set_nexus_generation() --- .../planning/src/blueprint_builder/builder.rs | 27 +++++++++++++++++++ nexus/tests/integration_tests/quiesce.rs | 19 ++++--------- 2 files changed, 32 insertions(+), 14 deletions(-) diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 7cf11ac45ea..90bf09fe1ba 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -353,6 +353,10 @@ pub(crate) enum Operation { num_datasets_expunged: usize, num_zones_expunged: usize, }, + SetNexusGeneration { + current_generation: Generation, + new_generation: Generation, + }, SetTargetReleaseMinimumGeneration { current_generation: Generation, new_generation: Generation, @@ -465,6 +469,13 @@ impl fmt::Display for Operation { {current_generation} to {new_generation}" ) } + Self::SetNexusGeneration { current_generation, new_generation } => { + write!( + f, + "updated nexus generation from \ + {current_generation} to {new_generation}" + ) + } } } } @@ -2187,6 +2198,22 @@ impl<'a> BlueprintBuilder<'a> { Ok(()) } + /// Get the value of `nexus_generation`. + pub fn nexus_generation(&self) -> Generation { + self.nexus_generation + } + + /// Given the current value of `nexus_generation`, set the new value for + /// this blueprint. + pub fn set_nexus_generation(&mut self, new_generation: Generation) { + let current_generation = self.nexus_generation; + self.nexus_generation = new_generation; + self.record_operation(Operation::SetNexusGeneration { + current_generation, + new_generation, + }); + } + /// Allow a test to manually add an external DNS address, which could /// ordinarily only come from RSS. /// diff --git a/nexus/tests/integration_tests/quiesce.rs b/nexus/tests/integration_tests/quiesce.rs index d5ef6bb7e1c..916169ccd72 100644 --- a/nexus/tests/integration_tests/quiesce.rs +++ b/nexus/tests/integration_tests/quiesce.rs @@ -60,14 +60,10 @@ async fn test_quiesce(cptestctx: &ControlPlaneTestContext) { .map_or_else(PlannerChickenSwitches::default, |cs| { cs.switches.planner_switches }); - let planning_input = PlanningInputFromDb::assemble( - &opctx, - &datastore, - chicken_switches, - None, - ) - .await - .expect("planning input"); + let planning_input = + PlanningInputFromDb::assemble(&opctx, &datastore, chicken_switches) + .await + .expect("planning input"); let target_blueprint = nexus .blueprint_target_view(&opctx) .await @@ -88,12 +84,7 @@ async fn test_quiesce(cptestctx: &ControlPlaneTestContext) { PlannerRng::from_entropy(), ) .expect("creating BlueprintBuilder"); - builder - .set_nexus_generation( - blueprint1.nexus_generation, - blueprint1.nexus_generation.next(), - ) - .expect("failed to set blueprint's Nexus generation"); + builder.set_nexus_generation(blueprint1.nexus_generation.next()); let blueprint2 = builder.build(); nexus .blueprint_import(&opctx, blueprint2.clone())
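The patch series is truncated here, so as a reading aid the following is a standalone sketch (not the real nexus-reconfigurator-planning code) of the pattern PATCH 38/38 introduces: set_nexus_generation stores the new generation on the builder and records a SetNexusGeneration operation capturing both the old and new values, which is why the test-side call in quiesce.rs shrinks to a single argument, blueprint1.nexus_generation.next(). The Generation, Operation, and SketchBuilder types below are simplified stand-ins invented for illustration only.

// Standalone Rust sketch of the "record the transition as an operation"
// builder pattern from PATCH 38/38. All types here are simplified
// stand-ins, not the real omicron/nexus definitions.

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Generation(u64);

impl Generation {
    fn next(self) -> Generation {
        Generation(self.0 + 1)
    }
}

#[derive(Debug)]
enum Operation {
    SetNexusGeneration {
        current_generation: Generation,
        new_generation: Generation,
    },
}

struct SketchBuilder {
    nexus_generation: Generation,
    operations: Vec<Operation>,
}

impl SketchBuilder {
    /// Current value of `nexus_generation` tracked by the builder.
    fn nexus_generation(&self) -> Generation {
        self.nexus_generation
    }

    /// Set a new `nexus_generation` and log the old -> new transition.
    fn set_nexus_generation(&mut self, new_generation: Generation) {
        let current_generation = self.nexus_generation;
        self.nexus_generation = new_generation;
        self.operations.push(Operation::SetNexusGeneration {
            current_generation,
            new_generation,
        });
    }
}

fn main() {
    let mut builder = SketchBuilder {
        nexus_generation: Generation(1),
        operations: Vec::new(),
    };
    // Mirrors the test-side call
    // `builder.set_nexus_generation(blueprint1.nexus_generation.next())`.
    let next = builder.nexus_generation().next();
    builder.set_nexus_generation(next);
    assert_eq!(builder.nexus_generation(), Generation(2));
    println!("{:?}", builder.operations);
}

The single-argument form spares the caller from restating the current generation: the builder already tracks it, so it can record the transition itself.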