From 44a2d0219f2b01cba36d29b36ca7c90e11471b73 Mon Sep 17 00:00:00 2001 From: Benjamin Naecker Date: Thu, 4 Sep 2025 18:00:29 +0000 Subject: [PATCH 1/8] Internal support for linking any number of IP Pools and Silos. - Add database queries for linking any number of IP Pools to the Oxide internal Silo, rather than assuming there's just one. - Rework queries for linking / unlinking IP Pool and Silo, making them concurrency-safe and preventing deletion when there are IPs or we're trying to delete the last internal IP Pool. - The public API does not change here. We're still assuming exactly one IP Pool per IP version for internal usage, and not allowing the link or pool to appear in the API. That'll be a follow-up commit. - Fixes #8948 --- common/src/api/external/http_pagination.rs | 9 + nexus/db-model/src/ip_pool.rs | 4 +- nexus/db-queries/src/db/datastore/ip_pool.rs | 1420 +++++++++++++++-- nexus/db-queries/src/db/datastore/rack.rs | 8 +- nexus/db-queries/src/db/queries/oximeter.rs | 8 +- nexus/db-queries/src/db/raw_query_builder.rs | 7 + .../output/ip_pool_external_silo_link.sql | 32 + nexus/tests/integration_tests/ip_pools.rs | 96 ++ 8 files changed, 1433 insertions(+), 151 deletions(-) create mode 100644 nexus/db-queries/tests/output/ip_pool_external_silo_link.sql diff --git a/common/src/api/external/http_pagination.rs b/common/src/api/external/http_pagination.rs index 710de77f483..1fb18874b6a 100644 --- a/common/src/api/external/http_pagination.rs +++ b/common/src/api/external/http_pagination.rs @@ -382,6 +382,15 @@ pub enum PaginatedBy<'a> { Name(DataPageParams<'a, Name>), } +impl<'a> PaginatedBy<'a> { + pub fn limit(&self) -> NonZeroU32 { + match self { + PaginatedBy::Id(inner) => inner.limit, + PaginatedBy::Name(inner) => inner.limit, + } + } +} + impl ScanParams for ScanByNameOrId { diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs index 819be85ec8e..d575aa38486 100644 --- a/nexus/db-model/src/ip_pool.rs +++ b/nexus/db-model/src/ip_pool.rs @@ -77,7 +77,9 @@ impl From for shared::IpVersion { /// IP pools can be external or internal. External IP pools can be associated /// with a silo or project so that instance IP allocation draws from that pool /// instead of a system pool. 
-#[derive(Queryable, Insertable, Selectable, Clone, Debug, Resource)] +#[derive( + Queryable, Insertable, Selectable, Clone, Debug, Resource, PartialEq, +)] #[diesel(table_name = ip_pool)] pub struct IpPool { #[diesel(embed)] diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index f31ae4181fa..189a0cd04fb 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -13,7 +13,6 @@ use crate::db::collection_insert::DatastoreCollection; use crate::db::datastore::SERVICE_IPV4_POOL_NAME; use crate::db::datastore::SERVICE_IPV6_POOL_NAME; use crate::db::identity::Resource; -use crate::db::model::ExternalIp; use crate::db::model::IpKind; use crate::db::model::IpPool; use crate::db::model::IpPoolRange; @@ -24,11 +23,15 @@ use crate::db::model::Name; use crate::db::pagination::Paginator; use crate::db::pagination::paginated; use crate::db::queries::ip_pool::FilterOverlappingIpRanges; +use crate::db::raw_query_builder::QueryBuilder; +use crate::db::raw_query_builder::SelectableSql; +use crate::db::raw_query_builder::TypedSqlQuery; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; use diesel::result::DatabaseErrorKind; use diesel::result::Error as DieselError; +use diesel::sql_types; use ipnetwork::IpNetwork; use nexus_db_errors::ErrorHandler; use nexus_db_errors::OptionalError; @@ -42,7 +45,9 @@ use nexus_db_model::InternetGatewayIpPool; use nexus_db_model::IpVersion; use nexus_db_model::Project; use nexus_db_model::Vpc; +use nexus_db_schema::enums::IpKindEnum; use nexus_types::external_api::shared::IpRange; +use nexus_types::silo::INTERNAL_SILO_ID; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::DeleteResult; @@ -97,40 +102,216 @@ const INTERNAL_SILO_DEFAULT_CONSTRAINT: &'static str = const INTERNAL_SILO_DEFAULT_ERROR: &'static str = "The internal Silo cannot have a default IP Pool"; +// Error message emitted when a user attempts to link an IP Pool and internal +// Silo, but the pool is already linked to an external Silo, or vice versa. +const BAD_SILO_LINK_ERROR: &str = "IP Pools cannot be both linked to external \ + Silos and delegated for internal Oxide usage."; + +// Error message emitted when a user attempts to unlink an IP Pool from a Silo +// while the pool has external IP addresses allocated from it. +const POOL_HAS_IPS_ERROR: &str = + "IP addresses from this pool are in use in the linked silo"; + +// Error message emitted when a user attempts to unlink an IP Pool from the +// Oxide internal Silo, without at least one other IP Pool linked to it. +const LAST_POOL_ERROR: &str = "IP Pool delegated to Oxide internal usage \ + cannot be unlinked without at least one other IP Pool delegated. Create \ + and delegate at least one more IP Pool before deleting this one."; + +/// Helper trait for checking if an argument to a Silo <-> IP Pool linking +/// operation refers to the Oxide internal Silo. +trait IsInternalSilo { + fn is_internal_silo(&self) -> bool; +} + +impl IsInternalSilo for authz::Silo { + fn is_internal_silo(&self) -> bool { + self.id() == INTERNAL_SILO_ID + } +} + +/// Describes how IP Pools are reserved. +#[derive(Clone, Copy)] +enum PoolReservationType { + /// The pool is reserved for Oxide's internal usage. + Internal, + /// The pool is reserved for external, customer usage. 
+ External, +} + +// TODO-cleanup: This is essentially the `paginated` function in macro form. +// It's extremely time-consuming to figure out the correct trait bounds which +// allow that function to work on the result of a join query. We could try to +// factor that out if it becomes useful, but since this is only used directly in +// this module, it's fine as-is for now. +macro_rules! paginated_table_like { + ($query:expr, $pagparams:expr) => { + match $pagparams { + ::omicron_common::api::external::http_pagination::PaginatedBy::Id(by_id) => { + let maybe_marker = by_id.marker.copied(); + match by_id.direction { + ::dropshot::PaginationOrder::Ascending => { + if let Some(marker) = maybe_marker { + $query = $query.filter(::nexus_db_schema::schema::ip_pool::id.gt(marker)); + } + $query.order(::nexus_db_schema::schema::ip_pool::id.asc()) + } + ::dropshot::PaginationOrder::Descending => { + if let Some(marker) = maybe_marker { + $query = $query.filter(::nexus_db_schema::schema::ip_pool::id.lt(marker)); + } + $query.order(::nexus_db_schema::schema::ip_pool::id.desc()) + } + } + } + ::omicron_common::api::external::http_pagination::PaginatedBy::Name(by_name) => { + let maybe_marker = by_name.marker.map(|n| Name::ref_cast(n)).cloned(); + match by_name.direction { + ::dropshot::PaginationOrder::Ascending => { + if let Some(marker) = maybe_marker { + $query = $query.filter(::nexus_db_schema::schema::ip_pool::name.gt(marker)); + } + $query.order(::nexus_db_schema::schema::ip_pool::name.asc()) + } + ::dropshot::PaginationOrder::Descending => { + if let Some(marker) = maybe_marker { + $query = $query.filter(::nexus_db_schema::schema::ip_pool::name.lt(marker)); + } + $query.order(::nexus_db_schema::schema::ip_pool::name.desc()) + } + } + } + } + }; +} + impl DataStore { - /// List IP Pools - pub async fn ip_pools_list( + /// List IP Pools by their reservation type and optionally IP version, paginated. + async fn ip_pools_list_paginated( &self, opctx: &OpContext, + reservation: PoolReservationType, + version: Option, pagparams: &PaginatedBy<'_>, ) -> ListResultVec { - use nexus_db_schema::schema::ip_pool; - opctx .authorize(authz::Action::ListChildren, &authz::IP_POOL_LIST) .await?; - match pagparams { - PaginatedBy::Id(pagparams) => { - paginated(ip_pool::table, ip_pool::id, pagparams) + match reservation { + PoolReservationType::Internal => { + self.list_ip_pools_for_internal(opctx, version, pagparams).await + } + PoolReservationType::External => { + self.list_ip_pools_for_external(opctx, version, pagparams).await } - PaginatedBy::Name(pagparams) => paginated( - ip_pool::table, - ip_pool::name, - &pagparams.map_name(|n| Name::ref_cast(n)), - ), } - .filter(ip_pool::name.ne(SERVICE_IPV4_POOL_NAME)) - .filter(ip_pool::name.ne(SERVICE_IPV6_POOL_NAME)) - .filter(ip_pool::time_deleted.is_null()) - .select(IpPool::as_select()) - .get_results_async(&*self.pool_connection_authorized(opctx).await?) + } + + /// List IP Pools reserved for Oxide. + async fn list_ip_pools_for_internal( + &self, + opctx: &OpContext, + version: Option, + pagparams: &PaginatedBy<'_>, + ) -> ListResultVec { + use nexus_db_schema::schema::ip_pool; + use nexus_db_schema::schema::ip_pool_resource; + // IP Pools reserved for internal usage are, by definition, linked to + // the internal Silo. So we can do a natural join between the `ip_pool` + // and `ip_pool_resource` table, filtering to those pools linked to our + // silo. 
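+        // Roughly, the query built below has this shape (a sketch, not the
+        // exact SQL that Diesel generates):
+        //
+        //   SELECT ip_pool.*
+        //   FROM ip_pool
+        //   INNER JOIN ip_pool_resource
+        //     ON ip_pool.id = ip_pool_resource.ip_pool_id
+        //   WHERE ip_pool_resource.resource_type = 'silo'
+        //     AND ip_pool_resource.resource_id = <INTERNAL_SILO_ID>
+        //     AND ip_pool.time_deleted IS NULL
+        //   ORDER BY ... LIMIT ...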
+ let mut query = ip_pool::table + .inner_join( + ip_pool_resource::table + .on(ip_pool::id.eq(ip_pool_resource::ip_pool_id)), + ) + .filter( + ip_pool_resource::resource_type + .eq(IpPoolResourceType::Silo) + .and(ip_pool_resource::resource_id.eq(INTERNAL_SILO_ID)), + ) + .into_boxed(); + let query = paginated_table_like!(query, pagparams); + let query = match version { + Some(ver) => query.filter(ip_pool::ip_version.eq(ver)), + None => query, + }; + query + .filter(ip_pool::time_deleted.is_null()) + .limit(pagparams.limit().get().into()) + .select(IpPool::as_select()) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + /// List IP Pools reserved for external usage. + async fn list_ip_pools_for_external( + &self, + opctx: &OpContext, + version: Option, + pagparams: &PaginatedBy<'_>, + ) -> ListResultVec { + use nexus_db_schema::schema::ip_pool; + use nexus_db_schema::schema::ip_pool_resource; + // IP Pools reserved for external usage are either unlinked to any Silo, + // or linked to a Silo other than the internal one. Use a left join to + // express that. + let mut query = ip_pool::table + .left_join( + ip_pool_resource::table.on(ip_pool::id + .eq(ip_pool_resource::ip_pool_id) + .and( + ip_pool_resource::resource_type + .eq(IpPoolResourceType::Silo), + ) + .and(ip_pool_resource::resource_id.eq(INTERNAL_SILO_ID))), + ) + .filter(ip_pool_resource::resource_id.is_null()) + .into_boxed(); + let query = paginated_table_like!(query, pagparams); + let query = match version { + Some(ver) => query.filter(ip_pool::ip_version.eq(ver)), + None => query, + }; + query + .filter(ip_pool::time_deleted.is_null()) + .limit(pagparams.limit().get().into()) + .select(IpPool::as_select()) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + /// List IP Pools + pub async fn ip_pools_list( + &self, + opctx: &OpContext, + pagparams: &PaginatedBy<'_>, + ) -> ListResultVec { + self.ip_pools_list_paginated( + opctx, + PoolReservationType::External, + None, + pagparams, + ) .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Look up whether the given pool is available to users in the current /// silo, i.e., whether there is an entry in the association table linking /// the pool with that silo + // + // TODO-correctness: This seems difficult to use without TOCTOU issues. It's + // currently used to ensure there's a link between a Silo and an IP Pool + // when allocating an external address for an instance in that Silo. But + // that works by checking that the link exists, and then in a separate + // query, allocating an address out of it. Suppose the silo was unlinked + // after this check, but before the external address allocation query ran. + // Then one could end up with an address from an unlinked IP Pool, which + // seems wrong. + // + // See https://github.com/oxidecomputer/omicron/issues/8992 pub async fn ip_pool_fetch_link( &self, opctx: &OpContext, @@ -219,14 +400,30 @@ impl DataStore { }) } + /// List IP Pools reserved for Oxide's internal usage. + pub async fn ip_pools_service_list( + &self, + opctx: &OpContext, + version: Option, + pagparams: &PaginatedBy<'_>, + ) -> ListResultVec { + self.ip_pools_list_paginated( + opctx, + PoolReservationType::Internal, + version, + pagparams, + ) + .await + } + /// Look up internal service IP Pools for both IP versions. 
/// /// This is useful when you need to handle resources like external IPs where /// the actual address might be from either IP version. // - // NOTE: It'd be better to do one roundtrip to the DB, but this is - // rarely-used right now. We also want to return the authz and database - // objects, so we need the lookup-path mechanism. + // TODO-remove: Use list_ip_pools_for_internal instead. + // + // See https://github.com/oxidecomputer/omicron/issues/8947. pub async fn ip_pools_service_lookup_both_versions( &self, opctx: &OpContext, @@ -243,6 +440,10 @@ impl DataStore { /// names. There are separate IP Pools for IPv4 and IPv6 address ranges. /// /// This method may require an index by Availability Zone in the future. + // + // TODO-remove: Use list_ip_pools_for_internal instead. + // + // See https://github.com/oxidecomputer/omicron/issues/8947. pub async fn ip_pools_service_lookup( &self, opctx: &OpContext, @@ -282,6 +483,9 @@ impl DataStore { }) } + /// Delete an IP Pool, and any links between it an any Silos. + /// + /// This fails if there are still IP Ranges in the pool. pub async fn ip_pool_delete( &self, opctx: &OpContext, @@ -351,6 +555,11 @@ impl DataStore { /// Check whether the pool is internal by checking that it exists and is /// associated with the internal silo + // + // TODO-remove: This should probably go away when we let operators link any + // IP Pools to the internal Silo. The pool belongs to them, even if they've + // delegated it to us. See + // https://github.com/oxidecomputer/omicron/issues/8947. pub async fn ip_pool_is_internal( &self, opctx: &OpContext, @@ -526,6 +735,7 @@ impl DataStore { Ok(count) } + /// List Silos linked to the given IP Pool. pub async fn ip_pool_silo_list( &self, opctx: &OpContext, @@ -555,6 +765,8 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } + /// List IP Pools linked to the given Silo. + /// /// Returns (IpPool, IpPoolResource) so we can know in the calling code /// whether the pool is default for the silo pub async fn silo_ip_pool_list( @@ -586,20 +798,18 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } + /// Insert a link between an IP Pool and a Silo. pub async fn ip_pool_link_silo( &self, opctx: &OpContext, ip_pool_resource: IpPoolResource, ) -> CreateResult { - use nexus_db_schema::schema::ip_pool_resource::dsl; opctx .authorize(authz::Action::CreateChild, &authz::IP_POOL_LIST) .await?; let conn = self.pool_connection_authorized(opctx).await?; - - let result = diesel::insert_into(dsl::ip_pool_resource) - .values(ip_pool_resource) + let result = link_ip_pool_to_silo_query(&ip_pool_resource) .get_result_async(&*conn) .await .map_err(|e| { @@ -625,6 +835,28 @@ impl DataStore { ) ) } + // Handle intentional errors in the query. 
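+            // The linking query deliberately produces sentinel strings
+            // ('bad-link-type', 'ip-pool-deleted', 'silo-deleted') where a
+            // UUID is expected, so each violated precondition surfaces here
+            // as a UUID cast error. We recover which precondition failed by
+            // matching the sentinel embedded in the error message.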
+ DieselError::DatabaseError(DatabaseErrorKind::Unknown, ref info) => { + let is_uuid_cast_error = |msg: &str, sentinel: &str| -> bool { + let expected = format!( + "could not parse \"{}\" as type uuid: uuid: \ + incorrect UUID length: {}", + sentinel, + sentinel, + ); + msg == expected + }; + let msg = info.message(); + if is_uuid_cast_error(msg, BAD_SILO_LINK_SENTINEL) { + Error::invalid_request(BAD_SILO_LINK_ERROR) + } else if is_uuid_cast_error(msg, IP_POOL_DELETED_SENTINEL) { + Error::not_found_by_id(ResourceType::IpPool, &ip_pool_resource.ip_pool_id) + } else if is_uuid_cast_error(msg, SILO_DELETED_SENTINEL) { + Error::not_found_by_id(ResourceType::Silo, &ip_pool_resource.resource_id) + } else { + public_error_from_diesel(e, ErrorHandler::Server) + } + } _ => public_error_from_diesel(e, ErrorHandler::Server), } })?; @@ -642,6 +874,11 @@ impl DataStore { Ok(result) } + // TODO-correctness: This seems like it should be in a transaction. At + // least, the nested-loops can mostly be re-expressed as a join between the + // silos, projects, vpcs, and Internet gateway tables. + // + // See https://github.com/oxidecomputer/omicron/issues/8992. async fn link_default_gateway( &self, opctx: &OpContext, @@ -735,6 +972,10 @@ impl DataStore { Ok(()) } + // TODO-correctness: This should probably be in a transaction, collecting + // all the Internet gateway IDs via a JOIN and then soft-deleting them all. + // + // See https://github.com/oxidecomputer/omicron/issues/8992. async fn unlink_ip_pool_gateway( &self, opctx: &OpContext, @@ -953,100 +1194,6 @@ impl DataStore { }) } - /// Ephemeral and snat IPs are associated with a silo through an instance, - /// so in order to see if there are any such IPs outstanding in the given - /// silo, we have to join IP -> Instance -> Project -> Silo - async fn ensure_no_instance_ips_outstanding( - &self, - opctx: &OpContext, - authz_pool: &authz::IpPool, - authz_silo: &authz::Silo, - ) -> Result<(), Error> { - use nexus_db_schema::schema::external_ip; - use nexus_db_schema::schema::instance; - use nexus_db_schema::schema::project; - - let existing_ips = external_ip::table - .inner_join( - instance::table - .on(external_ip::parent_id.eq(instance::id.nullable())), - ) - .inner_join(project::table.on(instance::project_id.eq(project::id))) - .filter(external_ip::is_service.eq(false)) - .filter(external_ip::parent_id.is_not_null()) - .filter(external_ip::time_deleted.is_null()) - .filter(external_ip::ip_pool_id.eq(authz_pool.id())) - // important, floating IPs are handled separately - .filter(external_ip::kind.eq(IpKind::Ephemeral).or(external_ip::kind.eq(IpKind::SNat))) - .filter(instance::time_deleted.is_null()) - // we have to join through IPs to instances to projects to get the silo ID - .filter(project::silo_id.eq(authz_silo.id())) - .select(ExternalIp::as_select()) - .limit(1) - .load_async::( - &*self.pool_connection_authorized(opctx).await?, - ) - .await - .map_err(|e| { - Error::internal_error(&format!( - "error checking for outstanding IPs before deleting IP pool association to resource: {:?}", - e - )) - })?; - - if !existing_ips.is_empty() { - return Err(Error::invalid_request( - "IP addresses from this pool are in use in the linked silo", - )); - } - - Ok(()) - } - - /// Floating IPs are associated with a silo through a project, so this one - /// is a little simpler than ephemeral. We join IP -> Project -> Silo. 
- async fn ensure_no_floating_ips_outstanding( - &self, - opctx: &OpContext, - authz_pool: &authz::IpPool, - authz_silo: &authz::Silo, - ) -> Result<(), Error> { - use nexus_db_schema::schema::external_ip; - use nexus_db_schema::schema::project; - - let existing_ips = external_ip::table - .inner_join(project::table.on(external_ip::project_id.eq(project::id.nullable()))) - .filter(external_ip::is_service.eq(false)) - .filter(external_ip::time_deleted.is_null()) - // all floating IPs have a project - .filter(external_ip::project_id.is_not_null()) - .filter(external_ip::ip_pool_id.eq(authz_pool.id())) - .filter(external_ip::kind.eq(IpKind::Floating)) - // we have to join through IPs to projects to get the silo ID - .filter(project::silo_id.eq(authz_silo.id())) - .filter(project::time_deleted.is_null()) - .select(ExternalIp::as_select()) - .limit(1) - .load_async::( - &*self.pool_connection_authorized(opctx).await?, - ) - .await - .map_err(|e| { - Error::internal_error(&format!( - "error checking for outstanding IPs before deleting IP pool association to resource: {:?}", - e - )) - })?; - - if !existing_ips.is_empty() { - return Err(Error::invalid_request( - "IP addresses from this pool are in use in the linked silo", - )); - } - - Ok(()) - } - /// Delete IP pool assocation with resource unless there are outstanding /// IPs allocated from the pool in the associated silo pub async fn ip_pool_unlink_silo( @@ -1055,33 +1202,51 @@ impl DataStore { authz_pool: &authz::IpPool, authz_silo: &authz::Silo, ) -> DeleteResult { - use nexus_db_schema::schema::ip_pool_resource; - opctx.authorize(authz::Action::Modify, authz_pool).await?; opctx.authorize(authz::Action::Modify, authz_silo).await?; - // We can only delete the association if there are no IPs allocated - // from this pool in the associated resource. - self.ensure_no_instance_ips_outstanding(opctx, authz_pool, authz_silo) - .await?; - self.ensure_no_floating_ips_outstanding(opctx, authz_pool, authz_silo) - .await?; - let conn = self.pool_connection_authorized(opctx).await?; + if authz_silo.is_internal_silo() { + unlink_ip_pool_from_internal_silo_query(authz_pool.id()) + } else { + unlink_ip_pool_from_external_silo_query( + authz_pool.id(), + authz_silo.id(), + ) + } + .execute_async(&*conn) + .await + .map_err(|e| match e { + DieselError::DatabaseError( + DatabaseErrorKind::Unknown, + ref info, + ) => { + let msg = info.message(); + // Handle intentional parsing errors. + if msg.contains("could not parse") { + // Intentional bool-parsing error, which we use to detect + // when there are still external IPs in the Silo. + if msg.ends_with("invalid bool value") { + Error::invalid_request(POOL_HAS_IPS_ERROR) + // Intentional int-parsing error, which we use to detect + // attempting to unlink the last IP Pool linked to the + // internal Oxide Silo. 
+ } else if msg.contains("strconv.ParseInt") + && msg.ends_with("invalid syntax") + { + Error::invalid_request(LAST_POOL_ERROR) + } else { + public_error_from_diesel(e, ErrorHandler::Server) + } + } else { + public_error_from_diesel(e, ErrorHandler::Server) + } + } + _ => public_error_from_diesel(e, ErrorHandler::Server), + })?; - diesel::delete(ip_pool_resource::table) - .filter(ip_pool_resource::ip_pool_id.eq(authz_pool.id())) - .filter(ip_pool_resource::resource_id.eq(authz_silo.id())) - .execute_async(&*conn) - .await - .map(|_rows_deleted| ()) - .map_err(|e| { - Error::internal_error(&format!( - "error deleting IP pool association to resource: {:?}", - e - )) - })?; - + // TODO-correctness: We probably want to do this in the same transaction + // as above. See https://github.com/oxidecomputer/omicron/issues/8992. self.unlink_ip_pool_gateway( opctx, authz_silo.id(), @@ -1313,26 +1478,440 @@ impl DataStore { } } +// Sentinel we try to cast as a UUID in the database, when linking an IP Pool to +// a Silo of the wrong "type" -- i.e., linking to an external Silo if the Pool +// is already linked to our internal Silo, or vice versa. +const BAD_SILO_LINK_SENTINEL: &str = "bad-link-type"; + +// Sentinel we try to cast as a UUID in the database when the IP Pool is +// deleted between selecting it and trying to insert the link. +const IP_POOL_DELETED_SENTINEL: &str = "ip-pool-deleted"; + +// Sentinel we try to cast as a UUID in the database when the Silo is deleted +// between selecting it and trying to insert the link. +const SILO_DELETED_SENTINEL: &str = "silo-deleted"; + +// Query to conditionally link an IP Pool to a Silo. +// +// This method returns a SQL query to conditionally insert a link between an IP +// Pool and a Silo. It maintains the invariant that an IP Pool can be linked to +// any number of customer silos, OR the Oxide-internal Silo, but not both. It +// also internally checks that a previously-fetched IP Pool and Silo still exist +// at the time the query is run. +// +// The full query is: +// +// ```sql +// WITH +// -- Select the IP Pool by ID, used to ensure it still exists when we run +// -- this query. +// ip_pool AS (SELECT id FROM ip_pool WHERE id = $1 AND time_deleted IS NULL), +// -- Select the Silo by ID, used to ensure it still exists when we run this +// -- query +// silo AS (SELECT id FROM silo WHERE id = $2 AND time_deleted IS NULL), +// -- Get the ID of any Silo this pool is currently linked to. This is used to +// -- verify that we don't link a pool to both an internal and external silo. +// existing_linked_silo +// AS ( +// SELECT resource_id FROM ip_pool_resource WHERE ip_pool_id = $3 AND resource_type = $4 LIMIT 1 +// ) +// INSERT +// INTO +// ip_pool_resource (ip_pool_id, resource_type, resource_id, is_default) +// SELECT +// -- If the pool exists, take its ID as a string. If it does not exist, take +// -- the string `'ip-pool-deleted'`. Attempt to cast the result to a UUID. +// -- This is the "true or cast error" trick we use in many places. +// CAST(COALESCE(CAST(ip.id AS STRING), 'ip-pool-deleted') AS UUID), +// -- The resource type, always 'silo' here. +// $5, +// -- Check that we don't link a pool to both an internal and external silo. +// CAST( +// CASE +// -- If the Silo was actually deleted, return a const that fails to cast to +// -- a UUID. This is the "true or cast error" trick. +// WHEN s.id IS NULL THEN 'silo-deleted' +// -- The main check, which is true if +// -- 1. 
The Silo we're trying to link and the existing link are both the +// -- internal Silo ID. +// -- 2. The Silo we're trying to link and any existing link are _not_ the +// -- internal Silo ID. +// -- 3. There is no existing link. +// -- Logically this is equivalent to checking that the pool is either +// -- unlinked, or linked only to the internal silo or only external silos, +// -- but not both. +// -- +// -- In that case, use the input Silo ID. +// WHEN (s.id = $6 AND els.resource_id = $7) +// OR (s.id != $8 AND els.resource_id != $9) +// OR els.resource_id IS NULL +// THEN $10 +// -- If the above fails, it's because we're trying to link a pool to both +// -- an internal and external silo. Generate an un-castable string. +// ELSE 'bad-link-type' +// END +// AS UUID +// ), +// $11 +// FROM +// (SELECT 1) AS dummy +// LEFT JOIN ip_pool AS ip ON true +// LEFT JOIN silo AS s ON true +// LEFT JOIN existing_linked_silo AS els ON true +// RETURNING +// * +// ``` +fn link_ip_pool_to_silo_query( + ip_pool_resource: &IpPoolResource, +) -> TypedSqlQuery> { + let mut builder = QueryBuilder::new(); + builder + .sql("WITH ip_pool AS (SELECT id FROM ip_pool WHERE id = ") + .param() + .bind::(ip_pool_resource.ip_pool_id) + .sql( + " \ + AND time_deleted IS NULL), \ + silo AS (SELECT id FROM silo WHERE id = ", + ) + .param() + .bind::(ip_pool_resource.resource_id) + .sql( + " \ + AND time_deleted IS NULL), \ + existing_linked_silo AS (\ + SELECT resource_id \ + FROM ip_pool_resource \ + WHERE ip_pool_id = ", + ) + .param() + .bind::(ip_pool_resource.ip_pool_id) + .sql(" AND resource_type = ") + .param() + .bind::( + ip_pool_resource.resource_type, + ) + .sql( + " LIMIT 1) \ + INSERT INTO ip_pool_resource (\ + ip_pool_id, \ + resource_type, \ + resource_id, \ + is_default\ + ) SELECT CAST(COALESCE(CAST(ip.id AS STRING), '", + ) + .sql(IP_POOL_DELETED_SENTINEL) + .sql("') AS UUID), ") + .param() + .bind::( + ip_pool_resource.resource_type, + ) + .sql(", CAST(CASE ") + .sql("WHEN s.id IS NULL THEN '") + .sql(SILO_DELETED_SENTINEL) + .sql("' WHEN (s.id = ") + .param() + .bind::(INTERNAL_SILO_ID) + .sql(" AND els.resource_id = ") + .param() + .bind::(INTERNAL_SILO_ID) + .sql(") OR (s.id != ") + .param() + .bind::(INTERNAL_SILO_ID) + .sql(" AND els.resource_id != ") + .param() + .bind::(INTERNAL_SILO_ID) + .sql(") OR els.resource_id IS NULL THEN ") + .param() + .bind::(ip_pool_resource.resource_id.to_string()) + .sql(" ELSE '") + .sql(BAD_SILO_LINK_SENTINEL) + .sql("' END AS UUID), ") + .param() + .bind::(ip_pool_resource.is_default) + .sql( + " FROM (SELECT 1) AS dummy \ + LEFT JOIN ip_pool AS ip ON TRUE \ + LEFT JOIN silo AS s ON TRUE \ + LEFT JOIN existing_linked_silo AS els ON TRUE \ + RETURNING *", + ); + builder.query() +} + +// Query to conditionally unlink an IP Pool from an external / customer Silo. +// +// This deletes the link iff there are no outstanding instance external IPs or +// floating IPs allocated out of the pool, to objects in the Silo. +// +// The full query is: +// +// ``` +// -- This CTE returns one row if there are any external IPs attached to +// -- instances, in any projects in the Silo. 
+// WITH instance_ips AS ( +// SELECT 1 +// FROM external_ip +// INNER JOIN instance ON external_ip.parent_id = instance.id +// INNER JOIN project ON instance.project_id = project.id +// WHERE +// external_ip.is_service = FALSE AND +// external_ip.parent_id IS NOT NULL AND +// external_ip.time_deleted IS NULL AND +// external_ip.ip_pool_id = $1 AND +// external_ip.kind != 'floating' AND +// instance.time_deleted IS NULL AND +// project.silo_id = $2 +// LIMIT 1 +// ), +// -- This CTE returns one row if there are any Floating IPs in the Silo, +// -- whether they're attached or not. +// floating_ips AS ( +// SELECT 1 +// FROM external_ip +// INNER JOIN project ON external_ip.project_id = project.id +// WHERE +// external_ip.is_service = FALSE AND +// external_ip.time_deleted IS NULL AND +// external_ip.project_id IS NOT NULL AND +// external_ip.ip_pool_id = $3 AND +// external_ip.kind = 'floating' AND +// project.silo_id = $4 AND +// project.time_deleted IS NULL +// LIMIT 1 +// ) +// -- Delete the requested link by primary key, but conditionally. +// DELETE FROM ip_pool_resource +// WHERE +// ip_pool_id = $7 AND +// resource_type = 'silo' AND +// resource_id = $8 AND +// -- If there are any external IPs, this generates an error casting 'eips' to +// -- a boolean, which we detect and handle. +// CAST(IF(EXISTS( +// SELECT 1 FROM instance_ips +// UNION ALL +// SELECT 1 FROM floating_ips +// ), 'eips', 'true') AS BOOL) +// ``` +fn unlink_ip_pool_from_external_silo_query( + ip_pool_id: Uuid, + silo_id: Uuid, +) -> TypedSqlQuery<()> { + let mut builder = QueryBuilder::new(); + builder + .sql( + "WITH instance_ips AS (\ + SELECT 1 \ + FROM external_ip \ + INNER JOIN instance ON external_ip.parent_id = instance.id \ + INNER JOIN project ON instance.project_id = project.id \ + WHERE \ + external_ip.is_service = FALSE AND \ + external_ip.parent_id IS NOT NULL AND \ + external_ip.time_deleted IS NULL AND \ + external_ip.ip_pool_id = ", + ) + .param() + .bind::(ip_pool_id) + .sql(" AND external_ip.kind != ") + .param() + .bind::(IpKind::Floating) + .sql(" AND instance.time_deleted IS NULL AND project.silo_id = ") + .param() + .bind::(silo_id) + .sql( + " LIMIT 1), floating_ips AS (\ + SELECT 1 \ + FROM external_ip \ + INNER JOIN project ON external_ip.project_id = project.id \ + WHERE \ + external_ip.is_service = FALSE AND \ + external_ip.time_deleted IS NULL AND \ + external_ip.project_id IS NOT NULL AND \ + external_ip.ip_pool_id = ", + ) + .param() + .bind::(ip_pool_id) + .sql(" AND external_ip.kind = ") + .param() + .bind::(IpKind::Floating) + .sql(" AND project.silo_id = ") + .param() + .bind::(silo_id) + .sql( + " AND project.time_deleted IS NULL LIMIT 1) \ + DELETE FROM ip_pool_resource \ + WHERE ip_pool_id = ", + ) + .param() + .bind::(ip_pool_id) + .sql(" AND resource_type = ") + .param() + .bind::( + IpPoolResourceType::Silo, + ) + .sql(" AND resource_id = ") + .param() + .bind::(silo_id) + .sql( + " AND \ + CAST(IF(\ + EXISTS(\ + SELECT 1 FROM instance_ips \ + UNION ALL \ + SELECT 1 FROM floating_ips\ + ), \ + 'has-eips', \ + 'true'\ + ) AS BOOL)", + ); + builder.query() +} + +// Query to conditionally unlink a pool from the internal Oxide Silo. +// +// In addition to checking for external IPs, this also ensures there is at least +// one other pool linked to the Silo. +// +// This query is similar to the one above for unlinking an external silo, with +// slightly different checks for external addresses. 
We don't need to join the +// `instance` table, and so look only at the `external_ip` table for IPs used in +// the provided IP Pool. +// +// It has one additional CTE and intentional cast error, which prevents deleting +// the link if this is the last pool linked to the internal silo. That would +// prevent Oxide services allocating any new addresses. +// +// The CTE is: +// +// ```sql +// internally_linked_pools AS ( +// SELECT 1 +// FROM ip_pool_resource +// WHERE resource_type = 'silo' AND resource_id = $INTERNAL_SILO_ID +// LIMIT 2 +// ) +// ``` +// +// And the cast error is: +// +// ```sql +// CAST(IF( +// (SELECT COUNT(*) FROM internally_linked_pools) < 2, +// 'last-pool', +// '1' +// ) AS INT) = 1 +// ``` +// +// So we're counting internally linked pools, checking that there are at least 2 +// when we attempt the deletion, and failing with an integer-cast error if not. +fn unlink_ip_pool_from_internal_silo_query( + ip_pool_id: Uuid, +) -> TypedSqlQuery<()> { + let mut builder = QueryBuilder::new(); + builder + .sql( + "WITH external_ips AS (\ + SELECT 1 \ + FROM external_ip \ + WHERE \ + external_ip.is_service = TRUE AND \ + external_ip.time_deleted IS NULL AND \ + external_ip.ip_pool_id = ", + ) + .param() + .bind::(ip_pool_id) + .sql( + " LIMIT 1), \ + internally_linked_pools AS (\ + SELECT 1 \ + FROM ip_pool_resource \ + WHERE resource_type = ", + ) + .param() + .bind::( + IpPoolResourceType::Silo, + ) + .sql(" AND resource_id = ") + .param() + .bind::(INTERNAL_SILO_ID) + .sql( + " LIMIT 2) \ + DELETE FROM ip_pool_resource \ + WHERE ip_pool_id = ", + ) + .param() + .bind::(ip_pool_id) + .sql(" AND resource_type = ") + .param() + .bind::( + IpPoolResourceType::Silo, + ) + .sql(" AND resource_id = ") + .param() + .bind::(INTERNAL_SILO_ID) + .sql( + " AND \ + CAST(IF(\ + EXISTS(SELECT 1 FROM external_ips), \ + 'has-eips', \ + 'true'\ + ) AS BOOL)", + ) + // Generate an error casting the string 'last-pool' to an int when there are + // fewer than 2 pools still linked to the silo. 
+ .sql( + " AND \ + CAST(IF(\ + (SELECT COUNT(*) FROM internally_linked_pools) < 2, \ + 'last-pool', \ + '1'\ + ) AS INT) = 1", + ); + builder.query() +} + #[cfg(test)] mod test { + use std::net::Ipv4Addr; use std::num::NonZeroU32; use crate::authz; - use crate::db::datastore::ip_pool::INTERNAL_SILO_DEFAULT_ERROR; + use crate::db::datastore::ip_pool::{ + INTERNAL_SILO_DEFAULT_ERROR, PoolReservationType, + link_ip_pool_to_silo_query, unlink_ip_pool_from_external_silo_query, + unlink_ip_pool_from_internal_silo_query, + }; + use crate::db::explain::ExplainableAsync as _; use crate::db::model::{ IpPool, IpPoolResource, IpPoolResourceType, Project, }; + use crate::db::pagination::Paginator; use crate::db::pub_test_utils::TestDatabase; + use crate::db::raw_query_builder::expectorate_query_contents; use assert_matches::assert_matches; + use async_bb8_diesel::AsyncRunQueryDsl as _; + use diesel::{ + ExpressionMethods as _, QueryDsl as _, SelectableHelper as _, + }; use nexus_db_model::{IpPoolIdentity, IpVersion}; + use nexus_sled_agent_shared::inventory::ZoneKind; + use nexus_types::deployment::{ + OmicronZoneExternalFloatingIp, OmicronZoneExternalIp, + }; use nexus_types::external_api::params; use nexus_types::identity::Resource; + use nexus_types::silo::INTERNAL_SILO_ID; use omicron_common::address::{IpRange, Ipv4Range, Ipv6Range}; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::{ DataPageParams, Error, IdentityMetadataCreateParams, LookupType, }; use omicron_test_utils::dev; + use omicron_uuid_kinds::{ + ExternalIpUuid, GenericUuid as _, OmicronZoneUuid, + }; use uuid::Uuid; #[tokio::test] @@ -1481,11 +2060,44 @@ mod test { // now remove the association and we should get nothing again let authz_silo = authz::Silo::new(authz::Fleet, silo_id, LookupType::ById(silo_id)); + let q = + nexus_db_schema::schema::ip_pool_resource::dsl::ip_pool_resource + .select(IpPoolResource::as_select()) + .filter( + nexus_db_schema::schema::ip_pool_resource::dsl::resource_id + .eq(authz_silo.id()), + ) + .get_results_async( + &*datastore + .pool_connection_authorized(opctx) + .await + .unwrap(), + ) + .await + .unwrap(); + println!("{q:#?}"); datastore .ip_pool_unlink_silo(&opctx, &authz_pool1_for_silo, &authz_silo) .await .expect("Failed to unlink IP pool from silo"); + let q = + nexus_db_schema::schema::ip_pool_resource::dsl::ip_pool_resource + .select(IpPoolResource::as_select()) + .filter( + nexus_db_schema::schema::ip_pool_resource::dsl::resource_id + .eq(authz_silo.id()), + ) + .get_results_async( + &*datastore + .pool_connection_authorized(opctx) + .await + .unwrap(), + ) + .await + .unwrap(); + println!("{q:#?}"); + // no default let error = datastore.ip_pools_fetch_default(&opctx).await.unwrap_err(); assert_matches!(error, Error::ObjectNotFound { .. }); @@ -1934,4 +2546,526 @@ mod test { db.terminate().await; logctx.cleanup_successful(); } + + #[tokio::test] + async fn paginate_ip_pools_by_reservation() { + let logctx = dev::test_setup_log("paginate_ip_pools_by_reservation"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + // Insert a bunch of pools, not linked to any silo, and so reserved for + // customer use. 
+ const N_POOLS: usize = 20; + let mut customer_pools = Vec::with_capacity(N_POOLS); + for i in 0..N_POOLS { + // Create the pool + let identity = IdentityMetadataCreateParams { + name: format!("ip-pool-{i}").parse().unwrap(), + description: "".to_string(), + }; + let pool = datastore + .ip_pool_create(opctx, IpPool::new(&identity, IpVersion::V4)) + .await + .expect("Failed to create IP pool"); + customer_pools.push(pool); + } + customer_pools.sort_by_key(|pool| pool.id()); + + // Create a bunch which _are_ reserved for Oxide's usage. + let mut oxide_pools = Vec::with_capacity(N_POOLS); + for i in 0..N_POOLS { + // Create the pool + let identity = IdentityMetadataCreateParams { + name: format!("oxide-ip-pool-{i}").parse().unwrap(), + description: "".to_string(), + }; + let pool = datastore + .ip_pool_create(opctx, IpPool::new(&identity, IpVersion::V4)) + .await + .expect("Failed to create IP pool"); + + // Link it to the internal silo + let link = IpPoolResource { + ip_pool_id: pool.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: INTERNAL_SILO_ID, + is_default: false, + }; + datastore + .ip_pool_link_silo(opctx, link) + .await + .expect("Should be able to link pool to internal silo"); + oxide_pools.push(pool); + } + assert_eq!(oxide_pools.len(), N_POOLS); + + let fetch_paginated = |delegated| async move { + let mut found = Vec::with_capacity(N_POOLS); + let mut paginator = Paginator::new( + NonZeroU32::new(5).unwrap(), + dropshot::PaginationOrder::Ascending, + ); + while let Some(page) = paginator.next() { + let batch = datastore + .ip_pools_list_paginated( + opctx, + delegated, + None, + &PaginatedBy::Id(page.current_pagparams()), + ) + .await + .expect("Should be able to list pools with pagination"); + paginator = page.found_batch(&batch, &|pool| pool.id()); + found.extend(batch.into_iter()); + } + found + }; + + // Paginate all the customer-reserved. + let customer_pools_found = + fetch_paginated(PoolReservationType::External).await; + assert_eq!(customer_pools.len(), customer_pools_found.len()); + assert_eq!(customer_pools, customer_pools_found); + + // Paginate all the Oxide-reserved. + // + // Note that we have 2 extra pools today, which are the builtin service + // pools. These will go away in the future, so we'll unfortunately need + // to update this test at that time. Until then, fetch those service + // pools explicitly and add them. + let oxide_reserved_found = + fetch_paginated(PoolReservationType::Internal).await; + let pools = datastore + .ip_pools_service_lookup_both_versions(opctx) + .await + .unwrap(); + oxide_pools.push(pools.ipv4.db_pool); + oxide_pools.push(pools.ipv6.db_pool); + oxide_pools.sort_by_key(|pool| pool.id()); + assert_eq!(oxide_pools.len(), oxide_reserved_found.len()); + assert_eq!(oxide_pools, oxide_reserved_found); + + db.terminate().await; + logctx.cleanup_successful(); + } + + // Ensure we have the right query contents. 
+ #[tokio::test] + async fn expectorate_insert_ip_pool_external_silo_link() { + let res = IpPoolResource { + ip_pool_id: uuid::uuid!("aaa84fbd-85a5-4fcb-b34f-23b7e56145c7"), + resource_type: IpPoolResourceType::Silo, + resource_id: INTERNAL_SILO_ID, + is_default: false, + }; + let query = link_ip_pool_to_silo_query(&res); + expectorate_query_contents( + &query, + "tests/output/ip_pool_external_silo_link.sql", + ) + .await; + } + + // Explain the SQL query inserting an IP Pool link to a customer silo + #[tokio::test] + async fn can_explain_link_ip_pool_to_silo_query() { + let logctx = + dev::test_setup_log("can_explain_link_ip_pool_to_silo_query"); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = pool.claim().await.unwrap(); + + let res = IpPoolResource { + ip_pool_id: uuid::uuid!("aaa84fbd-85a5-4fcb-b34f-23b7e56145c7"), + resource_type: IpPoolResourceType::Silo, + resource_id: INTERNAL_SILO_ID, + is_default: false, + }; + + let query = link_ip_pool_to_silo_query(&res); + let _ = query + .explain_async(&conn) + .await + .expect("Failed to explain query - is it valid SQL?"); + + db.terminate().await; + logctx.cleanup_successful(); + } + + // Ensure that we fail to insert a link between an external Silo and IP + // Pool, if the pool is already linked to the internal IP Pool. + #[tokio::test] + async fn cannot_link_pool_to_external_silo_if_linked_internally() { + let logctx = dev::test_setup_log( + "cannot_link_pool_to_external_silo_if_linked_internally", + ); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + // Create the pool + let identity = IdentityMetadataCreateParams { + name: "internal-ip-pool".parse().unwrap(), + description: "".to_string(), + }; + let ip_pool = datastore + .ip_pool_create(opctx, IpPool::new(&identity, IpVersion::V4)) + .await + .expect("Failed to create IP pool"); + + // Link to the internal silo. + let internal_link = IpPoolResource { + ip_pool_id: ip_pool.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: INTERNAL_SILO_ID, + is_default: false, + }; + let _ = datastore + .ip_pool_link_silo(&opctx, internal_link) + .await + .expect("Should be able to link unlinked pool to internal silo"); + + // We should fail to link it to some other silo now. + let link = IpPoolResource { + ip_pool_id: ip_pool.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: uuid::uuid!("cfb16a9d-764e-4c5d-8d0d-cf737885b84a"), + is_default: false, + }; + let err = datastore.ip_pool_link_silo(&opctx, link).await.expect_err( + "Expected to fail linking an IP Pool to an external Silo \ + when it's already linked to the internal silo", + ); + println!("{err:#?}"); + + db.terminate().await; + logctx.cleanup_successful(); + } + + // Ensure that we fail to insert a link between the internal Silo and IP + // Pool, if the pool is already linked to an external IP Pool. 
+ #[tokio::test] + async fn cannot_link_pool_to_internal_silo_if_linked_externally() { + let logctx = dev::test_setup_log( + "cannot_link_pool_to_internal_silo_if_linked_externally", + ); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + // Create the pool + let identity = IdentityMetadataCreateParams { + name: "external-ip-pool".parse().unwrap(), + description: "".to_string(), + }; + let ip_pool = datastore + .ip_pool_create(opctx, IpPool::new(&identity, IpVersion::V4)) + .await + .expect("Failed to create IP pool"); + + // Link to an external silo. + let external_link = IpPoolResource { + ip_pool_id: ip_pool.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: nexus_types::silo::DEFAULT_SILO_ID, + is_default: false, + }; + let _ = datastore + .ip_pool_link_silo(&opctx, external_link) + .await + .expect("Should be able to link unlinked pool to default silo"); + + // We should fail to link it the internal silo. + let link = IpPoolResource { + ip_pool_id: ip_pool.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: INTERNAL_SILO_ID, + is_default: false, + }; + let err = datastore.ip_pool_link_silo(&opctx, link).await.expect_err( + "Expected to fail linking an IP Pool to the internal Silo \ + when it's already linked to an external silo", + ); + assert_matches!(err, Error::InvalidRequest { .. }); + + db.terminate().await; + logctx.cleanup_successful(); + } + + // Ensure that we fail to link to a silo that is deleted. + #[tokio::test] + async fn cannot_link_pool_to_deleted_silo() { + let logctx = dev::test_setup_log("cannot_link_pool_to_deleted_silo"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + // Create the pool + let identity = IdentityMetadataCreateParams { + name: "external-ip-pool".parse().unwrap(), + description: "".to_string(), + }; + let ip_pool = datastore + .ip_pool_create(opctx, IpPool::new(&identity, IpVersion::V4)) + .await + .expect("Failed to create IP pool"); + + // Delete the silo, directly to avoid a bunch of machinery. + use nexus_db_schema::schema::silo::dsl; + let c = + diesel::update(dsl::silo.find(nexus_types::silo::DEFAULT_SILO_ID)) + .set(dsl::time_deleted.eq(diesel::dsl::now)) + .execute_async( + &*datastore + .pool_connection_authorized(opctx) + .await + .unwrap(), + ) + .await + .expect("Should be able to soft-delete silo"); + assert_eq!(c, 1, "Should have deleted something"); + + // Now try link to it. + let external_link = IpPoolResource { + ip_pool_id: ip_pool.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: nexus_types::silo::DEFAULT_SILO_ID, + is_default: false, + }; + let err = datastore + .ip_pool_link_silo(&opctx, external_link) + .await + .expect_err("Should have failed to link IP Pool to deleted Silo"); + assert_matches!(err, Error::ObjectNotFound { .. }); + + db.terminate().await; + logctx.cleanup_successful(); + } + + // Ensure that we fail to link a deleted pool to a silo. 
+ #[tokio::test] + async fn cannot_link_silo_to_deleted_pool() { + let logctx = dev::test_setup_log("cannot_link_silo_to_deleted_pool"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + // Create the pool + let identity = IdentityMetadataCreateParams { + name: "external-ip-pool".parse().unwrap(), + description: "".to_string(), + }; + let ip_pool = datastore + .ip_pool_create(opctx, IpPool::new(&identity, IpVersion::V4)) + .await + .expect("Failed to create IP pool"); + + // Delete the pool, directly to avoid a bunch of machinery. + use nexus_db_schema::schema::ip_pool::dsl; + let c = diesel::update(dsl::ip_pool.find(ip_pool.id())) + .set(dsl::time_deleted.eq(diesel::dsl::now)) + .execute_async( + &*datastore.pool_connection_authorized(opctx).await.unwrap(), + ) + .await + .expect("Should be able to soft-delete IP Pool"); + assert_eq!(c, 1, "Should have deleted something"); + + // Now try link to it. + let external_link = IpPoolResource { + ip_pool_id: ip_pool.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: nexus_types::silo::DEFAULT_SILO_ID, + is_default: false, + }; + let err = datastore + .ip_pool_link_silo(&opctx, external_link) + .await + .expect_err("Should have failed to link deleted IP Pool to Silo"); + assert_matches!(err, Error::ObjectNotFound { .. }); + + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn can_explain_unlink_ip_pool_from_external_silo_query() { + let logctx = dev::test_setup_log( + "can_explain_unlink_ip_pool_from_external_silo_query", + ); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = pool.claim().await.unwrap(); + + let ip_pool_id = uuid::uuid!("aaa84fbd-85a5-4fcb-b34f-23b7e56145c7"); + let silo_id = nexus_types::silo::DEFAULT_SILO_ID; + let query = + unlink_ip_pool_from_external_silo_query(ip_pool_id, silo_id); + let _ = query + .explain_async(&conn) + .await + .expect("Failed to explain query - is it valid SQL?"); + + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn can_explain_unlink_ip_pool_from_internal_silo_query() { + let logctx = dev::test_setup_log( + "can_explain_unlink_ip_pool_from_internal_silo_query", + ); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = pool.claim().await.unwrap(); + + let ip_pool_id = uuid::uuid!("aaa84fbd-85a5-4fcb-b34f-23b7e56145c7"); + let query = unlink_ip_pool_from_internal_silo_query(ip_pool_id); + let _ = query + .explain_async(&conn) + .await + .expect("Failed to explain query - is it valid SQL?"); + + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn cannot_unlink_last_linked_internal_ip_pool() { + let logctx = + dev::test_setup_log("cannot_unlink_last_linked_internal_ip_pool"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + // Get some basic data. + let (authz_silo, _) = + nexus_db_lookup::LookupPath::new(opctx, datastore) + .silo_id(INTERNAL_SILO_ID) + .fetch() + .await + .unwrap(); + let pools = datastore + .ip_pools_service_lookup_both_versions(opctx) + .await + .unwrap(); + + // We should be able to delete one of these. 
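+        // (The two builtin service pools, one IPv4 and one IPv6, are both
+        // linked to the internal Silo here, so unlinking one still leaves
+        // another delegated pool behind.)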
+ let _ = datastore.ip_pool_unlink_silo( + opctx, + &pools.ipv4.authz_pool, + &authz_silo, + ).await + .expect("Should be able to delete internal IP Pool when more than one remains"); + + let pagparams = &PaginatedBy::Id(DataPageParams { + marker: None, + direction: dropshot::PaginationOrder::Ascending, + limit: 100.try_into().unwrap(), + }); + let l = datastore + .ip_pools_service_list(opctx, None, &pagparams) + .await + .unwrap(); + assert_eq!(l.len(), 1); + + // We should _not_ be able to delete the other now, because there's only + // one left. + let err = datastore.ip_pool_unlink_silo( + opctx, + &pools.ipv6.authz_pool, + &authz_silo, + ).await + .expect_err("Should not be able to delete internal IP Pool when only one remains"); + assert_matches!(err, Error::InvalidRequest { .. }); + + let l = datastore + .ip_pools_service_list(opctx, None, &pagparams) + .await + .unwrap(); + assert_eq!(l.len(), 1); + + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn cannot_unlink_internal_ip_pool_with_outstanding_external_ips() { + let logctx = dev::test_setup_log( + "cannot_unlink_internal_ip_pool_with_outstanding_external_ips", + ); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + // Get some basic data. + let (authz_silo, _) = + nexus_db_lookup::LookupPath::new(opctx, datastore) + .silo_id(INTERNAL_SILO_ID) + .fetch() + .await + .unwrap(); + + // Get pool, add a range, allocate an external IP. + let pools = datastore + .ip_pools_service_lookup_both_versions(opctx) + .await + .unwrap(); + let ip_range = IpRange::V4(Ipv4Range { + first: Ipv4Addr::new(1, 1, 1, 1), + last: Ipv4Addr::new(1, 1, 1, 10), + }); + datastore + .ip_pool_add_range( + opctx, + &pools.ipv4.authz_pool, + &pools.ipv4.db_pool, + &ip_range, + ) + .await + .unwrap(); + + // Create an IP for an Omicron zone. + let eip = datastore + .external_ip_allocate_omicron_zone( + opctx, + OmicronZoneUuid::from_untyped_uuid(uuid::uuid!( + "b7b641d6-f52c-4fd5-b5a5-66ac3918c8b4" + )), + ZoneKind::BoundaryNtp, + OmicronZoneExternalIp::Floating( + OmicronZoneExternalFloatingIp { + id: ExternalIpUuid::from_untyped_uuid(uuid::uuid!( + "4a7f86aa-5cab-42dd-afa5-7eee6304cb8c" + )), + ip: ip_range.first_address(), + }, + ), + ) + .await + .expect("Should be able to create zone external IP"); + + // Should not be able to delete the IPv4 pool now, since we've got an + // address in use. + let _ = datastore.ip_pool_unlink_silo( + opctx, + &pools.ipv4.authz_pool, + &authz_silo, + ).await + .expect_err("Should not be able to delete internal IP Pool when an address is in use"); + + // Delete the address, and now we can delete the link. + let _ = datastore + .deallocate_external_ip(opctx, eip.id) + .await + .expect("Should be able to delete external IP"); + + // We should be able to delete one of these. 
+ let _ = datastore.ip_pool_unlink_silo( + opctx, + &pools.ipv4.authz_pool, + &authz_silo, + ).await + .expect( + "Should be able to delete internal IP Pool when more than one remains, \ + after deleting external IP address" + ); + + db.terminate().await; + logctx.cleanup_successful(); + } } diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index e4e559ce755..6059a30e008 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -675,8 +675,12 @@ impl DataStore { opctx.authorize(authz::Action::CreateChild, &authz::FLEET).await?; - // We may need to populate external IP records for both IPv4 and IPv6 - // service pools, so fetch both now. + // The `RackInit` request will eventually be modified to include the + // full details of the IP Pool(s) delegated to Oxide at RSS time. For + // now, we still rely on the pre-populated IP Pools. There's one for + // IPv4 and one for IPv6. + // + // See https://github.com/oxidecomputer/omicron/issues/8946. let service_ip_pools = self.ip_pools_service_lookup_both_versions(&opctx).await?; diff --git a/nexus/db-queries/src/db/queries/oximeter.rs b/nexus/db-queries/src/db/queries/oximeter.rs index cea319ccc5d..c04f28a5d13 100644 --- a/nexus/db-queries/src/db/queries/oximeter.rs +++ b/nexus/db-queries/src/db/queries/oximeter.rs @@ -5,8 +5,9 @@ //! Implementation of queries for Oximeter collectors and producers. use crate::db::column_walker::AllColumnsOf; -use crate::db::raw_query_builder::{QueryBuilder, TypedSqlQuery}; -use diesel::pg::Pg; +use crate::db::raw_query_builder::{ + QueryBuilder, SelectableSql, TypedSqlQuery, +}; use diesel::sql_types; use ipnetwork::IpNetwork; use nexus_db_model::{OximeterInfo, ProducerKind, SqlU16}; @@ -16,9 +17,6 @@ use uuid::Uuid; type AllColumnsOfOximeterInfo = AllColumnsOf; -type SelectableSql = < - >::SelectExpression as diesel::Expression ->::SqlType; /// Upsert a metric producer. /// diff --git a/nexus/db-queries/src/db/raw_query_builder.rs b/nexus/db-queries/src/db/raw_query_builder.rs index 2c42177aa13..f855dbd6ec9 100644 --- a/nexus/db-queries/src/db/raw_query_builder.rs +++ b/nexus/db-queries/src/db/raw_query_builder.rs @@ -195,3 +195,10 @@ pub async fn expectorate_query_contents>( expectorate::assert_contents(path, &s); } + +/// Type alias for the SQL type of a selectable expression. +/// +/// This is useful for the return type of a query built using [`QueryBuilder`]. 
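+///
+/// A rough usage sketch (`MyModel` and `my_table` are illustrative
+/// placeholders, not real types in this crate):
+///
+/// ```ignore
+/// fn list_all() -> TypedSqlQuery<SelectableSql<MyModel>> {
+///     let mut builder = QueryBuilder::new();
+///     builder.sql("SELECT * FROM my_table");
+///     builder.query()
+/// }
+/// ```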
+pub type SelectableSql = < + >::SelectExpression as diesel::Expression +>::SqlType; diff --git a/nexus/db-queries/tests/output/ip_pool_external_silo_link.sql b/nexus/db-queries/tests/output/ip_pool_external_silo_link.sql new file mode 100644 index 00000000000..a5f0cba9fb8 --- /dev/null +++ b/nexus/db-queries/tests/output/ip_pool_external_silo_link.sql @@ -0,0 +1,32 @@ +WITH + ip_pool AS (SELECT id FROM ip_pool WHERE id = $1 AND time_deleted IS NULL), + silo AS (SELECT id FROM silo WHERE id = $2 AND time_deleted IS NULL), + existing_linked_silo + AS ( + SELECT resource_id FROM ip_pool_resource WHERE ip_pool_id = $3 AND resource_type = $4 LIMIT 1 + ) +INSERT +INTO + ip_pool_resource (ip_pool_id, resource_type, resource_id, is_default) +SELECT + CAST(COALESCE(CAST(ip.id AS STRING), 'ip-pool-deleted') AS UUID), + $5, + CAST( + CASE + WHEN s.id IS NULL THEN 'silo-deleted' + WHEN (s.id = $6 AND els.resource_id = $7) + OR (s.id != $8 AND els.resource_id != $9) + OR els.resource_id IS NULL + THEN $10 + ELSE 'bad-link-type' + END + AS UUID + ), + $11 +FROM + (SELECT 1) AS dummy + LEFT JOIN ip_pool AS ip ON true + LEFT JOIN silo AS s ON true + LEFT JOIN existing_linked_silo AS els ON true +RETURNING + * diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index fa6fa2839ec..881ef3eb817 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -6,6 +6,7 @@ use std::net::Ipv4Addr; +use crate::integration_tests::instances::create_project_and_pool; use crate::integration_tests::instances::instance_wait_for_state; use dropshot::HttpErrorResponseBody; use dropshot::ResultsPage; @@ -20,6 +21,7 @@ use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; use nexus_test_utils::resource_helpers::assert_ip_pool_utilization; +use nexus_test_utils::resource_helpers::create_floating_ip; use nexus_test_utils::resource_helpers::create_instance; use nexus_test_utils::resource_helpers::create_ip_pool; use nexus_test_utils::resource_helpers::create_local_user; @@ -584,6 +586,100 @@ async fn test_ip_pool_silo_link(cptestctx: &ControlPlaneTestContext) { object_delete(client, "/v1/system/ip-pools/p1").await; } +#[nexus_test] +async fn cannot_unlink_ip_pool_with_outstanding_instance_ips( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + let apictx = &cptestctx.server.server_context(); + let nexus = &apictx.nexus; + + // Create a project and add a default IP Pool. + const POOL_NAME: &str = "default"; + let proj = create_project_and_pool(client).await; + + // Create an instance, which allocates an IP address. + const INSTANCE_NAME: &str = "myinst"; + let instance = + create_instance(client, proj.name().as_str(), INSTANCE_NAME).await; + + // We cannot delete the link between the silo and pool until the instance is + // gone. + let url = format!( + "/v1/system/ip-pools/{}/silos/{}", + POOL_NAME, + DEFAULT_SILO.name().as_str() + ); + object_delete_error(client, &url, StatusCode::BAD_REQUEST).await; + + // Stop the instance and wait until it's actually stopped. 
+ let instance_stop_url = format!( + "/v1/instances/{}/stop?project={}", + INSTANCE_NAME, + proj.name().as_str(), + ); + NexusRequest::new( + RequestBuilder::new(client, Method::POST, &instance_stop_url) + .body(None as Option<&serde_json::Value>) + .expect_status(Some(StatusCode::ACCEPTED)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("Failed to stop instance"); + + // Simulate the transition, wait until it is in fact stopped. + let instance_id = InstanceUuid::from_untyped_uuid(instance.id()); + let info = nexus + .active_instance_info(&instance_id, None) + .await + .unwrap() + .expect("running instance should be on a sled"); + info.sled_client.vmm_finish_transition(info.propolis_id).await; + instance_wait_for_state(client, instance_id, InstanceState::Stopped).await; + + // Now that it's stopped, delete the instance, and then assert that we can + // actually unlink the IP Pool from the Silo. + object_delete(client, &format!("/v1/instances/{}", instance_id)).await; + object_delete(client, &url).await; +} + +#[nexus_test] +async fn cannot_unlink_ip_pool_with_outstanding_floating_ips( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + + // Create a project and add a default IP Pool. + const POOL_NAME: &str = "default"; + let proj = create_project_and_pool(client).await; + + // Create a floating IP from the pool. + let fip = create_floating_ip( + client, + "fip", + proj.name().as_str(), + None, + Some(POOL_NAME), + ) + .await; + + // We cannot delete the link between the silo and pool until the floating IP + // has been deleted. + let url = format!( + "/v1/system/ip-pools/{}/silos/{}", + POOL_NAME, + DEFAULT_SILO.name().as_str() + ); + object_delete_error(client, &url, StatusCode::BAD_REQUEST).await; + + // Now delete the FIP. + object_delete(client, &format!("/v1/floating-ips/{}", fip.id())).await; + + // Now we can do the dew. + object_delete(client, &url).await; +} + /// Non-discoverable silos can be linked to a pool, but they do not show up /// in the list of silos for that pool, just as they do not show up in the /// top-level list of silos From 05dfc8007045c9d09500d6d679ea6095aca20251 Mon Sep 17 00:00:00 2001 From: Benjamin Naecker Date: Thu, 2 Oct 2025 21:58:11 +0000 Subject: [PATCH 2/8] Track IP Pool delegation directly in the `ip_pool` table - Add an `is_delegated` column to the `ip_pool` table - Enforce that pools are delegated XOR linkable to customer silos by looking at this table and using conditional update queries. 
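
For reference, the new datastore surface looks roughly like this in use. This
is only a sketch based on the methods and tests added in this commit; the
`pool_id`, `opctx`, and `datastore` bindings are assumed to be in scope, and
error handling is elided:

```rust
use nexus_db_lookup::LookupPath;

// Look up the pool with modify permissions, as the new tests do.
let (authz_pool, db_pool) = LookupPath::new(opctx, datastore)
    .ip_pool_id(pool_id)
    .fetch_for(authz::Action::Modify)
    .await?;

// Delegate the pool for Oxide-internal use. This fails with
// BAD_SILO_LINK_ERROR if the pool is already linked to a customer silo.
datastore.ip_pool_delegate(opctx, &authz_pool, &db_pool).await?;

// Later, after re-fetching the pool's current state, the delegation can be
// revoked. This fails with LAST_POOL_ERROR if it is the only delegated pool,
// or with POOL_HAS_IPS_ERROR while addresses are still allocated from it.
let (authz_pool, db_pool) = LookupPath::new(opctx, datastore)
    .ip_pool_id(pool_id)
    .fetch_for(authz::Action::Modify)
    .await?;
datastore.ip_pool_revoke(opctx, &authz_pool, &db_pool).await?;
```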
--- nexus/db-model/src/ip_pool.rs | 29 + nexus/db-model/src/schema_versions.rs | 3 +- nexus/db-queries/src/db/datastore/ip_pool.rs | 1028 +++++++---------- nexus/db-queries/src/db/datastore/rack.rs | 31 +- .../output/ip_pool_external_silo_link.sql | 35 +- nexus/db-schema/src/schema.rs | 1 + .../add-ip-pool-is-delegated-column/up01.sql | 3 + .../add-ip-pool-is-delegated-column/up02.sql | 4 + .../add-ip-pool-is-delegated-column/up03.sql | 3 + .../add-ip-pool-is-delegated-column/up04.sql | 2 + schema/crdb/dbinit.sql | 17 +- 11 files changed, 470 insertions(+), 686 deletions(-) create mode 100644 schema/crdb/add-ip-pool-is-delegated-column/up01.sql create mode 100644 schema/crdb/add-ip-pool-is-delegated-column/up02.sql create mode 100644 schema/crdb/add-ip-pool-is-delegated-column/up03.sql create mode 100644 schema/crdb/add-ip-pool-is-delegated-column/up04.sql diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs index d575aa38486..83ab169de7b 100644 --- a/nexus/db-model/src/ip_pool.rs +++ b/nexus/db-model/src/ip_pool.rs @@ -91,9 +91,15 @@ pub struct IpPool { /// Child resource generation number, for optimistic concurrency control of /// the contained ranges. pub rcgen: i64, + + /// True if the IP Pool has been delegated for Oxide use. + pub is_delegated: bool, } impl IpPool { + /// Create a new IP Pool. + /// + /// The pool is not delegated to Oxide. pub fn new( pool_identity: &external::IdentityMetadataCreateParams, ip_version: IpVersion, @@ -105,15 +111,38 @@ impl IpPool { ), ip_version, rcgen: 0, + is_delegated: false, + } + } + + /// Create a new pool delegated for Oxide's internal use. + pub fn new_delegated( + pool_identity: &external::IdentityMetadataCreateParams, + ip_version: IpVersion, + ) -> Self { + Self { + identity: IpPoolIdentity::new( + Uuid::new_v4(), + pool_identity.clone(), + ), + ip_version, + rcgen: 0, + is_delegated: true, } } + /// Create a new IPv4 IP Pool. + /// + /// The pool is not delegated to Oxide. pub fn new_v4( pool_identity: &external::IdentityMetadataCreateParams, ) -> Self { Self::new(pool_identity, IpVersion::V4) } + /// Create a new IPv6 IP Pool. + /// + /// The pool is not delegated to Oxide. pub fn new_v6( pool_identity: &external::IdentityMetadataCreateParams, ) -> Self { diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs index 7b5281a7a60..7a28bf883b9 100644 --- a/nexus/db-model/src/schema_versions.rs +++ b/nexus/db-model/src/schema_versions.rs @@ -16,7 +16,7 @@ use std::{collections::BTreeMap, sync::LazyLock}; /// /// This must be updated when you change the database schema. Refer to /// schema/crdb/README.adoc in the root of this repository for details. -pub const SCHEMA_VERSION: Version = Version::new(193, 0, 0); +pub const SCHEMA_VERSION: Version = Version::new(194, 0, 0); /// List of all past database schema versions, in *reverse* order /// @@ -28,6 +28,7 @@ static KNOWN_VERSIONS: LazyLock> = LazyLock::new(|| { // | leaving the first copy as an example for the next person. 
// v // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"), + KnownVersion::new(194, "add-ip-pool-is-delegated-column"), KnownVersion::new(193, "nexus-lockstep-port"), KnownVersion::new(192, "blueprint-source"), KnownVersion::new(191, "debug-log-blueprint-planner"), diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 189a0cd04fb..7e379ed54c5 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -92,16 +92,6 @@ impl ServiceIpPools { } } -// Constraint used to ensure we don't set a default IP Pool for the internal -// silo. -const INTERNAL_SILO_DEFAULT_CONSTRAINT: &'static str = - "internal_silo_has_no_default_pool"; - -// Error message emitted when we attempt to set a default IP Pool for the -// internal silo. -const INTERNAL_SILO_DEFAULT_ERROR: &'static str = - "The internal Silo cannot have a default IP Pool"; - // Error message emitted when a user attempts to link an IP Pool and internal // Silo, but the pool is already linked to an external Silo, or vice versa. const BAD_SILO_LINK_ERROR: &str = "IP Pools cannot be both linked to external \ @@ -114,168 +104,40 @@ const POOL_HAS_IPS_ERROR: &str = // Error message emitted when a user attempts to unlink an IP Pool from the // Oxide internal Silo, without at least one other IP Pool linked to it. -const LAST_POOL_ERROR: &str = "IP Pool delegated to Oxide internal usage \ - cannot be unlinked without at least one other IP Pool delegated. Create \ - and delegate at least one more IP Pool before deleting this one."; - -/// Helper trait for checking if an argument to a Silo <-> IP Pool linking -/// operation refers to the Oxide internal Silo. -trait IsInternalSilo { - fn is_internal_silo(&self) -> bool; -} - -impl IsInternalSilo for authz::Silo { - fn is_internal_silo(&self) -> bool { - self.id() == INTERNAL_SILO_ID - } -} - -/// Describes how IP Pools are reserved. -#[derive(Clone, Copy)] -enum PoolReservationType { - /// The pool is reserved for Oxide's internal usage. - Internal, - /// The pool is reserved for external, customer usage. - External, -} - -// TODO-cleanup: This is essentially the `paginated` function in macro form. -// It's extremely time-consuming to figure out the correct trait bounds which -// allow that function to work on the result of a join query. We could try to -// factor that out if it becomes useful, but since this is only used directly in -// this module, it's fine as-is for now. -macro_rules! 
paginated_table_like { - ($query:expr, $pagparams:expr) => { - match $pagparams { - ::omicron_common::api::external::http_pagination::PaginatedBy::Id(by_id) => { - let maybe_marker = by_id.marker.copied(); - match by_id.direction { - ::dropshot::PaginationOrder::Ascending => { - if let Some(marker) = maybe_marker { - $query = $query.filter(::nexus_db_schema::schema::ip_pool::id.gt(marker)); - } - $query.order(::nexus_db_schema::schema::ip_pool::id.asc()) - } - ::dropshot::PaginationOrder::Descending => { - if let Some(marker) = maybe_marker { - $query = $query.filter(::nexus_db_schema::schema::ip_pool::id.lt(marker)); - } - $query.order(::nexus_db_schema::schema::ip_pool::id.desc()) - } - } - } - ::omicron_common::api::external::http_pagination::PaginatedBy::Name(by_name) => { - let maybe_marker = by_name.marker.map(|n| Name::ref_cast(n)).cloned(); - match by_name.direction { - ::dropshot::PaginationOrder::Ascending => { - if let Some(marker) = maybe_marker { - $query = $query.filter(::nexus_db_schema::schema::ip_pool::name.gt(marker)); - } - $query.order(::nexus_db_schema::schema::ip_pool::name.asc()) - } - ::dropshot::PaginationOrder::Descending => { - if let Some(marker) = maybe_marker { - $query = $query.filter(::nexus_db_schema::schema::ip_pool::name.lt(marker)); - } - $query.order(::nexus_db_schema::schema::ip_pool::name.desc()) - } - } - } - } - }; -} +const LAST_POOL_ERROR: &str = "Cannot delete the last IP Pool delegated to \ + Oxide internal usage. Create and delegate at least one more IP Pool \ + before deleting this one."; impl DataStore { - /// List IP Pools by their reservation type and optionally IP version, paginated. + /// List IP Pools by their delegation state and optionally IP version, paginated. async fn ip_pools_list_paginated( &self, opctx: &OpContext, - reservation: PoolReservationType, + is_delegated: bool, version: Option, pagparams: &PaginatedBy<'_>, ) -> ListResultVec { + use nexus_db_schema::schema::ip_pool; opctx .authorize(authz::Action::ListChildren, &authz::IP_POOL_LIST) .await?; - match reservation { - PoolReservationType::Internal => { - self.list_ip_pools_for_internal(opctx, version, pagparams).await + let mut query = match pagparams { + PaginatedBy::Id(by_id) => { + paginated(ip_pool::table, ip_pool::id, by_id) } - PoolReservationType::External => { - self.list_ip_pools_for_external(opctx, version, pagparams).await - } - } - } - - /// List IP Pools reserved for Oxide. - async fn list_ip_pools_for_internal( - &self, - opctx: &OpContext, - version: Option, - pagparams: &PaginatedBy<'_>, - ) -> ListResultVec { - use nexus_db_schema::schema::ip_pool; - use nexus_db_schema::schema::ip_pool_resource; - // IP Pools reserved for internal usage are, by definition, linked to - // the internal Silo. So we can do a natural join between the `ip_pool` - // and `ip_pool_resource` table, filtering to those pools linked to our - // silo. 
- let mut query = ip_pool::table - .inner_join( - ip_pool_resource::table - .on(ip_pool::id.eq(ip_pool_resource::ip_pool_id)), - ) - .filter( - ip_pool_resource::resource_type - .eq(IpPoolResourceType::Silo) - .and(ip_pool_resource::resource_id.eq(INTERNAL_SILO_ID)), - ) - .into_boxed(); - let query = paginated_table_like!(query, pagparams); - let query = match version { - Some(ver) => query.filter(ip_pool::ip_version.eq(ver)), - None => query, + PaginatedBy::Name(by_name) => paginated( + ip_pool::table, + ip_pool::name, + &by_name.map_name(|n| Name::ref_cast(n)), + ), }; - query - .filter(ip_pool::time_deleted.is_null()) - .limit(pagparams.limit().get().into()) - .select(IpPool::as_select()) - .get_results_async(&*self.pool_connection_authorized(opctx).await?) - .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) - } - - /// List IP Pools reserved for external usage. - async fn list_ip_pools_for_external( - &self, - opctx: &OpContext, - version: Option, - pagparams: &PaginatedBy<'_>, - ) -> ListResultVec { - use nexus_db_schema::schema::ip_pool; - use nexus_db_schema::schema::ip_pool_resource; - // IP Pools reserved for external usage are either unlinked to any Silo, - // or linked to a Silo other than the internal one. Use a left join to - // express that. - let mut query = ip_pool::table - .left_join( - ip_pool_resource::table.on(ip_pool::id - .eq(ip_pool_resource::ip_pool_id) - .and( - ip_pool_resource::resource_type - .eq(IpPoolResourceType::Silo), - ) - .and(ip_pool_resource::resource_id.eq(INTERNAL_SILO_ID))), - ) - .filter(ip_pool_resource::resource_id.is_null()) - .into_boxed(); - let query = paginated_table_like!(query, pagparams); - let query = match version { + query = match version { Some(ver) => query.filter(ip_pool::ip_version.eq(ver)), None => query, }; query .filter(ip_pool::time_deleted.is_null()) + .filter(ip_pool::is_delegated.eq(is_delegated)) .limit(pagparams.limit().get().into()) .select(IpPool::as_select()) .get_results_async(&*self.pool_connection_authorized(opctx).await?) @@ -284,18 +146,14 @@ impl DataStore { } /// List IP Pools + /// + /// This returns the pools available for external customer use. pub async fn ip_pools_list( &self, opctx: &OpContext, pagparams: &PaginatedBy<'_>, ) -> ListResultVec { - self.ip_pools_list_paginated( - opctx, - PoolReservationType::External, - None, - pagparams, - ) - .await + self.ip_pools_list_paginated(opctx, false, None, pagparams).await } /// Look up whether the given pool is available to users in the current @@ -400,20 +258,14 @@ impl DataStore { }) } - /// List IP Pools reserved for Oxide's internal usage. - pub async fn ip_pools_service_list( + /// List IP Pools delegated for Oxide's internal usage. + pub async fn ip_pools_delegated_list( &self, opctx: &OpContext, version: Option, pagparams: &PaginatedBy<'_>, ) -> ListResultVec { - self.ip_pools_list_paginated( - opctx, - PoolReservationType::Internal, - version, - pagparams, - ) - .await + self.ip_pools_list_paginated(opctx, true, version, pagparams).await } /// Look up internal service IP Pools for both IP versions. @@ -441,7 +293,7 @@ impl DataStore { /// /// This method may require an index by Availability Zone in the future. // - // TODO-remove: Use list_ip_pools_for_internal instead. + // TODO-remove: Use ip_pools_delegated_list instead. // // See https://github.com/oxidecomputer/omicron/issues/8947. pub async fn ip_pools_service_lookup( @@ -485,7 +337,8 @@ impl DataStore { /// Delete an IP Pool, and any links between it an any Silos. 
/// - /// This fails if there are still IP Ranges in the pool. + /// This fails if there are still IP Ranges in the pool, or if we're + /// deleting the last delegated pool. pub async fn ip_pool_delete( &self, opctx: &OpContext, @@ -515,6 +368,15 @@ impl DataStore { )); } + // Add a small subquery, if needed, to ensure that we don't delete this + // IP Pool if it's the last delegated pool. There has to always be at + // least one of these. + let ensure_delegated = if db_pool.is_delegated { + diesel::dsl::sql::(ENSURE_DELEGATED_COUNT_SUBQUERY) + } else { + diesel::dsl::sql::("TRUE") + }; + // Delete the pool, conditional on the rcgen not having changed. This // protects the delete from occuring if clients created a new IP range // in between the above check for children and this query. @@ -523,14 +385,21 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::id.eq(authz_pool.id())) .filter(dsl::rcgen.eq(db_pool.rcgen)) + .filter(ensure_delegated) .set(dsl::time_deleted.eq(now)) .execute_async(&*conn) .await - .map_err(|e| { - public_error_from_diesel( + .map_err(|e| match e { + DieselError::DatabaseError( + DatabaseErrorKind::Unknown, + ref info, + ) if info.message().ends_with("invalid bool value") => { + Error::invalid_request(LAST_POOL_ERROR) + } + _ => public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_pool), - ) + ), })?; if updated_rows == 0 { @@ -566,25 +435,17 @@ impl DataStore { authz_pool: &authz::IpPool, ) -> LookupResult { use nexus_db_schema::schema::ip_pool; - ip_pool::table - .filter(ip_pool::id.eq(authz_pool.id())) - .filter( - ip_pool::name - .eq(SERVICE_IPV4_POOL_NAME) - .or(ip_pool::name.eq(SERVICE_IPV6_POOL_NAME)), - ) + .find(authz_pool.id()) .filter(ip_pool::time_deleted.is_null()) - .select(ip_pool::id) - .first_async::( + .select(ip_pool::is_delegated) + .first_async::( &*self.pool_connection_authorized(opctx).await?, ) .await .optional() - // if there is a result, the pool is associated with the internal silo, - // which makes it the internal pool - .map(|result| Ok(result.is_some())) - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? + .map(|result| result.unwrap_or(false)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn ip_pool_update( @@ -611,6 +472,82 @@ impl DataStore { }) } + /// Delegate an IP Pool for Oxide internal use. + pub async fn ip_pool_delegate( + &self, + opctx: &OpContext, + authz_pool: &authz::IpPool, + db_pool: &IpPool, + ) -> UpdateResult<()> { + if db_pool.is_delegated { + return Err(Error::invalid_request("IP Pool is already delegated")); + } + let n_rows = delegate_ip_pool_query(authz_pool.id()) + .execute_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| match e { + DieselError::DatabaseError( + DatabaseErrorKind::Unknown, + ref info, + ) if info.message().ends_with("invalid bool value") => { + Error::invalid_request(BAD_SILO_LINK_ERROR) + } + _ => public_error_from_diesel( + e, + ErrorHandler::NotFoundByResource(authz_pool), + ), + })?; + if n_rows == 0 { + Err(Error::invalid_request( + "update failed due to concurrent modification", + )) + } else { + Ok(()) + } + } + + /// Revoke an IP Pool previously delegated for Oxide internal use. 
+ pub async fn ip_pool_revoke( + &self, + opctx: &OpContext, + authz_pool: &authz::IpPool, + db_pool: &IpPool, + ) -> UpdateResult<()> { + if !db_pool.is_delegated { + return Err(Error::invalid_request( + "Cannot revoke a pool that has not been previously delegated", + )); + } + let n_rows = revoke_ip_pool_query(authz_pool.id()) + .execute_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| match e { + DieselError::DatabaseError( + DatabaseErrorKind::Unknown, + ref info, + ) => { + if info.message().ends_with("invalid bool value") { + Error::invalid_request(LAST_POOL_ERROR) + } else if info.message().contains("division by zero") { + Error::invalid_request(POOL_HAS_IPS_ERROR) + } else { + public_error_from_diesel(e, ErrorHandler::Server) + } + } + _ => public_error_from_diesel( + e, + ErrorHandler::NotFoundByResource(authz_pool), + ), + })?; + if n_rows == 0 { + Err(Error::invalid_request( + "update failed to due concurrent modification", + )) + } else { + Ok(()) + } + } + /// Return the number of IPs allocated from and the capacity of the provided /// IP Pool. pub async fn ip_pool_utilization( @@ -804,23 +741,22 @@ impl DataStore { opctx: &OpContext, ip_pool_resource: IpPoolResource, ) -> CreateResult { + if ip_pool_resource.resource_id == INTERNAL_SILO_ID { + return Err(Error::internal_error( + "IP Pools should not be linked to the internal silo. \ + Set the `is_delegated` column to true instead.", + )); + } opctx .authorize(authz::Action::CreateChild, &authz::IP_POOL_LIST) .await?; let conn = self.pool_connection_authorized(opctx).await?; - let result = link_ip_pool_to_silo_query(&ip_pool_resource) + let result = link_ip_pool_to_external_silo_query(&ip_pool_resource) .get_result_async(&*conn) .await .map_err(|e| { match e { - // Catch the check constraint ensuring the internal silo has - // no default pool - DieselError::DatabaseError(DatabaseErrorKind::CheckViolation, ref info) - if info.constraint_name() == Some(INTERNAL_SILO_DEFAULT_CONSTRAINT) => - { - Error::invalid_request(INTERNAL_SILO_DEFAULT_ERROR) - } DieselError::DatabaseError(DatabaseErrorKind::UniqueViolation, _) => { public_error_from_diesel( e, @@ -1149,48 +1085,18 @@ impl DataStore { IpPoolResourceUpdateError::FailedToUnsetDefault(err), )) => public_error_from_diesel(err, ErrorHandler::Server), Some(TxnError::Database(err)) => { - match err { - // Catch the check constraint ensuring the internal silo has - // no default pool - DieselError::DatabaseError( - DatabaseErrorKind::CheckViolation, - ref info, - ) if info.constraint_name() - == Some(INTERNAL_SILO_DEFAULT_CONSTRAINT) => - { - Error::invalid_request(INTERNAL_SILO_DEFAULT_ERROR) - } - _ => { - public_error_from_diesel(err, ErrorHandler::Server) - } - } - } - None => { - match e { - // Catch the check constraint ensuring the internal silo has - // no default pool - DieselError::DatabaseError( - DatabaseErrorKind::CheckViolation, - ref info, - ) if info.constraint_name() - == Some(INTERNAL_SILO_DEFAULT_CONSTRAINT) => - { - Error::invalid_request(INTERNAL_SILO_DEFAULT_ERROR) - } - _ => public_error_from_diesel( - e, - ErrorHandler::NotFoundByLookup( - ResourceType::IpPoolResource, - // TODO: would be nice to put the actual names and/or ids in - // here but LookupType on each of the two silos doesn't have - // a nice to_string yet or a way of composing them - LookupType::ByCompositeId( - "(pool, silo)".to_string(), - ), - ), - ), - } + public_error_from_diesel(err, ErrorHandler::Server) } + None => public_error_from_diesel( + e, + 
ErrorHandler::NotFoundByLookup( + ResourceType::IpPoolResource, + // TODO: would be nice to put the actual names and/or ids in + // here but LookupType on each of the two silos doesn't have + // a nice to_string yet or a way of composing them + LookupType::ByCompositeId("(pool, silo)".to_string()), + ), + ), }) } @@ -1205,15 +1111,18 @@ impl DataStore { opctx.authorize(authz::Action::Modify, authz_pool).await?; opctx.authorize(authz::Action::Modify, authz_silo).await?; - let conn = self.pool_connection_authorized(opctx).await?; - if authz_silo.is_internal_silo() { - unlink_ip_pool_from_internal_silo_query(authz_pool.id()) - } else { - unlink_ip_pool_from_external_silo_query( - authz_pool.id(), - authz_silo.id(), - ) + if authz_silo.id() == INTERNAL_SILO_ID { + return Err(Error::internal_error( + "Cannot unlink a delegated IP Pool. \ + Use the `is_delegated` column instead.", + )); } + + let conn = self.pool_connection_authorized(opctx).await?; + unlink_ip_pool_from_external_silo_query( + authz_pool.id(), + authz_silo.id(), + ) .execute_async(&*conn) .await .map_err(|e| match e { @@ -1222,22 +1131,12 @@ impl DataStore { ref info, ) => { let msg = info.message(); - // Handle intentional parsing errors. - if msg.contains("could not parse") { - // Intentional bool-parsing error, which we use to detect - // when there are still external IPs in the Silo. - if msg.ends_with("invalid bool value") { - Error::invalid_request(POOL_HAS_IPS_ERROR) - // Intentional int-parsing error, which we use to detect - // attempting to unlink the last IP Pool linked to the - // internal Oxide Silo. - } else if msg.contains("strconv.ParseInt") - && msg.ends_with("invalid syntax") - { - Error::invalid_request(LAST_POOL_ERROR) - } else { - public_error_from_diesel(e, ErrorHandler::Server) - } + // Intentional bool-parsing error, which we use to detect + // when there are still external IPs in the Silo. + if msg.contains("could not parse") + && msg.ends_with("invalid bool value") + { + Error::invalid_request(POOL_HAS_IPS_ERROR) } else { public_error_from_diesel(e, ErrorHandler::Server) } @@ -1491,30 +1390,28 @@ const IP_POOL_DELETED_SENTINEL: &str = "ip-pool-deleted"; // between selecting it and trying to insert the link. const SILO_DELETED_SENTINEL: &str = "silo-deleted"; -// Query to conditionally link an IP Pool to a Silo. +// Query to conditionally link an IP Pool to an external customer Silo. // // This method returns a SQL query to conditionally insert a link between an IP -// Pool and a Silo. It maintains the invariant that an IP Pool can be linked to -// any number of customer silos, OR the Oxide-internal Silo, but not both. It -// also internally checks that a previously-fetched IP Pool and Silo still exist -// at the time the query is run. +// Pool and a Silo. It maintains the invariant that a pool can be delegated for +// Oxide internal usage XOR linked to customer silos. It also checks that the +// pool and silo still exist when the query is run. // // The full query is: // // ```sql // WITH // -- Select the IP Pool by ID, used to ensure it still exists when we run -// -- this query. -// ip_pool AS (SELECT id FROM ip_pool WHERE id = $1 AND time_deleted IS NULL), +// -- this query. Also select the delegation state, and fail if the pool is +// -- currently delegated to Oxide. 
+// ip_pool AS ( +// SELECT CAST(IF(is_delegated, 'bad-link-type', 'id') AS UUID) +// FROM ip_pool +// WHERE id = $1 AND time_deleted IS NULL +// ) // -- Select the Silo by ID, used to ensure it still exists when we run this // -- query // silo AS (SELECT id FROM silo WHERE id = $2 AND time_deleted IS NULL), -// -- Get the ID of any Silo this pool is currently linked to. This is used to -// -- verify that we don't link a pool to both an internal and external silo. -// existing_linked_silo -// AS ( -// SELECT resource_id FROM ip_pool_resource WHERE ip_pool_id = $3 AND resource_type = $4 LIMIT 1 -// ) // INSERT // INTO // ip_pool_resource (ip_pool_id, resource_type, resource_id, is_default) @@ -1525,48 +1422,29 @@ const SILO_DELETED_SENTINEL: &str = "silo-deleted"; // CAST(COALESCE(CAST(ip.id AS STRING), 'ip-pool-deleted') AS UUID), // -- The resource type, always 'silo' here. // $5, -// -- Check that we don't link a pool to both an internal and external silo. -// CAST( -// CASE -// -- If the Silo was actually deleted, return a const that fails to cast to -// -- a UUID. This is the "true or cast error" trick. -// WHEN s.id IS NULL THEN 'silo-deleted' -// -- The main check, which is true if -// -- 1. The Silo we're trying to link and the existing link are both the -// -- internal Silo ID. -// -- 2. The Silo we're trying to link and any existing link are _not_ the -// -- internal Silo ID. -// -- 3. There is no existing link. -// -- Logically this is equivalent to checking that the pool is either -// -- unlinked, or linked only to the internal silo or only external silos, -// -- but not both. -// -- -// -- In that case, use the input Silo ID. -// WHEN (s.id = $6 AND els.resource_id = $7) -// OR (s.id != $8 AND els.resource_id != $9) -// OR els.resource_id IS NULL -// THEN $10 -// -- If the above fails, it's because we're trying to link a pool to both -// -- an internal and external silo. Generate an un-castable string. -// ELSE 'bad-link-type' -// END -// AS UUID -// ), +// -- If the silo exists, take its ID as a string. If it does not exist, take +// -- the string `'silo-deleted'`. Attempt to cast the result to a UUID. +// -- This is the "true or cast error" trick we use in many places. 
+// CAST(COALESCE(CAST(s.id AS STRING), 'silo-deleted') AS UUID), // $11 // FROM // (SELECT 1) AS dummy // LEFT JOIN ip_pool AS ip ON true // LEFT JOIN silo AS s ON true -// LEFT JOIN existing_linked_silo AS els ON true // RETURNING // * // ``` -fn link_ip_pool_to_silo_query( +fn link_ip_pool_to_external_silo_query( ip_pool_resource: &IpPoolResource, ) -> TypedSqlQuery> { let mut builder = QueryBuilder::new(); builder - .sql("WITH ip_pool AS (SELECT id FROM ip_pool WHERE id = ") + .sql("WITH ip_pool AS (SELECT CAST(IF(is_delegated, '") + .sql(BAD_SILO_LINK_SENTINEL) + .sql("', ") + .param() + .bind::(ip_pool_resource.ip_pool_id.to_string()) + .sql(") AS UUID) AS id FROM ip_pool WHERE id = ") .param() .bind::(ip_pool_resource.ip_pool_id) .sql( @@ -1577,22 +1455,7 @@ fn link_ip_pool_to_silo_query( .param() .bind::(ip_pool_resource.resource_id) .sql( - " \ - AND time_deleted IS NULL), \ - existing_linked_silo AS (\ - SELECT resource_id \ - FROM ip_pool_resource \ - WHERE ip_pool_id = ", - ) - .param() - .bind::(ip_pool_resource.ip_pool_id) - .sql(" AND resource_type = ") - .param() - .bind::( - ip_pool_resource.resource_type, - ) - .sql( - " LIMIT 1) \ + " AND time_deleted IS NULL) \ INSERT INTO ip_pool_resource (\ ip_pool_id, \ resource_type, \ @@ -1606,34 +1469,15 @@ fn link_ip_pool_to_silo_query( .bind::( ip_pool_resource.resource_type, ) - .sql(", CAST(CASE ") - .sql("WHEN s.id IS NULL THEN '") + .sql(", CAST(COALESCE(CAST(s.id AS STRING), '") .sql(SILO_DELETED_SENTINEL) - .sql("' WHEN (s.id = ") - .param() - .bind::(INTERNAL_SILO_ID) - .sql(" AND els.resource_id = ") - .param() - .bind::(INTERNAL_SILO_ID) - .sql(") OR (s.id != ") - .param() - .bind::(INTERNAL_SILO_ID) - .sql(" AND els.resource_id != ") - .param() - .bind::(INTERNAL_SILO_ID) - .sql(") OR els.resource_id IS NULL THEN ") - .param() - .bind::(ip_pool_resource.resource_id.to_string()) - .sql(" ELSE '") - .sql(BAD_SILO_LINK_SENTINEL) - .sql("' END AS UUID), ") + .sql("') AS UUID), ") .param() .bind::(ip_pool_resource.is_default) .sql( " FROM (SELECT 1) AS dummy \ LEFT JOIN ip_pool AS ip ON TRUE \ LEFT JOIN silo AS s ON TRUE \ - LEFT JOIN existing_linked_silo AS els ON TRUE \ RETURNING *", ); builder.query() @@ -1768,107 +1612,76 @@ fn unlink_ip_pool_from_external_silo_query( ); builder.query() } - -// Query to conditionally unlink a pool from the internal Oxide Silo. -// -// In addition to checking for external IPs, this also ensures there is at least -// one other pool linked to the Silo. -// -// This query is similar to the one above for unlinking an external silo, with -// slightly different checks for external addresses. We don't need to join the -// `instance` table, and so look only at the `external_ip` table for IPs used in -// the provided IP Pool. -// -// It has one additional CTE and intentional cast error, which prevents deleting -// the link if this is the last pool linked to the internal silo. That would -// prevent Oxide services allocating any new addresses. -// -// The CTE is: -// -// ```sql -// internally_linked_pools AS ( -// SELECT 1 -// FROM ip_pool_resource -// WHERE resource_type = 'silo' AND resource_id = $INTERNAL_SILO_ID -// LIMIT 2 -// ) -// ``` -// -// And the cast error is: -// -// ```sql -// CAST(IF( -// (SELECT COUNT(*) FROM internally_linked_pools) < 2, -// 'last-pool', -// '1' -// ) AS INT) = 1 -// ``` -// -// So we're counting internally linked pools, checking that there are at least 2 -// when we attempt the deletion, and failing with an integer-cast error if not. 
-fn unlink_ip_pool_from_internal_silo_query( - ip_pool_id: Uuid, -) -> TypedSqlQuery<()> { +// Helper subquery which fails with a bool-cast error if there are fewer +// than two delegated IP Pools. This is useful when we're trying to delete +// or revoke a delegated pool, to ensure there's at least one left. +const ENSURE_DELEGATED_COUNT_SUBQUERY: &str = "(SELECT CAST(IF((\ + SELECT COUNT(1) \ + FROM ip_pool \ + WHERE time_deleted IS NULL AND is_delegated LIMIT 2\ + ) >= 2, \ + 'true', \ + 'last-pool') \ + AS BOOL) \ + )"; + +fn revoke_ip_pool_query(ip_pool_id: Uuid) -> TypedSqlQuery<()> { let mut builder = QueryBuilder::new(); builder .sql( - "WITH external_ips AS (\ - SELECT 1 \ - FROM external_ip \ - WHERE \ - external_ip.is_service = TRUE AND \ - external_ip.time_deleted IS NULL AND \ - external_ip.ip_pool_id = ", + "\ + UPDATE ip_pool \ + SET is_delegated = FALSE, time_modified = NOW() \ + WHERE id = ", ) .param() .bind::(ip_pool_id) .sql( - " LIMIT 1), \ - internally_linked_pools AS (\ - SELECT 1 \ - FROM ip_pool_resource \ - WHERE resource_type = ", + " AND time_deleted IS NULL AND is_delegated = TRUE AND \ + CAST(IF((\ + SELECT COUNT(1) \ + FROM ip_pool \ + WHERE time_deleted is NULL AND is_delegated LIMIT 2\ + ) >= 2, \ + 'true', \ + 'last-pool') \ + AS BOOL) AND \ + CAST(IF(EXISTS(\ + SELECT 1 \ + FROM external_ip \ + WHERE time_deleted IS NULL AND ip_pool_id = ", ) .param() - .bind::( - IpPoolResourceType::Silo, - ) - .sql(" AND resource_id = ") - .param() - .bind::(INTERNAL_SILO_ID) + .bind::(ip_pool_id) + .sql("), 1/0, 1) AS BOOL)"); + builder.query() +} + +// Conditionally delegate an IP Pool, checking that the pool isn't linked to any +// external silos. +fn delegate_ip_pool_query(ip_pool_id: Uuid) -> TypedSqlQuery<()> { + let mut builder = QueryBuilder::new(); + builder .sql( - " LIMIT 2) \ - DELETE FROM ip_pool_resource \ - WHERE ip_pool_id = ", + "\ + UPDATE ip_pool \ + SET is_delegated = TRUE, time_modified = NOW() \ + WHERE id = ", ) .param() .bind::(ip_pool_id) - .sql(" AND resource_type = ") - .param() - .bind::( - IpPoolResourceType::Silo, - ) - .sql(" AND resource_id = ") - .param() - .bind::(INTERNAL_SILO_ID) .sql( - " AND \ - CAST(IF(\ - EXISTS(SELECT 1 FROM external_ips), \ - 'has-eips', \ - 'true'\ - ) AS BOOL)", + " AND time_deleted IS NULL AND is_delegated = FALSE AND (\ + SELECT CAST(IF(EXISTS(\ + SELECT 1 \ + FROM ip_pool_resource \ + WHERE ip_pool_id = ", ) - // Generate an error casting the string 'last-pool' to an int when there are - // fewer than 2 pools still linked to the silo. 
- .sql( - " AND \ - CAST(IF(\ - (SELECT COUNT(*) FROM internally_linked_pools) < 2, \ - 'last-pool', \ - '1'\ - ) AS INT) = 1", - ); + .param() + .bind::(ip_pool_id) + .sql("), '") + .sql(BAD_SILO_LINK_SENTINEL) + .sql("', 'TRUE') AS BOOL))"); builder.query() } @@ -1879,9 +1692,9 @@ mod test { use crate::authz; use crate::db::datastore::ip_pool::{ - INTERNAL_SILO_DEFAULT_ERROR, PoolReservationType, - link_ip_pool_to_silo_query, unlink_ip_pool_from_external_silo_query, - unlink_ip_pool_from_internal_silo_query, + BAD_SILO_LINK_ERROR, LAST_POOL_ERROR, POOL_HAS_IPS_ERROR, + delegate_ip_pool_query, link_ip_pool_to_external_silo_query, + revoke_ip_pool_query, unlink_ip_pool_from_external_silo_query, }; use crate::db::explain::ExplainableAsync as _; use crate::db::model::{ @@ -1895,7 +1708,8 @@ mod test { use diesel::{ ExpressionMethods as _, QueryDsl as _, SelectableHelper as _, }; - use nexus_db_model::{IpPoolIdentity, IpVersion}; + use nexus_db_lookup::LookupPath; + use nexus_db_model::IpVersion; use nexus_sled_agent_shared::inventory::ZoneKind; use nexus_types::deployment::{ OmicronZoneExternalFloatingIp, OmicronZoneExternalIp, @@ -1912,7 +1726,6 @@ mod test { use omicron_uuid_kinds::{ ExternalIpUuid, GenericUuid as _, OmicronZoneUuid, }; - use uuid::Uuid; #[tokio::test] async fn test_default_ip_pools() { @@ -2174,87 +1987,6 @@ mod test { logctx.cleanup_successful(); } - #[tokio::test] - async fn cannot_set_default_ip_pool_for_internal_silo() { - let logctx = - dev::test_setup_log("cannot_set_default_ip_pool_for_internal_silo"); - let db = TestDatabase::new_with_datastore(&logctx.log).await; - let (opctx, datastore) = (db.opctx(), db.datastore()); - - for ip_version in [IpVersion::V4, IpVersion::V6] { - // Make some new pool. - let params = IpPool { - identity: IpPoolIdentity::new( - Uuid::new_v4(), - IdentityMetadataCreateParams { - name: format!("test-pool-{}", ip_version) - .parse() - .unwrap(), - description: String::new(), - }, - ), - ip_version, - rcgen: 0, - }; - let pool = datastore - .ip_pool_create(&opctx, params) - .await - .expect("Should be able to create pool"); - assert_eq!(pool.ip_version, ip_version); - let authz_pool = - nexus_db_lookup::LookupPath::new(&opctx, datastore) - .ip_pool_id(pool.id()) - .lookup_for(authz::Action::Read) - .await - .expect("Should be able to lookup new IP Pool") - .0; - - // Try to link it as the default. - let (authz_silo, ..) = - nexus_db_lookup::LookupPath::new(&opctx, datastore) - .silo_id(nexus_types::silo::INTERNAL_SILO_ID) - .lookup_for(authz::Action::Read) - .await - .expect("Should be able to lookup internal silo"); - let link = IpPoolResource { - ip_pool_id: authz_pool.id(), - resource_type: IpPoolResourceType::Silo, - resource_id: authz_silo.id(), - is_default: true, - }; - let Err(e) = datastore.ip_pool_link_silo(opctx, link).await else { - panic!( - "should have failed to link IP Pool to internal silo as a default" - ); - }; - let Error::InvalidRequest { message } = &e else { - panic!("should have received an invalid request, got: {:?}", e); - }; - assert_eq!(message.external_message(), INTERNAL_SILO_DEFAULT_ERROR); - - // We can link it if it's not the default. - let link = IpPoolResource { is_default: false, ..link }; - datastore.ip_pool_link_silo(opctx, link).await.expect( - "Should be able to link non-default pool to internal silo", - ); - - // Try to set it to the default, and ensure that this also fails. 
- let Err(e) = datastore - .ip_pool_set_default(opctx, &authz_pool, &authz_silo, true) - .await - else { - panic!("should have failed to set internal pool to default"); - }; - let Error::InvalidRequest { message } = &e else { - panic!("should have received an invalid request, got: {:?}", e); - }; - assert_eq!(message.external_message(), INTERNAL_SILO_DEFAULT_ERROR); - } - - db.terminate().await; - logctx.cleanup_successful(); - } - // We're breaking out the utilization tests for IPv4 and IPv6 pools, since // pools only contain one version now. // @@ -2548,8 +2280,9 @@ mod test { } #[tokio::test] - async fn paginate_ip_pools_by_reservation() { - let logctx = dev::test_setup_log("paginate_ip_pools_by_reservation"); + async fn paginate_ip_pools_by_delegation_type() { + let logctx = + dev::test_setup_log("paginate_ip_pools_by_delegation_type"); let db = TestDatabase::new_with_datastore(&logctx.log).await; let (opctx, datastore) = (db.opctx(), db.datastore()); @@ -2571,7 +2304,7 @@ mod test { } customer_pools.sort_by_key(|pool| pool.id()); - // Create a bunch which _are_ reserved for Oxide's usage. + // Create a bunch which _are_ delegated for Oxide's usage. let mut oxide_pools = Vec::with_capacity(N_POOLS); for i in 0..N_POOLS { // Create the pool @@ -2580,26 +2313,17 @@ mod test { description: "".to_string(), }; let pool = datastore - .ip_pool_create(opctx, IpPool::new(&identity, IpVersion::V4)) - .await - .expect("Failed to create IP pool"); - - // Link it to the internal silo - let link = IpPoolResource { - ip_pool_id: pool.id(), - resource_type: IpPoolResourceType::Silo, - resource_id: INTERNAL_SILO_ID, - is_default: false, - }; - datastore - .ip_pool_link_silo(opctx, link) + .ip_pool_create( + opctx, + IpPool::new_delegated(&identity, IpVersion::V4), + ) .await - .expect("Should be able to link pool to internal silo"); + .expect("Failed to create delegated IP pool"); oxide_pools.push(pool); } assert_eq!(oxide_pools.len(), N_POOLS); - let fetch_paginated = |delegated| async move { + let fetch_paginated = |is_delegated| async move { let mut found = Vec::with_capacity(N_POOLS); let mut paginator = Paginator::new( NonZeroU32::new(5).unwrap(), @@ -2609,7 +2333,7 @@ mod test { let batch = datastore .ip_pools_list_paginated( opctx, - delegated, + is_delegated, None, &PaginatedBy::Id(page.current_pagparams()), ) @@ -2622,19 +2346,17 @@ mod test { }; // Paginate all the customer-reserved. - let customer_pools_found = - fetch_paginated(PoolReservationType::External).await; + let customer_pools_found = fetch_paginated(false).await; assert_eq!(customer_pools.len(), customer_pools_found.len()); assert_eq!(customer_pools, customer_pools_found); - // Paginate all the Oxide-reserved. + // Paginate all those delegated to Oxide. // // Note that we have 2 extra pools today, which are the builtin service // pools. These will go away in the future, so we'll unfortunately need // to update this test at that time. Until then, fetch those service // pools explicitly and add them. 
- let oxide_reserved_found = - fetch_paginated(PoolReservationType::Internal).await; + let oxide_reserved_found = fetch_paginated(true).await; let pools = datastore .ip_pools_service_lookup_both_versions(opctx) .await @@ -2658,7 +2380,7 @@ mod test { resource_id: INTERNAL_SILO_ID, is_default: false, }; - let query = link_ip_pool_to_silo_query(&res); + let query = link_ip_pool_to_external_silo_query(&res); expectorate_query_contents( &query, "tests/output/ip_pool_external_silo_link.sql", @@ -2682,7 +2404,7 @@ mod test { is_default: false, }; - let query = link_ip_pool_to_silo_query(&res); + let query = link_ip_pool_to_external_silo_query(&res); let _ = query .explain_async(&conn) .await @@ -2692,13 +2414,10 @@ mod test { logctx.cleanup_successful(); } - // Ensure that we fail to insert a link between an external Silo and IP - // Pool, if the pool is already linked to the internal IP Pool. #[tokio::test] - async fn cannot_link_pool_to_external_silo_if_linked_internally() { - let logctx = dev::test_setup_log( - "cannot_link_pool_to_external_silo_if_linked_internally", - ); + async fn cannot_link_delegated_pool_to_external_silo() { + let logctx = + dev::test_setup_log("cannot_link_delegated_pool_to_external_silo"); let db = TestDatabase::new_with_datastore(&logctx.log).await; let (opctx, datastore) = (db.opctx(), db.datastore()); @@ -2708,22 +2427,13 @@ mod test { description: "".to_string(), }; let ip_pool = datastore - .ip_pool_create(opctx, IpPool::new(&identity, IpVersion::V4)) + .ip_pool_create( + opctx, + IpPool::new_delegated(&identity, IpVersion::V4), + ) .await .expect("Failed to create IP pool"); - // Link to the internal silo. - let internal_link = IpPoolResource { - ip_pool_id: ip_pool.id(), - resource_type: IpPoolResourceType::Silo, - resource_id: INTERNAL_SILO_ID, - is_default: false, - }; - let _ = datastore - .ip_pool_link_silo(&opctx, internal_link) - .await - .expect("Should be able to link unlinked pool to internal silo"); - // We should fail to link it to some other silo now. let link = IpPoolResource { ip_pool_id: ip_pool.id(), @@ -2732,8 +2442,7 @@ mod test { is_default: false, }; let err = datastore.ip_pool_link_silo(&opctx, link).await.expect_err( - "Expected to fail linking an IP Pool to an external Silo \ - when it's already linked to the internal silo", + "Expected to fail linking a delegated IP Pool to an external Silo", ); println!("{err:#?}"); @@ -2741,17 +2450,14 @@ mod test { logctx.cleanup_successful(); } - // Ensure that we fail to insert a link between the internal Silo and IP - // Pool, if the pool is already linked to an external IP Pool. #[tokio::test] - async fn cannot_link_pool_to_internal_silo_if_linked_externally() { - let logctx = dev::test_setup_log( - "cannot_link_pool_to_internal_silo_if_linked_externally", - ); + async fn cannot_delegate_externally_linked_pool() { + let logctx = + dev::test_setup_log("cannot_delegate_externally_linked_pool"); let db = TestDatabase::new_with_datastore(&logctx.log).await; let (opctx, datastore) = (db.opctx(), db.datastore()); - // Create the pool + // Create the pool, non-delegated. let identity = IdentityMetadataCreateParams { name: "external-ip-pool".parse().unwrap(), description: "".to_string(), @@ -2773,18 +2479,23 @@ mod test { .await .expect("Should be able to link unlinked pool to default silo"); - // We should fail to link it the internal silo. 
- let link = IpPoolResource { - ip_pool_id: ip_pool.id(), - resource_type: IpPoolResourceType::Silo, - resource_id: INTERNAL_SILO_ID, - is_default: false, - }; - let err = datastore.ip_pool_link_silo(&opctx, link).await.expect_err( - "Expected to fail linking an IP Pool to the internal Silo \ + // We should fail to delegate it now. + let (authz_pool, db_pool) = LookupPath::new(opctx, datastore) + .ip_pool_id(ip_pool.id()) + .fetch_for(authz::Action::Modify) + .await + .unwrap(); + let err = datastore + .ip_pool_delegate(opctx, &authz_pool, &db_pool) + .await + .expect_err( + "Expected to fail delegating an IP Pool \ when it's already linked to an external silo", - ); - assert_matches!(err, Error::InvalidRequest { .. }); + ); + let Error::InvalidRequest { message } = err else { + panic!("Expected InvalidRequest, got {err:#?}"); + }; + assert_eq!(message.external_message(), BAD_SILO_LINK_ERROR); db.terminate().await; logctx.cleanup_successful(); @@ -2907,75 +2618,108 @@ mod test { } #[tokio::test] - async fn can_explain_unlink_ip_pool_from_internal_silo_query() { - let logctx = dev::test_setup_log( - "can_explain_unlink_ip_pool_from_internal_silo_query", - ); - let db = TestDatabase::new_with_pool(&logctx.log).await; - let pool = db.pool(); - let conn = pool.claim().await.unwrap(); + async fn cannot_delete_last_delegated_ip_pool() { + let logctx = + dev::test_setup_log("cannot_delete_last_delegated_ip_pool"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); - let ip_pool_id = uuid::uuid!("aaa84fbd-85a5-4fcb-b34f-23b7e56145c7"); - let query = unlink_ip_pool_from_internal_silo_query(ip_pool_id); - let _ = query - .explain_async(&conn) + // Fetch the pools. + let pools = datastore + .ip_pools_service_lookup_both_versions(opctx) .await - .expect("Failed to explain query - is it valid SQL?"); + .unwrap(); + + // We should be able to delete one of these. + let _ = datastore.ip_pool_delete( + opctx, + &pools.ipv4.authz_pool, + &pools.ipv4.db_pool + ) + .await + .expect("Should be able to delete delegated IP Pool when at least one remains"); + + // Check there's only one left. + let pagparams = &PaginatedBy::Id(DataPageParams { + marker: None, + direction: dropshot::PaginationOrder::Ascending, + limit: 100.try_into().unwrap(), + }); + let l = datastore + .ip_pools_delegated_list(opctx, None, &pagparams) + .await + .unwrap(); + assert_eq!(l.len(), 1); + + // We should _not_ be able to delete the other now, because there's only + // one left. + let err = datastore.ip_pool_delete( + opctx, + &pools.ipv6.authz_pool, + &pools.ipv6.db_pool, + ).await + .expect_err("Should not be able to delete delegated IP Pool when only one remains"); + let Error::InvalidRequest { message } = err else { + panic!("Expected InvalidRequest error, found {err:#?}"); + }; + assert_eq!(message.external_message(), LAST_POOL_ERROR); + + let l = datastore + .ip_pools_delegated_list(opctx, None, &pagparams) + .await + .unwrap(); + assert_eq!(l.len(), 1); db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] - async fn cannot_unlink_last_linked_internal_ip_pool() { + async fn cannot_revoke_last_delegated_ip_pool() { let logctx = - dev::test_setup_log("cannot_unlink_last_linked_internal_ip_pool"); + dev::test_setup_log("cannot_revoke_last_delegated_ip_pool"); let db = TestDatabase::new_with_datastore(&logctx.log).await; let (opctx, datastore) = (db.opctx(), db.datastore()); - // Get some basic data. 
- let (authz_silo, _) = - nexus_db_lookup::LookupPath::new(opctx, datastore) - .silo_id(INTERNAL_SILO_ID) - .fetch() - .await - .unwrap(); + // Fetch the pools. let pools = datastore .ip_pools_service_lookup_both_versions(opctx) .await .unwrap(); - // We should be able to delete one of these. - let _ = datastore.ip_pool_unlink_silo( + // We should be able to revoke one of these. + let _ = datastore.ip_pool_revoke( opctx, &pools.ipv4.authz_pool, - &authz_silo, - ).await - .expect("Should be able to delete internal IP Pool when more than one remains"); + &pools.ipv4.db_pool + ) + .await + .expect("Should be able to revoke delegated IP Pool when at least one remains"); + // Check there's only one left. let pagparams = &PaginatedBy::Id(DataPageParams { marker: None, direction: dropshot::PaginationOrder::Ascending, limit: 100.try_into().unwrap(), }); let l = datastore - .ip_pools_service_list(opctx, None, &pagparams) + .ip_pools_delegated_list(opctx, None, &pagparams) .await .unwrap(); assert_eq!(l.len(), 1); - // We should _not_ be able to delete the other now, because there's only + // We should _not_ be able to revoke the other now, because there's only // one left. - let err = datastore.ip_pool_unlink_silo( + let err = datastore.ip_pool_revoke( opctx, &pools.ipv6.authz_pool, - &authz_silo, + &pools.ipv6.db_pool, ).await - .expect_err("Should not be able to delete internal IP Pool when only one remains"); + .expect_err("Should not be able to revoke delegated IP Pool when only one remains"); assert_matches!(err, Error::InvalidRequest { .. }); let l = datastore - .ip_pools_service_list(opctx, None, &pagparams) + .ip_pools_delegated_list(opctx, None, &pagparams) .await .unwrap(); assert_eq!(l.len(), 1); @@ -2985,21 +2729,13 @@ mod test { } #[tokio::test] - async fn cannot_unlink_internal_ip_pool_with_outstanding_external_ips() { + async fn cannot_revoke_delegated_ip_pool_with_outstanding_external_ips() { let logctx = dev::test_setup_log( - "cannot_unlink_internal_ip_pool_with_outstanding_external_ips", + "cannot_revoke_delegated_ip_pool_with_outstanding_external_ips", ); let db = TestDatabase::new_with_datastore(&logctx.log).await; let (opctx, datastore) = (db.opctx(), db.datastore()); - // Get some basic data. - let (authz_silo, _) = - nexus_db_lookup::LookupPath::new(opctx, datastore) - .silo_id(INTERNAL_SILO_ID) - .fetch() - .await - .unwrap(); - // Get pool, add a range, allocate an external IP. let pools = datastore .ip_pools_service_lookup_both_versions(opctx) @@ -3039,14 +2775,18 @@ mod test { .await .expect("Should be able to create zone external IP"); - // Should not be able to delete the IPv4 pool now, since we've got an + // Should not be able to revoke the IPv4 pool now, since we've got an // address in use. - let _ = datastore.ip_pool_unlink_silo( + let err = datastore.ip_pool_revoke( opctx, &pools.ipv4.authz_pool, - &authz_silo, + &pools.ipv4.db_pool ).await - .expect_err("Should not be able to delete internal IP Pool when an address is in use"); + .expect_err("Should not be able to revoke internal IP Pool when an address is in use"); + let Error::InvalidRequest { message } = err else { + panic!("Expected InvalidRequest, got: {err:#?}"); + }; + assert_eq!(message.external_message(), POOL_HAS_IPS_ERROR); // Delete the address, and now we can delete the link. let _ = datastore @@ -3055,10 +2795,10 @@ mod test { .expect("Should be able to delete external IP"); // We should be able to delete one of these. 
- let _ = datastore.ip_pool_unlink_silo( + let _ = datastore.ip_pool_revoke( opctx, &pools.ipv4.authz_pool, - &authz_silo, + &pools.ipv4.db_pool, ).await .expect( "Should be able to delete internal IP Pool when more than one remains, \ @@ -3068,4 +2808,40 @@ mod test { db.terminate().await; logctx.cleanup_successful(); } + + #[tokio::test] + async fn can_explain_delegate_ip_pool_query() { + let logctx = dev::test_setup_log("can_explain_delegate_ip_pool_query"); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = pool.claim().await.unwrap(); + + let ip_pool_id = uuid::uuid!("27b74f5d-0a76-45db-8768-cd043c644f1d"); + let query = delegate_ip_pool_query(ip_pool_id); + let _ = query + .explain_async(&conn) + .await + .expect("Failed to explain query - is it valid SQL?"); + + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn can_explain_revoke_ip_pool_query() { + let logctx = dev::test_setup_log("can_explain_revoke_ip_pool_query"); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = pool.claim().await.unwrap(); + + let ip_pool_id = uuid::uuid!("27b74f5d-0a76-45db-8768-cd043c644f1d"); + let query = revoke_ip_pool_query(ip_pool_id); + let _ = query + .explain_async(&conn) + .await + .expect("Failed to explain query - is it valid SQL?"); + + db.terminate().await; + logctx.cleanup_successful(); + } } diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 6059a30e008..83eb769c77b 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -57,7 +57,6 @@ use nexus_types::external_api::shared; use nexus_types::external_api::shared::IpRange; use nexus_types::external_api::shared::SiloRole; use nexus_types::identity::Resource; -use nexus_types::silo::INTERNAL_SILO_ID; use omicron_common::api::external::AllowedSourceIps; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Error; @@ -987,12 +986,13 @@ impl DataStore { self.rack_insert(opctx, &db::model::Rack::new(rack_id)).await?; - // Insert and link the services IP Pool for both IP versions. + // Insert a delegated IP Pool for both IP versions. for (version, name) in [ (IpVersion::V4, SERVICE_IPV4_POOL_NAME), (IpVersion::V6, SERVICE_IPV6_POOL_NAME), ] { - let internal_pool = db::model::IpPool::new( + // Create a delegated IP Pool, if needed. + let internal_pool = db::model::IpPool::new_delegated( &IdentityMetadataCreateParams { name: name.parse::().unwrap(), description: format!( @@ -1001,28 +1001,9 @@ impl DataStore { }, version, ); - // Create the pool, and link it to the internal silo if needed. But - // we cannot set a default. - let internal_pool_id = internal_pool.id(); - let internal_created = self - .ip_pool_create(opctx, internal_pool) - .await - .map(|_| true) - .or_else(|e| match e { - Error::ObjectAlreadyExists { .. } => Ok(false), - _ => Err(e), - })?; - if internal_created { - self.ip_pool_link_silo( - opctx, - db::model::IpPoolResource { - ip_pool_id: internal_pool_id, - resource_type: db::model::IpPoolResourceType::Silo, - resource_id: INTERNAL_SILO_ID, - is_default: false, - }, - ) - .await?; + match self.ip_pool_create(opctx, internal_pool).await { + Ok(_) | Err(Error::ObjectAlreadyExists { .. 
}) => {} + Err(e) => return Err(e), } } diff --git a/nexus/db-queries/tests/output/ip_pool_external_silo_link.sql b/nexus/db-queries/tests/output/ip_pool_external_silo_link.sql index a5f0cba9fb8..6e5b05a377d 100644 --- a/nexus/db-queries/tests/output/ip_pool_external_silo_link.sql +++ b/nexus/db-queries/tests/output/ip_pool_external_silo_link.sql @@ -1,32 +1,23 @@ WITH - ip_pool AS (SELECT id FROM ip_pool WHERE id = $1 AND time_deleted IS NULL), - silo AS (SELECT id FROM silo WHERE id = $2 AND time_deleted IS NULL), - existing_linked_silo + ip_pool AS ( - SELECT resource_id FROM ip_pool_resource WHERE ip_pool_id = $3 AND resource_type = $4 LIMIT 1 - ) + SELECT + CAST(IF(is_delegated, 'bad-link-type', $1) AS UUID) AS id + FROM + ip_pool + WHERE + id = $2 AND time_deleted IS NULL + ), + silo AS (SELECT id FROM silo WHERE id = $3 AND time_deleted IS NULL) INSERT INTO ip_pool_resource (ip_pool_id, resource_type, resource_id, is_default) SELECT CAST(COALESCE(CAST(ip.id AS STRING), 'ip-pool-deleted') AS UUID), - $5, - CAST( - CASE - WHEN s.id IS NULL THEN 'silo-deleted' - WHEN (s.id = $6 AND els.resource_id = $7) - OR (s.id != $8 AND els.resource_id != $9) - OR els.resource_id IS NULL - THEN $10 - ELSE 'bad-link-type' - END - AS UUID - ), - $11 + $4, + CAST(COALESCE(CAST(s.id AS STRING), 'silo-deleted') AS UUID), + $5 FROM - (SELECT 1) AS dummy - LEFT JOIN ip_pool AS ip ON true - LEFT JOIN silo AS s ON true - LEFT JOIN existing_linked_silo AS els ON true + (SELECT 1) AS dummy LEFT JOIN ip_pool AS ip ON true LEFT JOIN silo AS s ON true RETURNING * diff --git a/nexus/db-schema/src/schema.rs b/nexus/db-schema/src/schema.rs index 777c4cb2dbb..a120682e612 100644 --- a/nexus/db-schema/src/schema.rs +++ b/nexus/db-schema/src/schema.rs @@ -630,6 +630,7 @@ table! 
{ time_deleted -> Nullable, ip_version -> crate::enums::IpVersionEnum, rcgen -> Int8, + is_delegated -> Bool, } } diff --git a/schema/crdb/add-ip-pool-is-delegated-column/up01.sql b/schema/crdb/add-ip-pool-is-delegated-column/up01.sql new file mode 100644 index 00000000000..d9780a04420 --- /dev/null +++ b/schema/crdb/add-ip-pool-is-delegated-column/up01.sql @@ -0,0 +1,3 @@ +ALTER TABLE omicron.public.ip_pool +ADD COLUMN is_delegated BOOL NOT NULL +DEFAULT FALSE; diff --git a/schema/crdb/add-ip-pool-is-delegated-column/up02.sql b/schema/crdb/add-ip-pool-is-delegated-column/up02.sql new file mode 100644 index 00000000000..e9be4e2308e --- /dev/null +++ b/schema/crdb/add-ip-pool-is-delegated-column/up02.sql @@ -0,0 +1,4 @@ +SET LOCAL disallow_full_table_scans = 'off'; +UPDATE ip_pool +SET is_delegated = TRUE +WHERE name = 'oxide-service-pool-v4' OR name = 'oxide-service-pool-v6'; diff --git a/schema/crdb/add-ip-pool-is-delegated-column/up03.sql b/schema/crdb/add-ip-pool-is-delegated-column/up03.sql new file mode 100644 index 00000000000..296dfb00470 --- /dev/null +++ b/schema/crdb/add-ip-pool-is-delegated-column/up03.sql @@ -0,0 +1,3 @@ +ALTER TABLE omicron.public.ip_pool +ALTER COLUMN is_delegated +DROP DEFAULT; diff --git a/schema/crdb/add-ip-pool-is-delegated-column/up04.sql b/schema/crdb/add-ip-pool-is-delegated-column/up04.sql new file mode 100644 index 00000000000..a291be977e1 --- /dev/null +++ b/schema/crdb/add-ip-pool-is-delegated-column/up04.sql @@ -0,0 +1,2 @@ +ALTER TABLE omicron.public.ip_pool_resource +DROP CONSTRAINT IF EXISTS internal_silo_has_no_default_pool; diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 5126e39944b..a9d133b4ce5 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -2102,7 +2102,10 @@ CREATE TABLE IF NOT EXISTS omicron.public.ip_pool ( rcgen INT8 NOT NULL, /* The IP version of the ranges contained in this pool. 
*/ - ip_version omicron.public.ip_version NOT NULL + ip_version omicron.public.ip_version NOT NULL, + + /* Indicates the pool is delegated by an operator for internal Oxide use */ + is_delegated BOOL NOT NULL ); /* @@ -2132,16 +2135,6 @@ CREATE TABLE IF NOT EXISTS omicron.public.ip_pool_resource ( -- resource_type is redundant because resource IDs are globally unique, but -- logically it belongs here PRIMARY KEY (ip_pool_id, resource_type, resource_id), - - -- Check that there are no default pools for the internal silo - CONSTRAINT internal_silo_has_no_default_pool CHECK ( - NOT ( - resource_type = 'silo' AND - resource_id = '001de000-5110-4000-8000-000000000001' AND - is_default - ) - ) - ); -- a given resource can only have one default ip pool @@ -6688,7 +6681,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - (TRUE, NOW(), NOW(), '193.0.0', NULL) + (TRUE, NOW(), NOW(), '194.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; From b3da59b4c21d303790bd81b6a55fb826c7a82bf9 Mon Sep 17 00:00:00 2001 From: Benjamin Naecker Date: Thu, 2 Oct 2025 23:50:02 +0000 Subject: [PATCH 3/8] Typo --- schema/crdb/dbinit.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index a9d133b4ce5..913fe1d8ef6 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -2134,7 +2134,7 @@ CREATE TABLE IF NOT EXISTS omicron.public.ip_pool_resource ( -- resource_type is redundant because resource IDs are globally unique, but -- logically it belongs here - PRIMARY KEY (ip_pool_id, resource_type, resource_id), + PRIMARY KEY (ip_pool_id, resource_type, resource_id) ); -- a given resource can only have one default ip pool From 34fa464874b4070d1aaa7f522323acd9f22e659d Mon Sep 17 00:00:00 2001 From: Benjamin Naecker Date: Fri, 3 Oct 2025 01:42:12 +0000 Subject: [PATCH 4/8] Schema migration idempotency fix --- schema/crdb/add-ip-pool-is-delegated-column/up01.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/schema/crdb/add-ip-pool-is-delegated-column/up01.sql b/schema/crdb/add-ip-pool-is-delegated-column/up01.sql index d9780a04420..4d14275932a 100644 --- a/schema/crdb/add-ip-pool-is-delegated-column/up01.sql +++ b/schema/crdb/add-ip-pool-is-delegated-column/up01.sql @@ -1,3 +1,3 @@ ALTER TABLE omicron.public.ip_pool -ADD COLUMN is_delegated BOOL NOT NULL +ADD COLUMN IF NOT EXISTS is_delegated BOOL NOT NULL DEFAULT FALSE; From e4ff3b8b8cc982724a3b989fec1b0c686dc4b2c7 Mon Sep 17 00:00:00 2001 From: Benjamin Naecker Date: Mon, 6 Oct 2025 19:08:09 +0000 Subject: [PATCH 5/8] Use an enum instead of a bool to represent reservations - Add an enum for IP Pool reservation type, currently just representing internal or external uses. - Rework queries to be phrased in terms of "reserving" a pool for a specific use and checking for valid reservation types at query time. - Add expectorate tests for all complicated queries. 
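
As a rough illustration (a sketch, not code from this change; the identity
parameters are placeholders), the reservation is now chosen when the pool is
constructed rather than toggled afterwards:

```rust
use nexus_db_model::{IpPool, IpPoolReservationType, IpVersion};
use omicron_common::api::external::IdentityMetadataCreateParams;

let identity = IdentityMetadataCreateParams {
    name: "example-pool".parse().unwrap(),
    description: String::new(),
};

// Reserved for customer silos; this is what `IpPool::new` produces.
let external = IpPool::new(&identity, IpVersion::V4);
assert_eq!(external.reservation_type, IpPoolReservationType::ExternalSilos);

// Reserved for Oxide-internal services, replacing `new_delegated`.
let internal = IpPool::new_oxide_internal(&identity, IpVersion::V4);
assert_eq!(internal.reservation_type, IpPoolReservationType::OxideInternal);
```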
--- common/src/api/external/http_pagination.rs | 9 - nexus/db-model/src/ip_pool.rs | 49 +- nexus/db-model/src/schema_versions.rs | 2 +- nexus/db-queries/src/db/datastore/ip_pool.rs | 672 ++++++++++++------ nexus/db-queries/src/db/datastore/rack.rs | 6 +- .../output/ip_pool_external_silo_link.sql | 10 +- .../tests/output/reserve_external_ip_pool.sql | 16 + .../tests/output/reserve_internal_ip_pool.sql | 38 + nexus/db-schema/src/enums.rs | 1 + nexus/db-schema/src/schema.rs | 2 +- .../add-ip-pool-is-delegated-column/up01.sql | 3 - .../up01.sql | 5 + .../up02.sql | 3 + .../up03.sql} | 2 +- .../up04.sql} | 2 +- .../up05.sql} | 0 schema/crdb/dbinit.sql | 10 +- 17 files changed, 560 insertions(+), 270 deletions(-) create mode 100644 nexus/db-queries/tests/output/reserve_external_ip_pool.sql create mode 100644 nexus/db-queries/tests/output/reserve_internal_ip_pool.sql delete mode 100644 schema/crdb/add-ip-pool-is-delegated-column/up01.sql create mode 100644 schema/crdb/add-ip-pool-reservation-type-column/up01.sql create mode 100644 schema/crdb/add-ip-pool-reservation-type-column/up02.sql rename schema/crdb/{add-ip-pool-is-delegated-column/up02.sql => add-ip-pool-reservation-type-column/up03.sql} (74%) rename schema/crdb/{add-ip-pool-is-delegated-column/up03.sql => add-ip-pool-reservation-type-column/up04.sql} (62%) rename schema/crdb/{add-ip-pool-is-delegated-column/up04.sql => add-ip-pool-reservation-type-column/up05.sql} (100%) diff --git a/common/src/api/external/http_pagination.rs b/common/src/api/external/http_pagination.rs index 1fb18874b6a..710de77f483 100644 --- a/common/src/api/external/http_pagination.rs +++ b/common/src/api/external/http_pagination.rs @@ -382,15 +382,6 @@ pub enum PaginatedBy<'a> { Name(DataPageParams<'a, Name>), } -impl<'a> PaginatedBy<'a> { - pub fn limit(&self) -> NonZeroU32 { - match self { - PaginatedBy::Id(inner) => inner.limit, - PaginatedBy::Name(inner) => inner.limit, - } - } -} - impl ScanParams for ScanByNameOrId { diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs index 83ab169de7b..811d33820bd 100644 --- a/nexus/db-model/src/ip_pool.rs +++ b/nexus/db-model/src/ip_pool.rs @@ -24,6 +24,37 @@ use omicron_common::api::external; use std::net::IpAddr; use uuid::Uuid; +impl_enum_type!( + IpPoolReservationTypeEnum: + + #[derive( + AsExpression, + Clone, + Copy, + Debug, + Eq, + FromSqlRow, + PartialEq, + schemars::JsonSchema, + serde::Deserialize, + serde::Serialize, + )] + pub enum IpPoolReservationType; + + ExternalSilos => b"external_silos" + OxideInternal => b"oxide_internal" +); + +impl ::std::fmt::Display for IpPoolReservationType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let s = match self { + IpPoolReservationType::ExternalSilos => "external_silos", + IpPoolReservationType::OxideInternal => "oxide_internal", + }; + f.write_str(s) + } +} + impl_enum_type!( IpVersionEnum: @@ -92,14 +123,14 @@ pub struct IpPool { /// the contained ranges. pub rcgen: i64, - /// True if the IP Pool has been delegated for Oxide use. - pub is_delegated: bool, + /// Indicates what the pool is reserved for. + pub reservation_type: IpPoolReservationType, } impl IpPool { /// Create a new IP Pool. /// - /// The pool is not delegated to Oxide. + /// The pool is reserved for external customer Silos. 
pub fn new( pool_identity: &external::IdentityMetadataCreateParams, ip_version: IpVersion, @@ -111,12 +142,12 @@ impl IpPool { ), ip_version, rcgen: 0, - is_delegated: false, + reservation_type: IpPoolReservationType::ExternalSilos, } } - /// Create a new pool delegated for Oxide's internal use. - pub fn new_delegated( + /// Create a new pool reserved for Oxide's internal use. + pub fn new_oxide_internal( pool_identity: &external::IdentityMetadataCreateParams, ip_version: IpVersion, ) -> Self { @@ -127,13 +158,13 @@ impl IpPool { ), ip_version, rcgen: 0, - is_delegated: true, + reservation_type: IpPoolReservationType::OxideInternal, } } /// Create a new IPv4 IP Pool. /// - /// The pool is not delegated to Oxide. + /// The pool is reserved for external customer Silos. pub fn new_v4( pool_identity: &external::IdentityMetadataCreateParams, ) -> Self { @@ -142,7 +173,7 @@ impl IpPool { /// Create a new IPv6 IP Pool. /// - /// The pool is not delegated to Oxide. + /// The pool is reserved for external customer Silos. pub fn new_v6( pool_identity: &external::IdentityMetadataCreateParams, ) -> Self { diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs index 86a583dbf9d..be8c5692bd3 100644 --- a/nexus/db-model/src/schema_versions.rs +++ b/nexus/db-model/src/schema_versions.rs @@ -28,7 +28,7 @@ static KNOWN_VERSIONS: LazyLock> = LazyLock::new(|| { // | leaving the first copy as an example for the next person. // v // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"), - KnownVersion::new(196, "add-ip-pool-is-delegated-column"), + KnownVersion::new(196, "add-ip-pool-reservation-type-column"), KnownVersion::new(195, "tuf-pruned-index"), KnownVersion::new(194, "tuf-pruned"), KnownVersion::new(193, "nexus-lockstep-port"), diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 7e379ed54c5..0c7594583a4 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -16,6 +16,7 @@ use crate::db::identity::Resource; use crate::db::model::IpKind; use crate::db::model::IpPool; use crate::db::model::IpPoolRange; +use crate::db::model::IpPoolReservationType; use crate::db::model::IpPoolResource; use crate::db::model::IpPoolResourceType; use crate::db::model::IpPoolUpdate; @@ -46,6 +47,7 @@ use nexus_db_model::IpVersion; use nexus_db_model::Project; use nexus_db_model::Vpc; use nexus_db_schema::enums::IpKindEnum; +use nexus_db_schema::enums::IpPoolReservationTypeEnum; use nexus_types::external_api::shared::IpRange; use nexus_types::silo::INTERNAL_SILO_ID; use omicron_common::api::external::CreateResult; @@ -93,9 +95,9 @@ impl ServiceIpPools { } // Error message emitted when a user attempts to link an IP Pool and internal -// Silo, but the pool is already linked to an external Silo, or vice versa. +// Silo, but the pool is already reserved for internal use, or vice versa. const BAD_SILO_LINK_ERROR: &str = "IP Pools cannot be both linked to external \ - Silos and delegated for internal Oxide usage."; + Silos and reserved for internal Oxide usage."; // Error message emitted when a user attempts to unlink an IP Pool from a Silo // while the pool has external IP addresses allocated from it. @@ -104,16 +106,16 @@ const POOL_HAS_IPS_ERROR: &str = // Error message emitted when a user attempts to unlink an IP Pool from the // Oxide internal Silo, without at least one other IP Pool linked to it. 
-const LAST_POOL_ERROR: &str = "Cannot delete the last IP Pool delegated to \ - Oxide internal usage. Create and delegate at least one more IP Pool \ +const LAST_POOL_ERROR: &str = "Cannot delete the last IP Pool reserved for \ + Oxide internal usage. Create and reserve at least one more IP Pool \ before deleting this one."; impl DataStore { - /// List IP Pools by their delegation state and optionally IP version, paginated. - async fn ip_pools_list_paginated( + /// List IP Pools by their reservation type and optionally IP version, paginated. + pub async fn ip_pools_list_paginated( &self, opctx: &OpContext, - is_delegated: bool, + reservation_type: IpPoolReservationType, version: Option, pagparams: &PaginatedBy<'_>, ) -> ListResultVec { @@ -137,8 +139,7 @@ impl DataStore { }; query .filter(ip_pool::time_deleted.is_null()) - .filter(ip_pool::is_delegated.eq(is_delegated)) - .limit(pagparams.limit().get().into()) + .filter(ip_pool::reservation_type.eq(reservation_type)) .select(IpPool::as_select()) .get_results_async(&*self.pool_connection_authorized(opctx).await?) .await @@ -153,7 +154,13 @@ impl DataStore { opctx: &OpContext, pagparams: &PaginatedBy<'_>, ) -> ListResultVec { - self.ip_pools_list_paginated(opctx, false, None, pagparams).await + self.ip_pools_list_paginated( + opctx, + IpPoolReservationType::ExternalSilos, + None, + pagparams, + ) + .await } /// Look up whether the given pool is available to users in the current @@ -258,16 +265,6 @@ impl DataStore { }) } - /// List IP Pools delegated for Oxide's internal usage. - pub async fn ip_pools_delegated_list( - &self, - opctx: &OpContext, - version: Option, - pagparams: &PaginatedBy<'_>, - ) -> ListResultVec { - self.ip_pools_list_paginated(opctx, true, version, pagparams).await - } - /// Look up internal service IP Pools for both IP versions. /// /// This is useful when you need to handle resources like external IPs where @@ -293,7 +290,8 @@ impl DataStore { /// /// This method may require an index by Availability Zone in the future. // - // TODO-remove: Use ip_pools_delegated_list instead. + // TODO-remove: Use ip_pools_list_paginated with the right enum type + // instead. // // See https://github.com/oxidecomputer/omicron/issues/8947. pub async fn ip_pools_service_lookup( @@ -338,7 +336,7 @@ impl DataStore { /// Delete an IP Pool, and any links between it an any Silos. /// /// This fails if there are still IP Ranges in the pool, or if we're - /// deleting the last delegated pool. + /// deleting the last pool reserved for Oxide use. pub async fn ip_pool_delete( &self, opctx: &OpContext, @@ -369,12 +367,17 @@ impl DataStore { } // Add a small subquery, if needed, to ensure that we don't delete this - // IP Pool if it's the last delegated pool. There has to always be at + // IP Pool if it's the last reserved pool. There has to always be at // least one of these. - let ensure_delegated = if db_pool.is_delegated { - diesel::dsl::sql::(ENSURE_DELEGATED_COUNT_SUBQUERY) - } else { + let enough_reserved_pools = if matches!( + db_pool.reservation_type, + IpPoolReservationType::ExternalSilos + ) { diesel::dsl::sql::("TRUE") + } else { + diesel::dsl::sql::(&count_reserved_pools_subquery( + db_pool.reservation_type, + )) }; // Delete the pool, conditional on the rcgen not having changed. 
This @@ -385,7 +388,7 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::id.eq(authz_pool.id())) .filter(dsl::rcgen.eq(db_pool.rcgen)) - .filter(ensure_delegated) + .filter(enough_reserved_pools) .set(dsl::time_deleted.eq(now)) .execute_async(&*conn) .await @@ -425,10 +428,11 @@ impl DataStore { /// Check whether the pool is internal by checking that it exists and is /// associated with the internal silo // - // TODO-remove: This should probably go away when we let operators link any - // IP Pools to the internal Silo. The pool belongs to them, even if they've - // delegated it to us. See - // https://github.com/oxidecomputer/omicron/issues/8947. + // TODO-remove: This should go away when we let operators reserve any IP + // Pools for internal Oxide usage. The pool belongs to them even in that + // case, and so we should show it to them. + // + // See https://github.com/oxidecomputer/omicron/issues/8947. pub async fn ip_pool_is_internal( &self, opctx: &OpContext, @@ -438,7 +442,10 @@ impl DataStore { ip_pool::table .find(authz_pool.id()) .filter(ip_pool::time_deleted.is_null()) - .select(ip_pool::is_delegated) + .select( + ip_pool::reservation_type + .ne(IpPoolReservationType::ExternalSilos), + ) .first_async::( &*self.pool_connection_authorized(opctx).await?, ) @@ -472,25 +479,40 @@ impl DataStore { }) } - /// Delegate an IP Pool for Oxide internal use. - pub async fn ip_pool_delegate( + /// Reserve an IP Pool for a specific use. + pub async fn ip_pool_reserve( &self, opctx: &OpContext, authz_pool: &authz::IpPool, db_pool: &IpPool, + reservation_type: IpPoolReservationType, ) -> UpdateResult<()> { - if db_pool.is_delegated { - return Err(Error::invalid_request("IP Pool is already delegated")); + if db_pool.reservation_type == reservation_type { + return Err(Error::invalid_request(format!( + "IP Pool already has reservation type '{}'", + reservation_type, + ))); } - let n_rows = delegate_ip_pool_query(authz_pool.id()) + let n_rows = reserve_ip_pool_query(db_pool, reservation_type) .execute_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| match e { DieselError::DatabaseError( DatabaseErrorKind::Unknown, ref info, - ) if info.message().ends_with("invalid bool value") => { - Error::invalid_request(BAD_SILO_LINK_ERROR) + ) => { + let message = info.message(); + if message.ends_with("invalid bool value") { + Error::invalid_request(BAD_SILO_LINK_ERROR) + } else if message.contains("division by zero") { + Error::invalid_request(POOL_HAS_IPS_ERROR) + } else if message.starts_with("could not parse") + && message.contains("as type int") + { + Error::invalid_request(LAST_POOL_ERROR) + } else { + public_error_from_diesel(e, ErrorHandler::Server) + } } _ => public_error_from_diesel( e, @@ -506,46 +528,16 @@ impl DataStore { } } - /// Revoke an IP Pool previously delegated for Oxide internal use. + /// Unreserve an IP. + /// + /// TODO(ben) Remove this, use reserve with specific type for everything. pub async fn ip_pool_revoke( &self, - opctx: &OpContext, - authz_pool: &authz::IpPool, - db_pool: &IpPool, + _opctx: &OpContext, + _authz_pool: &authz::IpPool, + _db_pool: &IpPool, ) -> UpdateResult<()> { - if !db_pool.is_delegated { - return Err(Error::invalid_request( - "Cannot revoke a pool that has not been previously delegated", - )); - } - let n_rows = revoke_ip_pool_query(authz_pool.id()) - .execute_async(&*self.pool_connection_authorized(opctx).await?) 
- .await - .map_err(|e| match e { - DieselError::DatabaseError( - DatabaseErrorKind::Unknown, - ref info, - ) => { - if info.message().ends_with("invalid bool value") { - Error::invalid_request(LAST_POOL_ERROR) - } else if info.message().contains("division by zero") { - Error::invalid_request(POOL_HAS_IPS_ERROR) - } else { - public_error_from_diesel(e, ErrorHandler::Server) - } - } - _ => public_error_from_diesel( - e, - ErrorHandler::NotFoundByResource(authz_pool), - ), - })?; - if n_rows == 0 { - Err(Error::invalid_request( - "update failed to due concurrent modification", - )) - } else { - Ok(()) - } + todo!("remove me"); } /// Return the number of IPs allocated from and the capacity of the provided @@ -744,7 +736,8 @@ impl DataStore { if ip_pool_resource.resource_id == INTERNAL_SILO_ID { return Err(Error::internal_error( "IP Pools should not be linked to the internal silo. \ - Set the `is_delegated` column to true instead.", + Set the `reservation_type` column to an internal \ + variant instead.", )); } opctx @@ -774,13 +767,13 @@ impl DataStore { // Handle intentional errors in the query. DieselError::DatabaseError(DatabaseErrorKind::Unknown, ref info) => { let is_uuid_cast_error = |msg: &str, sentinel: &str| -> bool { + // We're unfortunately allocating here, but this + // error path isn't expected to be common. let expected = format!( - "could not parse \"{}\" as type uuid: uuid: \ - incorrect UUID length: {}", - sentinel, + "uuid: incorrect UUID length: {}", sentinel, ); - msg == expected + msg.ends_with(&expected) }; let msg = info.message(); if is_uuid_cast_error(msg, BAD_SILO_LINK_SENTINEL) { @@ -1113,8 +1106,8 @@ impl DataStore { if authz_silo.id() == INTERNAL_SILO_ID { return Err(Error::internal_error( - "Cannot unlink a delegated IP Pool. \ - Use the `is_delegated` column instead.", + "Cannot unlink an internally-reserved IP Pool. \ + Use the `reservation_type` column instead.", )); } @@ -1390,10 +1383,14 @@ const IP_POOL_DELETED_SENTINEL: &str = "ip-pool-deleted"; // between selecting it and trying to insert the link. const SILO_DELETED_SENTINEL: &str = "silo-deleted"; +// Sentinel we try to cast as an integer when removing the reservation on the +// last internal pool of a given type. +const LAST_POOL_SENTINEL: &str = "last-pool"; + // Query to conditionally link an IP Pool to an external customer Silo. // // This method returns a SQL query to conditionally insert a link between an IP -// Pool and a Silo. It maintains the invariant that a pool can be delegated for +// Pool and a Silo. It maintains the invariant that a pool can be reserved for // Oxide internal usage XOR linked to customer silos. It also checks that the // pool and silo still exist when the query is run. // @@ -1402,16 +1399,20 @@ const SILO_DELETED_SENTINEL: &str = "silo-deleted"; // ```sql // WITH // -- Select the IP Pool by ID, used to ensure it still exists when we run -// -- this query. Also select the delegation state, and fail if the pool is -// -- currently delegated to Oxide. +// -- this query. Also select the reservation type, and fail if the pool is +// -- currently reserved for Oxide. 
// ip_pool AS ( -// SELECT CAST(IF(is_delegated, 'bad-link-type', 'id') AS UUID) +// SELECT CAST(IF( +// reservation_type != 'external_silos', +// 'bad-link-type', +// $1) +// AS UUID) // FROM ip_pool -// WHERE id = $1 AND time_deleted IS NULL +// WHERE id = $2 AND time_deleted IS NULL // ) // -- Select the Silo by ID, used to ensure it still exists when we run this // -- query -// silo AS (SELECT id FROM silo WHERE id = $2 AND time_deleted IS NULL), +// silo AS (SELECT id FROM silo WHERE id = $3 AND time_deleted IS NULL), // INSERT // INTO // ip_pool_resource (ip_pool_id, resource_type, resource_id, is_default) @@ -1421,12 +1422,12 @@ const SILO_DELETED_SENTINEL: &str = "silo-deleted"; // -- This is the "true or cast error" trick we use in many places. // CAST(COALESCE(CAST(ip.id AS STRING), 'ip-pool-deleted') AS UUID), // -- The resource type, always 'silo' here. -// $5, +// $4, // -- If the silo exists, take its ID as a string. If it does not exist, take // -- the string `'silo-deleted'`. Attempt to cast the result to a UUID. // -- This is the "true or cast error" trick we use in many places. // CAST(COALESCE(CAST(s.id AS STRING), 'silo-deleted') AS UUID), -// $11 +// $5 // FROM // (SELECT 1) AS dummy // LEFT JOIN ip_pool AS ip ON true @@ -1439,7 +1440,12 @@ fn link_ip_pool_to_external_silo_query( ) -> TypedSqlQuery> { let mut builder = QueryBuilder::new(); builder - .sql("WITH ip_pool AS (SELECT CAST(IF(is_delegated, '") + .sql("WITH ip_pool AS (SELECT CAST(IF(reservation_type != ") + .param() + .bind::( + IpPoolReservationType::ExternalSilos, + ) + .sql(", '") .sql(BAD_SILO_LINK_SENTINEL) .sql("', ") .param() @@ -1612,76 +1618,139 @@ fn unlink_ip_pool_from_external_silo_query( ); builder.query() } -// Helper subquery which fails with a bool-cast error if there are fewer -// than two delegated IP Pools. This is useful when we're trying to delete -// or revoke a delegated pool, to ensure there's at least one left. -const ENSURE_DELEGATED_COUNT_SUBQUERY: &str = "(SELECT CAST(IF((\ - SELECT COUNT(1) \ - FROM ip_pool \ - WHERE time_deleted IS NULL AND is_delegated LIMIT 2\ + +// Generate a small helper subquery which fails with a bool-cast error if there +// are fewer than 2 IP Pools reserved for the provided use. It must be internal. +fn count_reserved_pools_subquery( + reservation_type: IpPoolReservationType, +) -> String { + assert!(!matches!(reservation_type, IpPoolReservationType::ExternalSilos)); + format!( + "CAST(IF(\ + (SELECT COUNT(1) \ + FROM ip_pool \ + WHERE time_deleted IS NULL AND reservation_type = '{}' LIMIT 2\ ) >= 2, \ 'true', \ - 'last-pool') \ - AS BOOL) \ - )"; + '{}') \ + AS BOOL)", + reservation_type, LAST_POOL_SENTINEL, + ) +} -fn revoke_ip_pool_query(ip_pool_id: Uuid) -> TypedSqlQuery<()> { +// Conditionally reserve an IP Pool for a specific use. +// +// # Panics +// +// Panics if the current and new reservation type are the same. +fn reserve_ip_pool_query( + pool: &IpPool, + reservation_type: IpPoolReservationType, +) -> TypedSqlQuery<()> { + assert_ne!(pool.reservation_type, reservation_type); + match pool.reservation_type { + IpPoolReservationType::ExternalSilos => { + reserve_external_ip_pool_query(pool, reservation_type) + } + IpPoolReservationType::OxideInternal => { + reserve_internal_ip_pool_query(pool, reservation_type) + } + } +} + +// Query to conditionally reserve an IP Pool that is currently reserved for +// external silo use. +// +// Checks that the pool is not currently linked to any silos first. 
Note that +// this means there cannot be any silo-specific resources using the pool. +fn reserve_external_ip_pool_query( + ip_pool: &IpPool, + new_reservation_type: IpPoolReservationType, +) -> TypedSqlQuery<()> { let mut builder = QueryBuilder::new(); builder - .sql( - "\ - UPDATE ip_pool \ - SET is_delegated = FALSE, time_modified = NOW() \ - WHERE id = ", - ) + .sql("UPDATE ip_pool SET reservation_type = ") .param() - .bind::(ip_pool_id) + .bind::(new_reservation_type) + .sql(", time_modified = NOW() WHERE id = ") + .param() + .bind::(ip_pool.id()) + .sql(" AND time_deleted IS NULL AND reservation_type = ") + .param() + .bind::(ip_pool.reservation_type) .sql( - " AND time_deleted IS NULL AND is_delegated = TRUE AND \ - CAST(IF((\ - SELECT COUNT(1) \ - FROM ip_pool \ - WHERE time_deleted is NULL AND is_delegated LIMIT 2\ - ) >= 2, \ - 'true', \ - 'last-pool') \ - AS BOOL) AND \ - CAST(IF(EXISTS(\ + " AND CAST(IF(EXISTS(\ SELECT 1 \ - FROM external_ip \ - WHERE time_deleted IS NULL AND ip_pool_id = ", + FROM ip_pool_resource \ + WHERE ip_pool_id = ", ) .param() - .bind::(ip_pool_id) - .sql("), 1/0, 1) AS BOOL)"); + .bind::(ip_pool.id()) + .sql(" LIMIT 1), '") + .sql(BAD_SILO_LINK_SENTINEL) + .sql("', 'TRUE') AS BOOL)"); builder.query() } -// Conditionally delegate an IP Pool, checking that the pool isn't linked to any -// external silos. -fn delegate_ip_pool_query(ip_pool_id: Uuid) -> TypedSqlQuery<()> { +// Query to conditionally reserve an IP Pool that is currently reserved for Oxide +// internal use. +// +// Checks that: +// +// - There are no external IPs in use by Oxide resources. +// - There is at least one other internal pool of the same reservation type. +fn reserve_internal_ip_pool_query( + ip_pool: &IpPool, + new_reservation_type: IpPoolReservationType, +) -> TypedSqlQuery<()> { let mut builder = QueryBuilder::new(); builder - .sql( - "\ - UPDATE ip_pool \ - SET is_delegated = TRUE, time_modified = NOW() \ - WHERE id = ", - ) + .sql("UPDATE ip_pool SET reservation_type = ") .param() - .bind::(ip_pool_id) + .bind::(new_reservation_type) + .sql(", time_modified = NOW() WHERE id = ") + .param() + .bind::(ip_pool.id()) + .sql(" AND time_deleted IS NULL AND reservation_type = ") + .param() + .bind::(ip_pool.reservation_type) + // Generate div-by-zero error if there are IPs .sql( - " AND time_deleted IS NULL AND is_delegated = FALSE AND (\ + " AND (\ SELECT CAST(IF(EXISTS(\ SELECT 1 \ - FROM ip_pool_resource \ + FROM external_ip \ WHERE ip_pool_id = ", ) .param() - .bind::(ip_pool_id) - .sql("), '") - .sql(BAD_SILO_LINK_SENTINEL) - .sql("', 'TRUE') AS BOOL))"); + .bind::(ip_pool.id()) + .sql( + " AND \ + external_ip.is_service AND \ + time_deleted IS NULL \ + LIMIT 1\ + ), 1/0, 1) AS BOOL))", + ) + // Generate int-cast error if this is the last pool of this reservation + // type. 
+ .sql( + " AND CAST(IF(\ + (SELECT COUNT(1) \ + FROM ip_pool \ + WHERE time_deleted IS NULL \ + AND reservation_type = ", + ) + .param() + .bind::(ip_pool.reservation_type) + .sql( + " \ + LIMIT 2\ + ) >= 2, \ + '1', ", + ) + .param() + .bind::(LAST_POOL_SENTINEL) + .sql(") AS INT) = 1"); builder.query() } @@ -1693,8 +1762,8 @@ mod test { use crate::authz; use crate::db::datastore::ip_pool::{ BAD_SILO_LINK_ERROR, LAST_POOL_ERROR, POOL_HAS_IPS_ERROR, - delegate_ip_pool_query, link_ip_pool_to_external_silo_query, - revoke_ip_pool_query, unlink_ip_pool_from_external_silo_query, + link_ip_pool_to_external_silo_query, reserve_ip_pool_query, + unlink_ip_pool_from_external_silo_query, }; use crate::db::explain::ExplainableAsync as _; use crate::db::model::{ @@ -1709,7 +1778,7 @@ mod test { ExpressionMethods as _, QueryDsl as _, SelectableHelper as _, }; use nexus_db_lookup::LookupPath; - use nexus_db_model::IpVersion; + use nexus_db_model::{IpPoolIdentity, IpPoolReservationType, IpVersion}; use nexus_sled_agent_shared::inventory::ZoneKind; use nexus_types::deployment::{ OmicronZoneExternalFloatingIp, OmicronZoneExternalIp, @@ -2304,7 +2373,7 @@ mod test { } customer_pools.sort_by_key(|pool| pool.id()); - // Create a bunch which _are_ delegated for Oxide's usage. + // Create a bunch which _are_ reserved for Oxide's usage. let mut oxide_pools = Vec::with_capacity(N_POOLS); for i in 0..N_POOLS { // Create the pool @@ -2315,15 +2384,15 @@ mod test { let pool = datastore .ip_pool_create( opctx, - IpPool::new_delegated(&identity, IpVersion::V4), + IpPool::new_oxide_internal(&identity, IpVersion::V4), ) .await - .expect("Failed to create delegated IP pool"); + .expect("Failed to create reserved IP pool"); oxide_pools.push(pool); } assert_eq!(oxide_pools.len(), N_POOLS); - let fetch_paginated = |is_delegated| async move { + let fetch_paginated = |reservation_type| async move { let mut found = Vec::with_capacity(N_POOLS); let mut paginator = Paginator::new( NonZeroU32::new(5).unwrap(), @@ -2333,7 +2402,7 @@ mod test { let batch = datastore .ip_pools_list_paginated( opctx, - is_delegated, + reservation_type, None, &PaginatedBy::Id(page.current_pagparams()), ) @@ -2346,17 +2415,19 @@ mod test { }; // Paginate all the customer-reserved. - let customer_pools_found = fetch_paginated(false).await; + let customer_pools_found = + fetch_paginated(IpPoolReservationType::ExternalSilos).await; assert_eq!(customer_pools.len(), customer_pools_found.len()); assert_eq!(customer_pools, customer_pools_found); - // Paginate all those delegated to Oxide. + // Paginate all those reserved for Oxide. // // Note that we have 2 extra pools today, which are the builtin service // pools. These will go away in the future, so we'll unfortunately need // to update this test at that time. Until then, fetch those service // pools explicitly and add them. 
- let oxide_reserved_found = fetch_paginated(true).await; + let oxide_reserved_found = + fetch_paginated(IpPoolReservationType::OxideInternal).await; let pools = datastore .ip_pools_service_lookup_both_versions(opctx) .await @@ -2415,9 +2486,10 @@ mod test { } #[tokio::test] - async fn cannot_link_delegated_pool_to_external_silo() { - let logctx = - dev::test_setup_log("cannot_link_delegated_pool_to_external_silo"); + async fn cannot_link_oxide_internal_pool_to_external_silo() { + let logctx = dev::test_setup_log( + "cannot_link_oxide_internal_pool_to_external_silo", + ); let db = TestDatabase::new_with_datastore(&logctx.log).await; let (opctx, datastore) = (db.opctx(), db.datastore()); @@ -2429,7 +2501,7 @@ mod test { let ip_pool = datastore .ip_pool_create( opctx, - IpPool::new_delegated(&identity, IpVersion::V4), + IpPool::new_oxide_internal(&identity, IpVersion::V4), ) .await .expect("Failed to create IP pool"); @@ -2441,23 +2513,28 @@ mod test { resource_id: uuid::uuid!("cfb16a9d-764e-4c5d-8d0d-cf737885b84a"), is_default: false, }; - let err = datastore.ip_pool_link_silo(&opctx, link).await.expect_err( - "Expected to fail linking a delegated IP Pool to an external Silo", - ); - println!("{err:#?}"); + let res = datastore.ip_pool_link_silo(&opctx, link).await; + let Err(Error::InvalidRequest { message }) = &res else { + panic!( + "Expected to fail linking an internally-reserved \ + IP Pool to an external Silo, found: {res:#?}", + ); + }; + assert_eq!(message.external_message(), BAD_SILO_LINK_ERROR); db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] - async fn cannot_delegate_externally_linked_pool() { - let logctx = - dev::test_setup_log("cannot_delegate_externally_linked_pool"); + async fn cannot_reserve_externally_linked_pool_for_internal_use() { + let logctx = dev::test_setup_log( + "cannot_reserve_externally_linked_pool_for_internal_use", + ); let db = TestDatabase::new_with_datastore(&logctx.log).await; let (opctx, datastore) = (db.opctx(), db.datastore()); - // Create the pool, non-delegated. + // Create the pool, reserved for external silos. let identity = IdentityMetadataCreateParams { name: "external-ip-pool".parse().unwrap(), description: "".to_string(), @@ -2479,21 +2556,25 @@ mod test { .await .expect("Should be able to link unlinked pool to default silo"); - // We should fail to delegate it now. + // We should fail to reserve it for Oxide-internal use now. 
let (authz_pool, db_pool) = LookupPath::new(opctx, datastore) .ip_pool_id(ip_pool.id()) .fetch_for(authz::Action::Modify) .await .unwrap(); - let err = datastore - .ip_pool_delegate(opctx, &authz_pool, &db_pool) - .await - .expect_err( + let res = datastore + .ip_pool_reserve( + opctx, + &authz_pool, + &db_pool, + IpPoolReservationType::OxideInternal, + ) + .await; + let Err(Error::InvalidRequest { message }) = &res else { + panic!( "Expected to fail delegating an IP Pool \ - when it's already linked to an external silo", + when it's already linked to an external silo, found {res:#?}" ); - let Error::InvalidRequest { message } = err else { - panic!("Expected InvalidRequest, got {err:#?}"); }; assert_eq!(message.external_message(), BAD_SILO_LINK_ERROR); @@ -2618,9 +2699,10 @@ mod test { } #[tokio::test] - async fn cannot_delete_last_delegated_ip_pool() { - let logctx = - dev::test_setup_log("cannot_delete_last_delegated_ip_pool"); + async fn cannot_delete_last_internally_reserved_ip_pool() { + let logctx = dev::test_setup_log( + "cannot_delete_last_internally_reserved_ip_pool", + ); let db = TestDatabase::new_with_datastore(&logctx.log).await; let (opctx, datastore) = (db.opctx(), db.datastore()); @@ -2631,13 +2713,13 @@ mod test { .unwrap(); // We should be able to delete one of these. - let _ = datastore.ip_pool_delete( - opctx, - &pools.ipv4.authz_pool, - &pools.ipv4.db_pool - ) + let _ = datastore + .ip_pool_delete(opctx, &pools.ipv4.authz_pool, &pools.ipv4.db_pool) .await - .expect("Should be able to delete delegated IP Pool when at least one remains"); + .expect( + "Should be able to delete internally-reserved \ + IP Pool when at least one remains", + ); // Check there's only one left. let pagparams = &PaginatedBy::Id(DataPageParams { @@ -2646,26 +2728,37 @@ mod test { limit: 100.try_into().unwrap(), }); let l = datastore - .ip_pools_delegated_list(opctx, None, &pagparams) + .ip_pools_list_paginated( + opctx, + IpPoolReservationType::OxideInternal, + None, + &pagparams, + ) .await .unwrap(); assert_eq!(l.len(), 1); // We should _not_ be able to delete the other now, because there's only // one left. 
- let err = datastore.ip_pool_delete( - opctx, - &pools.ipv6.authz_pool, - &pools.ipv6.db_pool, - ).await - .expect_err("Should not be able to delete delegated IP Pool when only one remains"); - let Error::InvalidRequest { message } = err else { - panic!("Expected InvalidRequest error, found {err:#?}"); + let res = datastore + .ip_pool_delete(opctx, &pools.ipv6.authz_pool, &pools.ipv6.db_pool) + .await; + + let Err(Error::InvalidRequest { message }) = &res else { + panic!( + "Should not be able to delete internally-reserved \ + IP Pool when only one remains, found {res:#?}" + ); }; assert_eq!(message.external_message(), LAST_POOL_ERROR); let l = datastore - .ip_pools_delegated_list(opctx, None, &pagparams) + .ip_pools_list_paginated( + opctx, + IpPoolReservationType::OxideInternal, + None, + &pagparams, + ) .await .unwrap(); assert_eq!(l.len(), 1); @@ -2675,9 +2768,10 @@ mod test { } #[tokio::test] - async fn cannot_revoke_last_delegated_ip_pool() { - let logctx = - dev::test_setup_log("cannot_revoke_last_delegated_ip_pool"); + async fn cannot_externally_reserve_last_internally_reserved_ip_pool() { + let logctx = dev::test_setup_log( + "cannot_externally_reserve_last_internally_reserved_ip_pool", + ); let db = TestDatabase::new_with_datastore(&logctx.log).await; let (opctx, datastore) = (db.opctx(), db.datastore()); @@ -2687,14 +2781,19 @@ mod test { .await .unwrap(); - // We should be able to revoke one of these. - let _ = datastore.ip_pool_revoke( - opctx, - &pools.ipv4.authz_pool, - &pools.ipv4.db_pool - ) + // We should be able to reserve one of these for external use. + let _ = datastore + .ip_pool_reserve( + opctx, + &pools.ipv4.authz_pool, + &pools.ipv4.db_pool, + IpPoolReservationType::ExternalSilos, + ) .await - .expect("Should be able to revoke delegated IP Pool when at least one remains"); + .expect( + "Should be able to externally reserve IP Pool \ + when at least one internally-reserved pool remains", + ); // Check there's only one left. let pagparams = &PaginatedBy::Id(DataPageParams { @@ -2703,23 +2802,42 @@ mod test { limit: 100.try_into().unwrap(), }); let l = datastore - .ip_pools_delegated_list(opctx, None, &pagparams) + .ip_pools_list_paginated( + opctx, + IpPoolReservationType::OxideInternal, + None, + &pagparams, + ) .await .unwrap(); assert_eq!(l.len(), 1); - // We should _not_ be able to revoke the other now, because there's only - // one left. - let err = datastore.ip_pool_revoke( - opctx, - &pools.ipv6.authz_pool, - &pools.ipv6.db_pool, - ).await - .expect_err("Should not be able to revoke delegated IP Pool when only one remains"); - assert_matches!(err, Error::InvalidRequest { .. }); + // We should _not_ be able to reserve the other for external use now, + // because there's only one left for internal use. 
+ let res = datastore + .ip_pool_reserve( + opctx, + &pools.ipv6.authz_pool, + &pools.ipv6.db_pool, + IpPoolReservationType::ExternalSilos, + ) + .await; + let Err(Error::InvalidRequest { message }) = &res else { + panic!( + "Should not be able to externally-reserve an \ + internally-reserved IP Pool when only one remains, \ + found {res:#?}" + ); + }; + assert_eq!(message.external_message(), LAST_POOL_ERROR); let l = datastore - .ip_pools_delegated_list(opctx, None, &pagparams) + .ip_pools_list_paginated( + opctx, + IpPoolReservationType::OxideInternal, + None, + &pagparams, + ) .await .unwrap(); assert_eq!(l.len(), 1); @@ -2729,9 +2847,9 @@ mod test { } #[tokio::test] - async fn cannot_revoke_delegated_ip_pool_with_outstanding_external_ips() { + async fn cannot_externally_reserve_ip_pool_with_outstanding_external_ips() { let logctx = dev::test_setup_log( - "cannot_revoke_delegated_ip_pool_with_outstanding_external_ips", + "cannot_externally_reserve_ip_pool_with_outstanding_external_ips", ); let db = TestDatabase::new_with_datastore(&logctx.log).await; let (opctx, datastore) = (db.opctx(), db.datastore()); @@ -2775,30 +2893,34 @@ mod test { .await .expect("Should be able to create zone external IP"); - // Should not be able to revoke the IPv4 pool now, since we've got an - // address in use. - let err = datastore.ip_pool_revoke( - opctx, - &pools.ipv4.authz_pool, - &pools.ipv4.db_pool - ).await - .expect_err("Should not be able to revoke internal IP Pool when an address is in use"); - let Error::InvalidRequest { message } = err else { - panic!("Expected InvalidRequest, got: {err:#?}"); + // Should not be able to externally-reserve the IPv4 pool now, since + // we've got an address in use. + let res = datastore + .ip_pool_reserve( + opctx, + &pools.ipv4.authz_pool, + &pools.ipv4.db_pool, + IpPoolReservationType::ExternalSilos, + ) + .await; + let Err(Error::InvalidRequest { message }) = &res else { + panic!( + "Should not be able to externally reserve internal \ + IP Pool when an address is in use, found {res:#?}" + ); }; assert_eq!(message.external_message(), POOL_HAS_IPS_ERROR); - // Delete the address, and now we can delete the link. + // Delete the address, and now we can reserve the pool for external use. let _ = datastore .deallocate_external_ip(opctx, eip.id) .await .expect("Should be able to delete external IP"); - - // We should be able to delete one of these. 
- let _ = datastore.ip_pool_revoke( + let _ = datastore.ip_pool_reserve( opctx, &pools.ipv4.authz_pool, &pools.ipv4.db_pool, + IpPoolReservationType::ExternalSilos, ).await .expect( "Should be able to delete internal IP Pool when more than one remains, \ @@ -2810,14 +2932,29 @@ mod test { } #[tokio::test] - async fn can_explain_delegate_ip_pool_query() { - let logctx = dev::test_setup_log("can_explain_delegate_ip_pool_query"); + async fn can_explain_reserve_external_ip_pool_query() { + let logctx = + dev::test_setup_log("can_explain_reserve_external_ip_pool_query"); let db = TestDatabase::new_with_pool(&logctx.log).await; let pool = db.pool(); let conn = pool.claim().await.unwrap(); - let ip_pool_id = uuid::uuid!("27b74f5d-0a76-45db-8768-cd043c644f1d"); - let query = delegate_ip_pool_query(ip_pool_id); + let ip_pool = IpPool { + identity: IpPoolIdentity::new( + uuid::uuid!("93fea64d-5d0a-4cc6-8f94-7c527ee640a9"), + IdentityMetadataCreateParams { + name: "some-pool".parse().unwrap(), + description: String::new(), + }, + ), + ip_version: IpVersion::V4, + rcgen: 0, + reservation_type: IpPoolReservationType::ExternalSilos, + }; + let query = reserve_ip_pool_query( + &ip_pool, + IpPoolReservationType::OxideInternal, + ); let _ = query .explain_async(&conn) .await @@ -2828,14 +2965,54 @@ mod test { } #[tokio::test] - async fn can_explain_revoke_ip_pool_query() { - let logctx = dev::test_setup_log("can_explain_revoke_ip_pool_query"); + async fn expectorate_reserve_external_ip_pool_query() { + let ip_pool = IpPool { + identity: IpPoolIdentity::new( + uuid::uuid!("93fea64d-5d0a-4cc6-8f94-7c527ee640a9"), + IdentityMetadataCreateParams { + name: "some-pool".parse().unwrap(), + description: String::new(), + }, + ), + ip_version: IpVersion::V4, + rcgen: 0, + reservation_type: IpPoolReservationType::ExternalSilos, + }; + let query = reserve_ip_pool_query( + &ip_pool, + IpPoolReservationType::OxideInternal, + ); + expectorate_query_contents( + &query, + "tests/output/reserve_external_ip_pool.sql", + ) + .await; + } + + #[tokio::test] + async fn can_explain_reserve_internal_ip_pool_query() { + let logctx = + dev::test_setup_log("can_explain_reserve_internal_ip_pool_query"); let db = TestDatabase::new_with_pool(&logctx.log).await; let pool = db.pool(); let conn = pool.claim().await.unwrap(); - let ip_pool_id = uuid::uuid!("27b74f5d-0a76-45db-8768-cd043c644f1d"); - let query = revoke_ip_pool_query(ip_pool_id); + let ip_pool = IpPool { + identity: IpPoolIdentity::new( + uuid::uuid!("93fea64d-5d0a-4cc6-8f94-7c527ee640a9"), + IdentityMetadataCreateParams { + name: "some-pool".parse().unwrap(), + description: String::new(), + }, + ), + ip_version: IpVersion::V4, + rcgen: 0, + reservation_type: IpPoolReservationType::OxideInternal, + }; + let query = reserve_ip_pool_query( + &ip_pool, + IpPoolReservationType::ExternalSilos, + ); let _ = query .explain_async(&conn) .await @@ -2844,4 +3021,29 @@ mod test { db.terminate().await; logctx.cleanup_successful(); } + + #[tokio::test] + async fn expectorate_reserve_internal_ip_pool_query() { + let ip_pool = IpPool { + identity: IpPoolIdentity::new( + uuid::uuid!("93fea64d-5d0a-4cc6-8f94-7c527ee640a9"), + IdentityMetadataCreateParams { + name: "some-pool".parse().unwrap(), + description: String::new(), + }, + ), + ip_version: IpVersion::V4, + rcgen: 0, + reservation_type: IpPoolReservationType::OxideInternal, + }; + let query = reserve_ip_pool_query( + &ip_pool, + IpPoolReservationType::ExternalSilos, + ); + expectorate_query_contents( + &query, + 
"tests/output/reserve_internal_ip_pool.sql", + ) + .await; + } } diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 83eb769c77b..0d69609167b 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -986,13 +986,13 @@ impl DataStore { self.rack_insert(opctx, &db::model::Rack::new(rack_id)).await?; - // Insert a delegated IP Pool for both IP versions. + // Insert an IP Pool for both IP versions, reserved for Oxide internal + // use. for (version, name) in [ (IpVersion::V4, SERVICE_IPV4_POOL_NAME), (IpVersion::V6, SERVICE_IPV6_POOL_NAME), ] { - // Create a delegated IP Pool, if needed. - let internal_pool = db::model::IpPool::new_delegated( + let internal_pool = db::model::IpPool::new_oxide_internal( &IdentityMetadataCreateParams { name: name.parse::().unwrap(), description: format!( diff --git a/nexus/db-queries/tests/output/ip_pool_external_silo_link.sql b/nexus/db-queries/tests/output/ip_pool_external_silo_link.sql index 6e5b05a377d..8b1876dfe17 100644 --- a/nexus/db-queries/tests/output/ip_pool_external_silo_link.sql +++ b/nexus/db-queries/tests/output/ip_pool_external_silo_link.sql @@ -2,21 +2,21 @@ WITH ip_pool AS ( SELECT - CAST(IF(is_delegated, 'bad-link-type', $1) AS UUID) AS id + CAST(IF(reservation_type != $1, 'bad-link-type', $2) AS UUID) AS id FROM ip_pool WHERE - id = $2 AND time_deleted IS NULL + id = $3 AND time_deleted IS NULL ), - silo AS (SELECT id FROM silo WHERE id = $3 AND time_deleted IS NULL) + silo AS (SELECT id FROM silo WHERE id = $4 AND time_deleted IS NULL) INSERT INTO ip_pool_resource (ip_pool_id, resource_type, resource_id, is_default) SELECT CAST(COALESCE(CAST(ip.id AS STRING), 'ip-pool-deleted') AS UUID), - $4, + $5, CAST(COALESCE(CAST(s.id AS STRING), 'silo-deleted') AS UUID), - $5 + $6 FROM (SELECT 1) AS dummy LEFT JOIN ip_pool AS ip ON true LEFT JOIN silo AS s ON true RETURNING diff --git a/nexus/db-queries/tests/output/reserve_external_ip_pool.sql b/nexus/db-queries/tests/output/reserve_external_ip_pool.sql new file mode 100644 index 00000000000..8dacb652b63 --- /dev/null +++ b/nexus/db-queries/tests/output/reserve_external_ip_pool.sql @@ -0,0 +1,16 @@ +UPDATE + ip_pool +SET + reservation_type = $1, time_modified = now() +WHERE + id = $2 + AND time_deleted IS NULL + AND reservation_type = $3 + AND CAST( + IF( + EXISTS(SELECT 1 FROM ip_pool_resource WHERE ip_pool_id = $4 LIMIT 1), + 'bad-link-type', + 'TRUE' + ) + AS BOOL + ) diff --git a/nexus/db-queries/tests/output/reserve_internal_ip_pool.sql b/nexus/db-queries/tests/output/reserve_internal_ip_pool.sql new file mode 100644 index 00000000000..4421c5612b2 --- /dev/null +++ b/nexus/db-queries/tests/output/reserve_internal_ip_pool.sql @@ -0,0 +1,38 @@ +UPDATE + ip_pool +SET + reservation_type = $1, time_modified = now() +WHERE + id = $2 + AND time_deleted IS NULL + AND reservation_type = $3 + AND ( + SELECT + CAST( + IF( + EXISTS( + SELECT + 1 + FROM + external_ip + WHERE + ip_pool_id = $4 AND external_ip.is_service AND time_deleted IS NULL + LIMIT + 1 + ), + 1 / 0, + 1 + ) + AS BOOL + ) + ) + AND CAST( + IF( + (SELECT count(1) FROM ip_pool WHERE time_deleted IS NULL AND reservation_type = $5 LIMIT 2) + >= 2, + '1', + $6 + ) + AS INT8 + ) + = 1 diff --git a/nexus/db-schema/src/enums.rs b/nexus/db-schema/src/enums.rs index 5ea98088306..9abb574eb86 100644 --- a/nexus/db-schema/src/enums.rs +++ b/nexus/db-schema/src/enums.rs @@ -57,6 +57,7 @@ define_enums! 
{ InvZoneManifestSourceEnum => "inv_zone_manifest_source", IpAttachStateEnum => "ip_attach_state", IpKindEnum => "ip_kind", + IpPoolReservationTypeEnum => "ip_pool_reservation_type", IpPoolResourceTypeEnum => "ip_pool_resource_type", IpVersionEnum => "ip_version", MigrationStateEnum => "migration_state", diff --git a/nexus/db-schema/src/schema.rs b/nexus/db-schema/src/schema.rs index eb18c99474d..768b19ffcac 100644 --- a/nexus/db-schema/src/schema.rs +++ b/nexus/db-schema/src/schema.rs @@ -630,7 +630,7 @@ table! { time_deleted -> Nullable, ip_version -> crate::enums::IpVersionEnum, rcgen -> Int8, - is_delegated -> Bool, + reservation_type -> crate::enums::IpPoolReservationTypeEnum, } } diff --git a/schema/crdb/add-ip-pool-is-delegated-column/up01.sql b/schema/crdb/add-ip-pool-is-delegated-column/up01.sql deleted file mode 100644 index 4d14275932a..00000000000 --- a/schema/crdb/add-ip-pool-is-delegated-column/up01.sql +++ /dev/null @@ -1,3 +0,0 @@ -ALTER TABLE omicron.public.ip_pool -ADD COLUMN IF NOT EXISTS is_delegated BOOL NOT NULL -DEFAULT FALSE; diff --git a/schema/crdb/add-ip-pool-reservation-type-column/up01.sql b/schema/crdb/add-ip-pool-reservation-type-column/up01.sql new file mode 100644 index 00000000000..dea4b43b3ec --- /dev/null +++ b/schema/crdb/add-ip-pool-reservation-type-column/up01.sql @@ -0,0 +1,5 @@ +CREATE TYPE IF NOT EXISTS +omicron.public.ip_pool_reservation_type AS ENUM ( + "external_silos", + "oxide_internal" +); diff --git a/schema/crdb/add-ip-pool-reservation-type-column/up02.sql b/schema/crdb/add-ip-pool-reservation-type-column/up02.sql new file mode 100644 index 00000000000..c56b148d92a --- /dev/null +++ b/schema/crdb/add-ip-pool-reservation-type-column/up02.sql @@ -0,0 +1,3 @@ +ALTER TABLE omicron.public.ip_pool +ADD COLUMN IF NOT EXISTS reservation_type omicron.public.ip_pool_reservation_type NOT NULL +DEFAULT 'external_silos'; diff --git a/schema/crdb/add-ip-pool-is-delegated-column/up02.sql b/schema/crdb/add-ip-pool-reservation-type-column/up03.sql similarity index 74% rename from schema/crdb/add-ip-pool-is-delegated-column/up02.sql rename to schema/crdb/add-ip-pool-reservation-type-column/up03.sql index e9be4e2308e..5afdb6bb48a 100644 --- a/schema/crdb/add-ip-pool-is-delegated-column/up02.sql +++ b/schema/crdb/add-ip-pool-reservation-type-column/up03.sql @@ -1,4 +1,4 @@ SET LOCAL disallow_full_table_scans = 'off'; UPDATE ip_pool -SET is_delegated = TRUE +SET ip_pool_delgation_type = 'oxide_internal' WHERE name = 'oxide-service-pool-v4' OR name = 'oxide-service-pool-v6'; diff --git a/schema/crdb/add-ip-pool-is-delegated-column/up03.sql b/schema/crdb/add-ip-pool-reservation-type-column/up04.sql similarity index 62% rename from schema/crdb/add-ip-pool-is-delegated-column/up03.sql rename to schema/crdb/add-ip-pool-reservation-type-column/up04.sql index 296dfb00470..e2ec88a3969 100644 --- a/schema/crdb/add-ip-pool-is-delegated-column/up03.sql +++ b/schema/crdb/add-ip-pool-reservation-type-column/up04.sql @@ -1,3 +1,3 @@ ALTER TABLE omicron.public.ip_pool -ALTER COLUMN is_delegated +ALTER COLUMN reservation_type DROP DEFAULT; diff --git a/schema/crdb/add-ip-pool-is-delegated-column/up04.sql b/schema/crdb/add-ip-pool-reservation-type-column/up05.sql similarity index 100% rename from schema/crdb/add-ip-pool-is-delegated-column/up04.sql rename to schema/crdb/add-ip-pool-reservation-type-column/up05.sql diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index c36c2fffb6d..acab3d76142 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ 
-2086,6 +2086,12 @@ CREATE TYPE IF NOT EXISTS omicron.public.ip_version AS ENUM ( 'v6' ); +/* Indicates what an IP Pool is reserved for. */ +CREATE TYPE IF NOT EXISTS omicron.public.ip_pool_reservation_type AS ENUM ( + 'external_silos', + 'oxide_internal' +); + /* * An IP Pool, a collection of zero or more IP ranges for external IPs. */ @@ -2104,8 +2110,8 @@ CREATE TABLE IF NOT EXISTS omicron.public.ip_pool ( /* The IP version of the ranges contained in this pool. */ ip_version omicron.public.ip_version NOT NULL, - /* Indicates the pool is delegated by an operator for internal Oxide use */ - is_delegated BOOL NOT NULL + /* Indicates what the IP Pool is reserved for. */ + reservation_type omicron.public.ip_pool_reservation_type NOT NULL ); /* From d59a1ca1d70ea14e67bea9b860ff98fc1160573a Mon Sep 17 00:00:00 2001 From: Benjamin Naecker Date: Tue, 7 Oct 2025 19:31:42 +0000 Subject: [PATCH 6/8] SQL typo --- schema/crdb/add-ip-pool-reservation-type-column/up01.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/schema/crdb/add-ip-pool-reservation-type-column/up01.sql b/schema/crdb/add-ip-pool-reservation-type-column/up01.sql index dea4b43b3ec..c3c30fc8148 100644 --- a/schema/crdb/add-ip-pool-reservation-type-column/up01.sql +++ b/schema/crdb/add-ip-pool-reservation-type-column/up01.sql @@ -1,5 +1,5 @@ CREATE TYPE IF NOT EXISTS omicron.public.ip_pool_reservation_type AS ENUM ( - "external_silos", - "oxide_internal" + 'external_silos', + 'oxide_internal' ); From 44a947c2baa684130b02be40c158758003bfb5ad Mon Sep 17 00:00:00 2001 From: Benjamin Naecker Date: Tue, 7 Oct 2025 21:54:32 +0000 Subject: [PATCH 7/8] More typos --- schema/crdb/add-ip-pool-reservation-type-column/up03.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/schema/crdb/add-ip-pool-reservation-type-column/up03.sql b/schema/crdb/add-ip-pool-reservation-type-column/up03.sql index 5afdb6bb48a..f58953bd2e8 100644 --- a/schema/crdb/add-ip-pool-reservation-type-column/up03.sql +++ b/schema/crdb/add-ip-pool-reservation-type-column/up03.sql @@ -1,4 +1,4 @@ SET LOCAL disallow_full_table_scans = 'off'; UPDATE ip_pool -SET ip_pool_delgation_type = 'oxide_internal' +SET reservation_type = 'oxide_internal' WHERE name = 'oxide-service-pool-v4' OR name = 'oxide-service-pool-v6'; From beb5523ce96687f4b8e9b9b272b2724158761087 Mon Sep 17 00:00:00 2001 From: Benjamin Naecker Date: Wed, 8 Oct 2025 17:02:39 +0000 Subject: [PATCH 8/8] More review feedback --- nexus/db-model/src/ip_pool.rs | 25 ++--- nexus/db-queries/src/db/datastore/ip_pool.rs | 102 +++++++++++++++--- nexus/db-queries/src/db/datastore/rack.rs | 3 +- .../db-queries/src/db/queries/external_ip.rs | 2 + nexus/src/app/ip_pool.rs | 7 +- nexus/tests/integration_tests/ip_pools.rs | 1 + .../up06.sql | 4 + 7 files changed, 111 insertions(+), 33 deletions(-) create mode 100644 schema/crdb/add-ip-pool-reservation-type-column/up06.sql diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs index 811d33820bd..c105a662d8c 100644 --- a/nexus/db-model/src/ip_pool.rs +++ b/nexus/db-model/src/ip_pool.rs @@ -134,6 +134,7 @@ impl IpPool { pub fn new( pool_identity: &external::IdentityMetadataCreateParams, ip_version: IpVersion, + reservation_type: IpPoolReservationType, ) -> Self { Self { identity: IpPoolIdentity::new( @@ -142,23 +143,7 @@ impl IpPool { ), ip_version, rcgen: 0, - reservation_type: IpPoolReservationType::ExternalSilos, - } - } - - /// Create a new pool reserved for Oxide's internal use. 
- pub fn new_oxide_internal( - pool_identity: &external::IdentityMetadataCreateParams, - ip_version: IpVersion, - ) -> Self { - Self { - identity: IpPoolIdentity::new( - Uuid::new_v4(), - pool_identity.clone(), - ), - ip_version, - rcgen: 0, - reservation_type: IpPoolReservationType::OxideInternal, + reservation_type, } } @@ -167,8 +152,9 @@ impl IpPool { /// The pool is reserved for external customer Silos. pub fn new_v4( pool_identity: &external::IdentityMetadataCreateParams, + reservation_type: IpPoolReservationType, ) -> Self { - Self::new(pool_identity, IpVersion::V4) + Self::new(pool_identity, IpVersion::V4, reservation_type) } /// Create a new IPv6 IP Pool. @@ -176,8 +162,9 @@ impl IpPool { /// The pool is reserved for external customer Silos. pub fn new_v6( pool_identity: &external::IdentityMetadataCreateParams, + reservation_type: IpPoolReservationType, ) -> Self { - Self::new(pool_identity, IpVersion::V6) + Self::new(pool_identity, IpVersion::V6, reservation_type) } } diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 0c7594583a4..bcc12654491 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -1836,7 +1836,14 @@ mod test { description: "".to_string(), }; let pool1_for_silo = datastore - .ip_pool_create(&opctx, IpPool::new(&identity, IpVersion::V4)) + .ip_pool_create( + &opctx, + IpPool::new( + &identity, + IpVersion::V4, + IpPoolReservationType::ExternalSilos, + ), + ) .await .expect("Failed to create IP pool"); @@ -1922,7 +1929,14 @@ mod test { description: "".to_string(), }; let second_silo_default = datastore - .ip_pool_create(&opctx, IpPool::new(&identity, IpVersion::V4)) + .ip_pool_create( + &opctx, + IpPool::new( + &identity, + IpVersion::V4, + IpPoolReservationType::ExternalSilos, + ), + ) .await .expect("Failed to create pool"); let err = datastore @@ -2019,7 +2033,14 @@ mod test { description: "".to_string(), }; let other_pool = datastore - .ip_pool_create(&opctx, IpPool::new(&identity, version)) + .ip_pool_create( + &opctx, + IpPool::new( + &identity, + version, + IpPoolReservationType::ExternalSilos, + ), + ) .await .expect("Failed to create IP pool"); assert_eq!(other_pool.ip_version, version); @@ -2085,7 +2106,14 @@ mod test { description: "".to_string(), }; let pool = datastore - .ip_pool_create(&opctx, IpPool::new(&identity, IpVersion::V4)) + .ip_pool_create( + &opctx, + IpPool::new( + &identity, + IpVersion::V4, + IpPoolReservationType::ExternalSilos, + ), + ) .await .expect("Failed to create IP pool"); let authz_pool = authz::IpPool::new( @@ -2189,7 +2217,14 @@ mod test { description: "".to_string(), }; let pool = datastore - .ip_pool_create(&opctx, IpPool::new(&identity, IpVersion::V6)) + .ip_pool_create( + &opctx, + IpPool::new( + &identity, + IpVersion::V6, + IpPoolReservationType::ExternalSilos, + ), + ) .await .expect("Failed to create IP pool"); let authz_pool = authz::IpPool::new( @@ -2323,7 +2358,14 @@ mod test { description: "".to_string(), }; let pool = datastore - .ip_pool_create(&opctx, IpPool::new(&identity, version)) + .ip_pool_create( + &opctx, + IpPool::new( + &identity, + version, + IpPoolReservationType::ExternalSilos, + ), + ) .await .expect("Failed to create IP pool"); let authz_pool = authz::IpPool::new( @@ -2366,7 +2408,14 @@ mod test { description: "".to_string(), }; let pool = datastore - .ip_pool_create(opctx, IpPool::new(&identity, IpVersion::V4)) + .ip_pool_create( + opctx, + IpPool::new( + &identity, + IpVersion::V4, 
+ IpPoolReservationType::ExternalSilos, + ), + ) .await .expect("Failed to create IP pool"); customer_pools.push(pool); @@ -2384,7 +2433,11 @@ mod test { let pool = datastore .ip_pool_create( opctx, - IpPool::new_oxide_internal(&identity, IpVersion::V4), + IpPool::new( + &identity, + IpVersion::V4, + IpPoolReservationType::OxideInternal, + ), ) .await .expect("Failed to create reserved IP pool"); @@ -2501,7 +2554,11 @@ mod test { let ip_pool = datastore .ip_pool_create( opctx, - IpPool::new_oxide_internal(&identity, IpVersion::V4), + IpPool::new( + &identity, + IpVersion::V4, + IpPoolReservationType::OxideInternal, + ), ) .await .expect("Failed to create IP pool"); @@ -2540,7 +2597,14 @@ mod test { description: "".to_string(), }; let ip_pool = datastore - .ip_pool_create(opctx, IpPool::new(&identity, IpVersion::V4)) + .ip_pool_create( + opctx, + IpPool::new( + &identity, + IpVersion::V4, + IpPoolReservationType::ExternalSilos, + ), + ) .await .expect("Failed to create IP pool"); @@ -2595,7 +2659,14 @@ mod test { description: "".to_string(), }; let ip_pool = datastore - .ip_pool_create(opctx, IpPool::new(&identity, IpVersion::V4)) + .ip_pool_create( + opctx, + IpPool::new( + &identity, + IpVersion::V4, + IpPoolReservationType::ExternalSilos, + ), + ) .await .expect("Failed to create IP pool"); @@ -2644,7 +2715,14 @@ mod test { description: "".to_string(), }; let ip_pool = datastore - .ip_pool_create(opctx, IpPool::new(&identity, IpVersion::V4)) + .ip_pool_create( + opctx, + IpPool::new( + &identity, + IpVersion::V4, + IpPoolReservationType::ExternalSilos, + ), + ) .await .expect("Failed to create IP pool"); diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 0d69609167b..95073e81135 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -992,7 +992,7 @@ impl DataStore { (IpVersion::V4, SERVICE_IPV4_POOL_NAME), (IpVersion::V6, SERVICE_IPV6_POOL_NAME), ] { - let internal_pool = db::model::IpPool::new_oxide_internal( + let internal_pool = db::model::IpPool::new( &IdentityMetadataCreateParams { name: name.parse::().unwrap(), description: format!( @@ -1000,6 +1000,7 @@ impl DataStore { ), }, version, + nexus_db_model::IpPoolReservationType::OxideInternal, ); match self.ip_pool_create(opctx, internal_pool).await { Ok(_) | Err(Error::ObjectAlreadyExists { .. 
}) => {} diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index c7ae3743f3b..a045c726e01 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -879,6 +879,7 @@ mod tests { use nexus_db_model::ByteCount; use nexus_db_model::Instance; use nexus_db_model::InstanceCpuCount; + use nexus_db_model::IpPoolReservationType; use nexus_db_model::IpPoolResource; use nexus_db_model::IpPoolResourceType; use nexus_db_model::IpVersion; @@ -927,6 +928,7 @@ mod tests { description: format!("ip pool {}", name), }, IpVersion::V4, + IpPoolReservationType::ExternalSilos, ); self.db diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index f51bc8a5541..ecb5d2e9041 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -9,6 +9,7 @@ use crate::external_api::shared::IpRange; use ipnetwork::IpNetwork; use nexus_db_lookup::LookupPath; use nexus_db_lookup::lookup; +use nexus_db_model::IpPoolReservationType; use nexus_db_model::IpVersion; use nexus_db_queries::authz; use nexus_db_queries::authz::ApiResource; @@ -79,7 +80,11 @@ impl super::Nexus { "IPv6 pools are not yet supported", )); } - let pool = db::model::IpPool::new(&pool_params.identity, ip_version); + let pool = db::model::IpPool::new( + &pool_params.identity, + ip_version, + IpPoolReservationType::ExternalSilos, + ); self.db_datastore.ip_pool_create(opctx, pool).await } diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index 881ef3eb817..19ce8bcf21c 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -985,6 +985,7 @@ async fn test_ipv6_ip_pool_utilization_total( nexus_db_model::IpPool::new( &identity, nexus_db_model::IpVersion::V6, + nexus_db_model::IpPoolReservationType::ExternalSilos, ), ) .await diff --git a/schema/crdb/add-ip-pool-reservation-type-column/up06.sql b/schema/crdb/add-ip-pool-reservation-type-column/up06.sql new file mode 100644 index 00000000000..7fb76218367 --- /dev/null +++ b/schema/crdb/add-ip-pool-reservation-type-column/up06.sql @@ -0,0 +1,4 @@ +SET LOCAL disallow_full_table_scans = 'off'; +DELETE FROM +ip_pool_resource +WHERE resource_type = 'silo' AND resource_id = '001de000-5110-4000-8000-000000000001';
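
As a final, hypothetical illustration (not part of these patches), the new
column makes the reservation state of every pool directly inspectable once the
migration has run; on an existing system only the two built-in service pools
should show up as internally reserved:

    -- Hypothetical sanity check against the new reservation_type column.
    SELECT id, name, reservation_type
    FROM omicron.public.ip_pool
    WHERE time_deleted IS NULL
      AND reservation_type = 'oxide_internal';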