diff --git a/common/src/address.rs b/common/src/address.rs
index 3dfcdfb8d60..84e69c15af1 100644
--- a/common/src/address.rs
+++ b/common/src/address.rs
@@ -22,6 +22,18 @@ pub const AZ_PREFIX: u8 = 48;
 pub const RACK_PREFIX: u8 = 56;
 pub const SLED_PREFIX: u8 = 64;
 
+// Multicast constants
+
+/// IPv4 Source-Specific Multicast (SSM) subnet as defined in RFC 4607:
+/// <https://datatracker.ietf.org/doc/html/rfc4607>.
+pub const IPV4_SSM_SUBNET: oxnet::Ipv4Net =
+    oxnet::Ipv4Net::new_unchecked(Ipv4Addr::new(232, 0, 0, 0), 8);
+
+/// IPv6 Source-Specific Multicast (SSM) flag field value as defined in RFC 4607:
+/// <https://datatracker.ietf.org/doc/html/rfc4607>.
+/// This is the flags nibble (high nibble of second byte) for FF3x::/32 addresses.
+pub const IPV6_SSM_FLAG_FIELD: u8 = 3;
+
 /// maximum possible value for a tcp or udp port
 pub const MAX_PORT: u16 = u16::MAX;
 
diff --git a/common/src/vlan.rs b/common/src/vlan.rs
index 5e5765ffe20..67c9d4c343e 100644
--- a/common/src/vlan.rs
+++ b/common/src/vlan.rs
@@ -5,7 +5,9 @@
 //! VLAN ID wrapper.
 
 use crate::api::external::Error;
+use schemars::JsonSchema;
 use serde::Deserialize;
+use serde::Serialize;
 use std::fmt;
 use std::str::FromStr;
 
@@ -13,7 +15,8 @@ use std::str::FromStr;
 pub const VLAN_MAX: u16 = 4094;
 
 /// Wrapper around a VLAN ID, ensuring it is valid.
-#[derive(Debug, Deserialize, Clone, Copy)]
+#[derive(Debug, PartialEq, Serialize, Deserialize, Clone, Copy, JsonSchema)]
+#[serde(rename = "VlanId")]
 pub struct VlanID(u16);
 
 impl VlanID {
@@ -44,3 +47,20 @@ impl FromStr for VlanID {
         )
     }
 }
+
+impl From<VlanID> for u16 {
+    fn from(vlan_id: VlanID) -> u16 {
+        vlan_id.0
+    }
+}
+
+impl slog::Value for VlanID {
+    fn serialize(
+        &self,
+        _record: &slog::Record,
+        key: slog::Key,
+        serializer: &mut dyn slog::Serializer,
+    ) -> slog::Result {
+        serializer.emit_u16(key, self.0)
+    }
+}
diff --git a/docs/control-plane-architecture.adoc b/docs/control-plane-architecture.adoc
index 88b91cb7b30..12ecc6999a3 100644
--- a/docs/control-plane-architecture.adoc
+++ b/docs/control-plane-architecture.adoc
@@ -14,6 +14,8 @@ NOTE: Much of this material originally came from <> and <>. This
 NOTE: The RFD references in this documentation may be Oxide-internal. Where possible, we're trying to move relevant documentation from those RFDs into docs here.
 
+See also: link:../notes/multicast-architecture.adoc[Multicast Architecture: VLAN Scope]
+
 == What is the control plane
 
 In software systems the terms **data plane** and **control plane** are often used to refer to the parts of the system that directly provide resources to users (the data plane) and the parts that support the configuration, control, monitoring, and operation of the system (the control plane). Within the Oxide system, we say that the data plane comprises those parts that provide CPU resources (including both the host CPU and hypervisor software), storage resources, and network resources. The control plane provides the APIs through which users provision, configure, and monitor these resources and the mechanisms through which these APIs are implemented. Also part of the control plane are the APIs and facilities through which operators manage the system itself, including fault management, alerting, software updates for various components of the system, and so on.
 
diff --git a/docs/networking.adoc b/docs/networking.adoc
index 84c95832c0d..9d4d1ea6936 100644
--- a/docs/networking.adoc
+++ b/docs/networking.adoc
@@ -6,6 +6,8 @@
 This is a very rough introduction to how networking works within the Oxide system and particularly the control plane (Omicron).
 Much more information is available in various RFDs, particularly <>.
 
+See also: link:../notes/multicast-architecture.adoc[Multicast Architecture: VLAN Scope]
+
 == IPv6: the least you need to know
 
 While IPv4 can be used for connectivity between Omicron and the outside world, everything else in the system uses IPv6. This section provides a _very_ cursory introduction to IPv6 for people only familiar with IPv4. You can skip this if you know IPv6. If you want slightly more detail than what's here, see https://www.roesen.org/files/ipv6_cheat_sheet.pdf[this cheat sheet].
 
diff --git a/end-to-end-tests/src/bin/bootstrap.rs b/end-to-end-tests/src/bin/bootstrap.rs
index 26f7a30dc16..5aa9cf22f7f 100644
--- a/end-to-end-tests/src/bin/bootstrap.rs
+++ b/end-to-end-tests/src/bin/bootstrap.rs
@@ -6,8 +6,8 @@ use end_to_end_tests::helpers::{
 use omicron_test_utils::dev::poll::{CondCheckError, wait_for_condition};
 use oxide_client::types::{
     ByteCount, DeviceAccessTokenRequest, DeviceAuthRequest, DeviceAuthVerify,
-    DiskCreate, DiskSource, IpPoolCreate, IpPoolLinkSilo, IpVersion, NameOrId,
-    SiloQuotasUpdate,
+    DiskCreate, DiskSource, IpPoolCreate, IpPoolLinkSilo, IpPoolType,
+    IpVersion, NameOrId, SiloQuotasUpdate,
 };
 use oxide_client::{
     ClientConsoleAuthExt, ClientDisksExt, ClientProjectsExt,
@@ -53,6 +53,7 @@ async fn run_test() -> Result<()> {
             name: pool_name.parse().unwrap(),
             description: "Default IP pool".to_string(),
             ip_version,
+            pool_type: IpPoolType::Unicast,
         })
         .send()
         .await?;
diff --git a/end-to-end-tests/src/bin/commtest.rs b/end-to-end-tests/src/bin/commtest.rs
index 1da1cd1c4df..2fae239db59 100644
--- a/end-to-end-tests/src/bin/commtest.rs
+++ b/end-to-end-tests/src/bin/commtest.rs
@@ -7,8 +7,8 @@ use oxide_client::{
     ClientSystemHardwareExt, ClientSystemIpPoolsExt, ClientSystemStatusExt,
     ClientVpcsExt,
     types::{
-        IpPoolCreate, IpPoolLinkSilo, IpRange, IpVersion, Name, NameOrId,
-        PingStatus, ProbeCreate, ProbeInfo, ProjectCreate,
+        IpPoolCreate, IpPoolLinkSilo, IpPoolType, IpRange, IpVersion, Name,
+        NameOrId, PingStatus, ProbeCreate, ProbeInfo, ProjectCreate,
         UsernamePasswordCredentials,
     },
 };
@@ -295,6 +295,7 @@ async fn rack_prepare(
             name: pool_name.parse().unwrap(),
             description: "Default IP pool".to_string(),
             ip_version,
+            pool_type: IpPoolType::Unicast,
         })
         .send()
         .await?;
diff --git a/nexus/db-model/src/generation.rs b/nexus/db-model/src/generation.rs
index 751cb98f3c7..c1b4fba62c5 100644
--- a/nexus/db-model/src/generation.rs
+++ b/nexus/db-model/src/generation.rs
@@ -8,6 +8,7 @@ use diesel::pg::Pg;
 use diesel::serialize::{self, ToSql};
 use diesel::sql_types;
 use omicron_common::api::external;
+use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
 use std::convert::TryFrom;
 
@@ -23,6 +24,7 @@
     FromSqlRow,
     Serialize,
     Deserialize,
+    JsonSchema,
 )]
 #[diesel(sql_type = sql_types::BigInt)]
 #[repr(transparent)]
diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs
index 819be85ec8e..817206ed24d 100644
--- a/nexus/db-model/src/ip_pool.rs
+++ b/nexus/db-model/src/ip_pool.rs
@@ -17,7 +17,6 @@ use nexus_db_schema::schema::ip_pool_range;
 use nexus_db_schema::schema::ip_pool_resource;
 use nexus_types::external_api::params;
 use nexus_types::external_api::shared;
-use nexus_types::external_api::shared::IpRange;
 use nexus_types::external_api::views;
 use nexus_types::identity::Resource;
 use omicron_common::api::external;
@@ -72,6 +71,24 @@ impl From<IpVersion> for shared::IpVersion {
     }
 }
 
+impl From<shared::IpPoolType> for IpPoolType {
+    fn from(value: shared::IpPoolType) -> Self {
+        match value {
+            shared::IpPoolType::Unicast => Self::Unicast,
+            shared::IpPoolType::Multicast => Self::Multicast,
+        }
+    }
+}
+
+impl From<IpPoolType> for shared::IpPoolType {
+    fn from(value: IpPoolType) -> Self {
+        match value {
+            IpPoolType::Unicast => Self::Unicast,
+            IpPoolType::Multicast => Self::Multicast,
+        }
+    }
+}
+
 /// An IP Pool is a collection of IP addresses external to the rack.
 ///
 /// IP pools can be external or internal. External IP pools can be associated
@@ -82,16 +99,17 @@
 pub struct IpPool {
     #[diesel(embed)]
     pub identity: IpPoolIdentity,
-
     /// The IP version of the pool.
     pub ip_version: IpVersion,
-
+    /// Pool type for unicast (default) vs multicast pools.
+    pub pool_type: IpPoolType,
     /// Child resource generation number, for optimistic concurrency control of
     /// the contained ranges.
     pub rcgen: i64,
 }
 
 impl IpPool {
+    /// Creates a new unicast (default) IP pool.
     pub fn new(
         pool_identity: &external::IdentityMetadataCreateParams,
         ip_version: IpVersion,
@@ -102,6 +120,23 @@ impl IpPool {
                 pool_identity.clone(),
             ),
             ip_version,
+            pool_type: IpPoolType::Unicast,
+            rcgen: 0,
+        }
+    }
+
+    /// Creates a new multicast IP pool.
+    pub fn new_multicast(
+        pool_identity: &external::IdentityMetadataCreateParams,
+        ip_version: IpVersion,
+    ) -> Self {
+        Self {
+            identity: IpPoolIdentity::new(
+                Uuid::new_v4(),
+                pool_identity.clone(),
+            ),
+            ip_version,
+            pool_type: IpPoolType::Multicast,
             rcgen: 0,
         }
     }
@@ -121,11 +156,21 @@
 impl From<IpPool> for views::IpPool {
     fn from(pool: IpPool) -> Self {
-        Self { identity: pool.identity(), ip_version: pool.ip_version.into() }
+        let identity = pool.identity();
+        let pool_type = pool.pool_type;
+
+        Self {
+            identity,
+            pool_type: pool_type.into(),
+            ip_version: pool.ip_version.into(),
+        }
     }
 }
 
-/// A set of updates to an IP Pool
+/// A set of updates to an IP Pool.
+///
+/// We do not modify the pool type after creation (e.g. unicast -> multicast or
+/// vice versa), as that would require a migration of all associated resources.
 #[derive(AsChangeset)]
 #[diesel(table_name = ip_pool)]
 pub struct IpPoolUpdate {
@@ -153,6 +198,25 @@
     Silo => b"silo"
 );
 
+impl_enum_type!(
+    IpPoolTypeEnum:
+
+    #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, PartialEq)]
+    pub enum IpPoolType;
+
+    Unicast => b"unicast"
+    Multicast => b"multicast"
+);
+
+impl std::fmt::Display for IpPoolType {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            IpPoolType::Unicast => write!(f, "unicast"),
+            IpPoolType::Multicast => write!(f, "multicast"),
+        }
+    }
+}
+
 #[derive(Queryable, Insertable, Selectable, Clone, Copy, Debug, PartialEq)]
 #[diesel(table_name = ip_pool_resource)]
 pub struct IpPoolResource {
@@ -192,7 +256,7 @@ pub struct IpPoolRange {
 }
 
 impl IpPoolRange {
-    pub fn new(range: &IpRange, ip_pool_id: Uuid) -> Self {
+    pub fn new(range: &shared::IpRange, ip_pool_id: Uuid) -> Self {
         let now = Utc::now();
         let first_address = range.first_address();
         let last_address = range.last_address();
@@ -221,20 +285,20 @@
             id: range.id,
             ip_pool_id: range.ip_pool_id,
             time_created: range.time_created,
-            range: IpRange::from(&range),
+            range: shared::IpRange::from(&range),
         }
     }
 }
 
-impl From<&IpPoolRange> for IpRange {
+impl From<&IpPoolRange> for shared::IpRange {
     fn from(range: &IpPoolRange) -> Self {
         let maybe_range =
             match (range.first_address.ip(), range.last_address.ip()) {
                 (IpAddr::V4(first), IpAddr::V4(last)) => {
-                    IpRange::try_from((first, last))
+                    shared::IpRange::try_from((first, last))
                 }
                 (IpAddr::V6(first), IpAddr::V6(last)) => {
-                    IpRange::try_from((first, last))
+                    shared::IpRange::try_from((first, last))
                 }
                 (first, last) => {
                     unreachable!(
diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs
index d4a37d0b6c8..e8c6bd35db1 100644
--- a/nexus/db-model/src/schema_versions.rs
+++ b/nexus/db-model/src/schema_versions.rs
@@ -16,7 +16,7 @@ use std::{collections::BTreeMap, sync::LazyLock};
 ///
 /// This must be updated when you change the database schema. Refer to
 /// schema/crdb/README.adoc in the root of this repository for details.
-pub const SCHEMA_VERSION: Version = Version::new(197, 0, 0);
+pub const SCHEMA_VERSION: Version = Version::new(198, 0, 0);
 
 /// List of all past database schema versions, in *reverse* order
 ///
@@ -28,6 +28,7 @@ static KNOWN_VERSIONS: LazyLock<Vec<KnownVersion>> = LazyLock::new(|| {
     //   | leaving the first copy as an example for the next person.
     //   v
     // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"),
+    KnownVersion::new(198, "multicast-pool-support"),
     KnownVersion::new(197, "scim-users-and-groups"),
     KnownVersion::new(196, "user-provision-type-for-silo-user-and-group"),
     KnownVersion::new(195, "tuf-pruned-index"),
diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs
index 4aa91b46664..4ca47aa6df7 100644
--- a/nexus/db-queries/src/db/datastore/external_ip.rs
+++ b/nexus/db-queries/src/db/datastore/external_ip.rs
@@ -7,7 +7,6 @@ use super::DataStore;
 use super::SQL_BATCH_SIZE;
 use crate::authz;
-use crate::authz::ApiResource;
 use crate::context::OpContext;
 use crate::db::collection_attach::AttachError;
 use crate::db::collection_attach::DatastoreAttachTarget;
@@ -18,6 +17,7 @@ use crate::db::model::FloatingIp;
 use crate::db::model::IncompleteExternalIp;
 use crate::db::model::IpKind;
 use crate::db::model::IpPool;
+use crate::db::model::IpPoolType;
 use crate::db::model::Name;
 use crate::db::pagination::Paginator;
 use crate::db::pagination::paginated;
@@ -87,7 +87,9 @@ impl DataStore {
         probe_id: Uuid,
         pool: Option<authz::IpPool>,
     ) -> CreateResult<ExternalIp> {
-        let authz_pool = self.resolve_pool_for_allocation(opctx, pool).await?;
+        let authz_pool = self
+            .resolve_pool_for_allocation(opctx, pool, IpPoolType::Unicast)
+            .await?;
         let data = IncompleteExternalIp::for_ephemeral_probe(
             ip_id,
             probe_id,
@@ -123,7 +125,9 @@
         // Naturally, we now *need* to destroy the ephemeral IP if the newly alloc'd
         // IP was not attached, including on idempotent success.
-        let authz_pool = self.resolve_pool_for_allocation(opctx, pool).await?;
+        let authz_pool = self
+            .resolve_pool_for_allocation(opctx, pool, IpPoolType::Unicast)
+            .await?;
         let data = IncompleteExternalIp::for_ephemeral(ip_id, authz_pool.id());
 
         // We might not be able to acquire a new IP, but in the event of an
@@ -186,33 +190,6 @@
             .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
     }
 
-    /// If a pool is specified, make sure it's linked to this silo. If a pool is
-    /// not specified, fetch the default pool for this silo. Once the pool is
-    /// resolved (by either method) do an auth check. Then return the pool.
-    async fn resolve_pool_for_allocation(
-        &self,
-        opctx: &OpContext,
-        pool: Option<authz::IpPool>,
-    ) -> LookupResult<authz::IpPool> {
-        let authz_pool = match pool {
-            Some(authz_pool) => {
-                self.ip_pool_fetch_link(opctx, authz_pool.id())
-                    .await
-                    .map_err(|_| authz_pool.not_found())?;
-
-                authz_pool
-            }
-            // If no pool specified, use the default logic
-            None => {
-                let (authz_pool, ..) =
-                    self.ip_pools_fetch_default(opctx).await?;
-                authz_pool
-            }
-        };
-        opctx.authorize(authz::Action::CreateChild, &authz_pool).await?;
-        Ok(authz_pool)
-    }
-
     /// Allocates a floating IP address for instance usage.
     pub async fn allocate_floating_ip(
         &self,
@@ -224,7 +201,9 @@
     ) -> CreateResult<ExternalIp> {
         let ip_id = Uuid::new_v4();
 
-        let authz_pool = self.resolve_pool_for_allocation(opctx, pool).await?;
+        let authz_pool = self
+            .resolve_pool_for_allocation(opctx, pool, IpPoolType::Unicast)
+            .await?;
 
         let data = if let Some(ip) = ip {
             IncompleteExternalIp::for_floating_explicit(
diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs
index f31ae4181fa..b01d115636b 100644
--- a/nexus/db-queries/src/db/datastore/ip_pool.rs
+++ b/nexus/db-queries/src/db/datastore/ip_pool.rs
@@ -7,6 +7,7 @@ use super::DataStore;
 use super::SQL_BATCH_SIZE;
 use crate::authz;
+use crate::authz::ApiResource;
 use crate::context::OpContext;
 use crate::db::collection_insert::AsyncInsertError;
 use crate::db::collection_insert::DatastoreCollection;
@@ -19,6 +20,7 @@ use crate::db::model::IpPool;
 use crate::db::model::IpPoolRange;
 use crate::db::model::IpPoolResource;
 use crate::db::model::IpPoolResourceType;
+use crate::db::model::IpPoolType;
 use crate::db::model::IpPoolUpdate;
 use crate::db::model::Name;
 use crate::db::pagination::Paginator;
@@ -43,6 +45,7 @@ use nexus_db_model::IpVersion;
 use nexus_db_model::Project;
 use nexus_db_model::Vpc;
 use nexus_types::external_api::shared::IpRange;
+use omicron_common::address::{IPV4_SSM_SUBNET, IPV6_SSM_FLAG_FIELD};
 use omicron_common::api::external::CreateResult;
 use omicron_common::api::external::DataPageParams;
 use omicron_common::api::external::DeleteResult;
@@ -98,18 +101,18 @@ const INTERNAL_SILO_DEFAULT_ERROR: &'static str =
     "The internal Silo cannot have a default IP Pool";
 
 impl DataStore {
-    /// List IP Pools
-    pub async fn ip_pools_list(
+    async fn ip_pools_list_with_type(
         &self,
         opctx: &OpContext,
         pagparams: &PaginatedBy<'_>,
+        pool_type: Option<IpPoolType>,
     ) -> ListResultVec<IpPool> {
         use nexus_db_schema::schema::ip_pool;
         opctx
             .authorize(authz::Action::ListChildren, &authz::IP_POOL_LIST)
             .await?;
 
-        match pagparams {
+        let mut q = match pagparams {
             PaginatedBy::Id(pagparams) => {
                 paginated(ip_pool::table, ip_pool::id, pagparams)
             }
@@ -118,14 +121,56 @@
                 ip_pool::name,
                 &pagparams.map_name(|n| Name::ref_cast(n)),
             ),
+        };
+
+        if let Some(pt) = pool_type {
+            q = q.filter(ip_pool::pool_type.eq(pt));
         }
-        .filter(ip_pool::name.ne(SERVICE_IPV4_POOL_NAME))
-        .filter(ip_pool::name.ne(SERVICE_IPV6_POOL_NAME))
-        .filter(ip_pool::time_deleted.is_null())
-        .select(IpPool::as_select())
-        .get_results_async(&*self.pool_connection_authorized(opctx).await?)
+
+        q.filter(ip_pool::name.ne(SERVICE_IPV4_POOL_NAME))
+            .filter(ip_pool::name.ne(SERVICE_IPV6_POOL_NAME))
+            .filter(ip_pool::time_deleted.is_null())
+            .select(IpPool::as_select())
+            .get_results_async(&*self.pool_connection_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
+    }
+
+    /// List IP Pools
+    pub async fn ip_pools_list(
+        &self,
+        opctx: &OpContext,
+        pagparams: &PaginatedBy<'_>,
+    ) -> ListResultVec<IpPool> {
+        self.ip_pools_list_with_type(opctx, pagparams, None).await
+    }
+
+    /// List Multicast IP Pools
+    pub async fn ip_pools_list_multicast(
+        &self,
+        opctx: &OpContext,
+        pagparams: &PaginatedBy<'_>,
+    ) -> ListResultVec<IpPool> {
+        self.ip_pools_list_with_type(
+            opctx,
+            pagparams,
+            Some(IpPoolType::Multicast),
+        )
+        .await
+    }
+
+    /// List Unicast IP Pools
+    pub async fn ip_pools_list_unicast(
+        &self,
+        opctx: &OpContext,
+        pagparams: &PaginatedBy<'_>,
+    ) -> ListResultVec<IpPool> {
+        self.ip_pools_list_with_type(
+            opctx,
+            pagparams,
+            Some(IpPoolType::Unicast),
+        )
         .await
-        .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
     }
 
     /// Look up whether the given pool is available to users in the current
@@ -160,14 +205,15 @@
             .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
     }
 
-    /// Look up the default IP pool for the current silo. If there is no default
-    /// at silo scope, fall back to the next level up, namely the fleet default.
-    /// There should always be a default pool at the fleet level, though this
-    /// query can theoretically fail if someone is able to delete that pool or
-    /// make another one the default and delete that.
-    pub async fn ip_pools_fetch_default(
+    /// Look up the default IP pool for the current silo by pool type.
+    ///
+    /// Related to `ip_pools_fetch_default`, but this one allows you to specify
+    /// the pool type (unicast or multicast) to fetch the default pool of that
+    /// type.
+    async fn ip_pools_fetch_default_by_type(
         &self,
         opctx: &OpContext,
+        pool_type: IpPoolType,
     ) -> LookupResult<(authz::IpPool, IpPool)> {
         use nexus_db_schema::schema::ip_pool;
         use nexus_db_schema::schema::ip_pool_resource;
@@ -183,8 +229,9 @@
         //     .authorize(authz::Action::ListChildren, &authz::IP_POOL_LIST)
         //     .await?;
 
-        let lookup_type =
-            LookupType::ByOther("default IP pool for current silo".to_string());
+        let lookup_type = LookupType::ByOther(format!(
+            "default {pool_type} IP pool for current silo"
+        ));
 
         ip_pool::table
             .inner_join(ip_pool_resource::table)
@@ -194,6 +241,8 @@
             .filter(ip_pool_resource::resource_id.eq(authz_silo_id))
             .filter(ip_pool_resource::is_default.eq(true))
             .filter(ip_pool::time_deleted.is_null())
+            // Filter by pool type
+            .filter(ip_pool::pool_type.eq(pool_type))
             // Order by most specific first so we get the most specific.
             // resource_type is an enum in the DB and therefore gets its order
             // from the definition; it's not lexicographic. So correctness here
@@ -239,8 +288,77 @@
             })
     }
 
-    /// Look up IP pool intended for internal services by their well-known
-    /// names. There are separate IP Pools for IPv4 and IPv6 address ranges.
+    /// Look up the default IP pool for the current silo. If there is no default
+    /// at silo scope, fall back to the next level up, namely the fleet default.
+    ///
+    /// There should always be a default pool at the fleet level, though this
+    /// query can theoretically fail if someone is able to delete that pool or
+    /// make another one the default and delete that.
+    pub async fn ip_pools_fetch_default(
+        &self,
+        opctx: &OpContext,
+    ) -> LookupResult<(authz::IpPool, IpPool)> {
+        // Default to unicast pools (existing behavior)
+        self.ip_pools_fetch_default_by_type(opctx, IpPoolType::Unicast).await
+    }
+
+    /// Pool resolution for allocation by pool type.
+    ///
+    /// If pool is provided, validate it's linked to this silo and is of the
+    /// correct type. If no pool is provided, fetch the default pool of the
+    /// specified type for this silo. Once the pool is resolved (by either
+    /// method) do an auth check. Then return the pool.
+    pub async fn resolve_pool_for_allocation(
+        &self,
+        opctx: &OpContext,
+        pool: Option<authz::IpPool>,
+        pool_type: IpPoolType,
+    ) -> LookupResult<authz::IpPool> {
+        use nexus_db_schema::schema::ip_pool;
+
+        let authz_pool = match pool {
+            Some(authz_pool) => {
+                self.ip_pool_fetch_link(opctx, authz_pool.id())
+                    .await
+                    .map_err(|_| authz_pool.not_found())?;
+
+                let pool_record = {
+                    ip_pool::table
+                        .filter(ip_pool::id.eq(authz_pool.id()))
+                        .filter(ip_pool::time_deleted.is_null())
+                        .select(IpPool::as_select())
+                        .first_async::<IpPool>(
+                            &*self.pool_connection_authorized(opctx).await?,
+                        )
+                        .await
+                        .map_err(|_| authz_pool.not_found())?
+                };
+
+                // Verify it's the correct pool type
+                if pool_record.pool_type != pool_type {
+                    return Err(Error::invalid_request(&format!(
+                        "Pool '{}' is not a {} pool (type: {})",
+                        pool_record.identity.name,
+                        pool_type,
+                        pool_record.pool_type
+                    )));
+                }
+
+                authz_pool
+            }
+            // If no pool specified, use the default pool of the specified type
+            None => {
+                let (authz_pool, ..) = self
+                    .ip_pools_fetch_default_by_type(opctx, pool_type)
+                    .await?;
+                authz_pool
+            }
+        };
+        opctx.authorize(authz::Action::CreateChild, &authz_pool).await?;
+        Ok(authz_pool)
+    }
+
+    /// Look up IP pool intended for internal services by its well-known name.
     ///
     /// This method may require an index by Availability Zone in the future.
     pub async fn ip_pools_service_lookup(
@@ -1193,6 +1311,14 @@
             )));
         }
 
+        // For multicast pools, validate ASM/SSM separation
+        if pool.pool_type == IpPoolType::Multicast {
+            Self::validate_multicast_pool_range_consistency_on_conn(
+                conn, authz_pool, range,
+            )
+            .await?;
+        }
+
         let new_range = IpPoolRange::new(range, pool_id);
         let filter_subquery = FilterOverlappingIpRanges { range: new_range };
         let insert_query =
@@ -1311,20 +1437,121 @@
             ))
         }
     }
+
+    /// Validate that a new range being added to a multicast pool is consistent
+    /// with existing ranges in the pool, i.e., that we don't mix ASM and SSM
+    /// ranges in the same pool.
+    ///
+    /// Takes in a connection so it can be called from within a
+    /// transaction context.
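+    ///
+    /// Illustrative examples of the split (following RFC 4607): 232.1.1.1
+    /// falls inside 232.0.0.0/8 and is SSM, while 224.1.1.1 is ASM; FF3E::1
+    /// carries flags nibble 3 and is SSM, while FF0E::1 (flags nibble 0) is
+    /// ASM.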
+    async fn validate_multicast_pool_range_consistency_on_conn(
+        conn: &async_bb8_diesel::Connection<DbConnection>,
+        authz_pool: &authz::IpPool,
+        range: &IpRange,
+    ) -> Result<(), Error> {
+        use nexus_db_schema::schema::ip_pool_range::dsl;
+
+        let new_range_is_ssm = match range {
+            IpRange::V4(v4_range) => {
+                let first = v4_range.first_address();
+                IPV4_SSM_SUBNET.contains(first)
+            }
+            IpRange::V6(v6_range) => {
+                let first = v6_range.first_address();
+                // Check if the flag field (second nibble) is 3 for SSM
+                let flag_field = (first.octets()[1] & 0xF0) >> 4;
+                flag_field == IPV6_SSM_FLAG_FIELD
+            }
+        };
+
+        // Query existing ranges within THIS pool only
+        let existing_ranges: Vec<IpPoolRange> = dsl::ip_pool_range
+            .filter(dsl::ip_pool_id.eq(authz_pool.id()))
+            .filter(dsl::time_deleted.is_null())
+            .get_results_async(conn)
+            .await
+            .map_err(|e| {
+                Error::internal_error(&format!(
+                    "Failed to fetch existing IP pool ranges: {}",
+                    e
+                ))
+            })?;
+
+        // Check if any existing range conflicts with the new range type
+        for existing_range in &existing_ranges {
+            let existing_is_ssm = match &existing_range.first_address {
+                IpNetwork::V4(net) => IPV4_SSM_SUBNET.contains(net.network()),
+                IpNetwork::V6(net) => {
+                    // Check if the flag field (second nibble) is 3 for SSM
+                    let flag_field = (net.network().octets()[1] & 0xF0) >> 4;
+                    flag_field == IPV6_SSM_FLAG_FIELD
+                }
+            };
+
+            // If we have a mix of ASM and SSM within this pool, reject
+            if new_range_is_ssm != existing_is_ssm {
+                let new_type = if new_range_is_ssm { "SSM" } else { "ASM" };
+                let existing_type = if existing_is_ssm { "SSM" } else { "ASM" };
+                return Err(Error::invalid_request(&format!(
+                    "Cannot mix {new_type} and {existing_type} ranges in multicast pool. \
+                    {new_type} ranges (IPv4 232/8, IPv6 FF3x::/32) and \
+                    {existing_type} ranges (IPv4 224/4, IPv6 FF0x-FF2x::/32) must be in separate pools."
+                )));
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Determine whether a multicast IP pool is SSM (true) or ASM (false).
+    /// Assumes pools are range-consistent (validated on range insertion).
+    pub async fn multicast_pool_is_ssm(
+        &self,
+        opctx: &OpContext,
+        pool_id: Uuid,
+    ) -> LookupResult<bool> {
+        use nexus_db_schema::schema::ip_pool_range::dsl;
+
+        // Fetch any active range for the pool. Validation at insert time
+        // guarantees consistency across ranges in a multicast pool.
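+        // For example (illustrative): a pool whose first range starts at
+        // 232.1.1.1 is classified SSM below, while one starting at 224.1.1.1
+        // is ASM.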
+        let range = dsl::ip_pool_range
+            .filter(dsl::ip_pool_id.eq(pool_id))
+            .filter(dsl::time_deleted.is_null())
+            .select(IpPoolRange::as_select())
+            .first_async::<IpPoolRange>(
+                &*self.pool_connection_authorized(opctx).await?,
+            )
+            .await
+            .optional()
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
+
+        let Some(range) = range else {
+            return Err(Error::insufficient_capacity(
+                "No IP ranges available in multicast pool",
+                "multicast pool has no active ranges",
+            ));
+        };
+
+        let is_ssm = match range.first_address {
+            IpNetwork::V4(net) => IPV4_SSM_SUBNET.contains(net.network()),
+            IpNetwork::V6(net) => {
+                // Check if the flag field (second nibble) is 3 for SSM
+                let flags = (net.network().octets()[1] & 0xF0) >> 4;
+                flags == IPV6_SSM_FLAG_FIELD
+            }
+        };
+
+        Ok(is_ssm)
+    }
 }
 
 #[cfg(test)]
 mod test {
+    use std::net::{Ipv4Addr, Ipv6Addr};
     use std::num::NonZeroU32;
 
-    use crate::authz;
-    use crate::db::datastore::ip_pool::INTERNAL_SILO_DEFAULT_ERROR;
-    use crate::db::model::{
-        IpPool, IpPoolResource, IpPoolResourceType, Project,
-    };
-    use crate::db::pub_test_utils::TestDatabase;
     use assert_matches::assert_matches;
-    use nexus_db_model::{IpPoolIdentity, IpVersion};
+    use nexus_db_model::{IpPoolIdentity, IpPoolType, IpVersion};
     use nexus_types::external_api::params;
     use nexus_types::identity::Resource;
     use omicron_common::address::{IpRange, Ipv4Range, Ipv6Range};
@@ -1335,6 +1562,13 @@
     use omicron_test_utils::dev;
     use uuid::Uuid;
 
+    use crate::authz;
+    use crate::db::datastore::ip_pool::INTERNAL_SILO_DEFAULT_ERROR;
+    use crate::db::model::{
+        IpPool, IpPoolResource, IpPoolResourceType, Project,
+    };
+    use crate::db::pub_test_utils::TestDatabase;
+
     #[tokio::test]
     async fn test_default_ip_pools() {
         let logctx = dev::test_setup_log("test_default_ip_pools");
@@ -1391,7 +1625,7 @@
             .expect("Should list silo IP pools");
         assert_eq!(silo_pools.len(), 0);
 
-        // make default should fail when there is no link yet
+        // Make default should fail when there is no link yet
         let authz_pool = authz::IpPool::new(
             authz::FLEET,
             pool1_for_silo.id(),
@@ -1563,7 +1797,7 @@
     }
 
     #[tokio::test]
-    async fn cannot_set_default_ip_pool_for_internal_silo() {
+    async fn test_cannot_set_default_ip_pool_for_internal_silo() {
         let logctx =
             dev::test_setup_log("cannot_set_default_ip_pool_for_internal_silo");
         let db = TestDatabase::new_with_datastore(&logctx.log).await;
@@ -1583,6 +1817,7 @@
             ),
             ip_version,
             rcgen: 0,
+            pool_type: IpPoolType::Unicast,
         };
         let pool = datastore
             .ip_pool_create(&opctx, params)
@@ -1690,8 +1925,8 @@
 
         let range = IpRange::V4(
             Ipv4Range::new(
-                std::net::Ipv4Addr::new(10, 0, 0, 1),
-                std::net::Ipv4Addr::new(10, 0, 0, 5),
+                Ipv4Addr::new(10, 0, 0, 1),
+                Ipv4Addr::new(10, 0, 0, 5),
             )
             .unwrap(),
        );
@@ -1805,8 +2040,8 @@
         // Add an IPv6 range
         let ipv6_range = IpRange::V6(
             Ipv6Range::new(
-                std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 10),
-                std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 1, 20),
+                Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 10),
+                Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 1, 20),
             )
             .unwrap(),
         );
@@ -1852,8 +2087,8 @@
         // add a giant range for fun
         let ipv6_range = IpRange::V6(
             Ipv6Range::new(
-                std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 1, 21),
-                std::net::Ipv6Addr::new(
+                Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 1, 21),
+                Ipv6Addr::new(
                     0xfd00, 0, 0, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
                 ),
             )
@@ -1875,7 +2110,7 @@
     }
 
     #[tokio::test]
-    async fn cannot_insert_range_in_pool_with_different_ip_version() {
+    async fn
test_cannot_insert_range_in_pool_with_different_ip_version() { let logctx = dev::test_setup_log( "cannot_insert_range_in_pool_with_different_ip_version", ); @@ -1887,8 +2122,8 @@ mod test { let ranges = [ IpRange::V6( Ipv6Range::new( - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 1, 21), - std::net::Ipv6Addr::new( + Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 1, 21), + Ipv6Addr::new( 0xfd00, 0, 0, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, ), ) @@ -1896,8 +2131,8 @@ mod test { ), IpRange::V4( Ipv4Range::new( - std::net::Ipv4Addr::new(10, 0, 0, 1), - std::net::Ipv4Addr::new(10, 0, 0, 5), + Ipv4Addr::new(10, 0, 0, 1), + Ipv4Addr::new(10, 0, 0, 5), ) .unwrap(), ), @@ -1934,4 +2169,219 @@ mod test { db.terminate().await; logctx.cleanup_successful(); } + + #[tokio::test] + async fn test_multicast_ip_pool_basic_operations() { + let logctx = + dev::test_setup_log("test_multicast_ip_pool_basic_operations"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + // Create a multicast IP pool + let identity = IdentityMetadataCreateParams { + name: "multicast-pool".parse().unwrap(), + description: "Test multicast IP pool".to_string(), + }; + let pool = datastore + .ip_pool_create( + &opctx, + IpPool::new_multicast(&identity, IpVersion::V4), + ) + .await + .expect("Failed to create multicast IP pool"); + + let authz_silo = opctx.authn.silo_required().unwrap(); + let link = IpPoolResource { + ip_pool_id: pool.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: authz_silo.id(), + is_default: true, + }; + datastore + .ip_pool_link_silo(&opctx, link) + .await + .expect("Failed to link IP pool to silo"); + + // Verify it's marked as multicast + assert_eq!(pool.pool_type, IpPoolType::Multicast); + + // Test multicast-specific listing + let pagparams_id = DataPageParams { + marker: None, + limit: NonZeroU32::new(100).unwrap(), + direction: dropshot::PaginationOrder::Ascending, + }; + let pagbyid = PaginatedBy::Id(pagparams_id); + + let multicast_pools = datastore + .ip_pools_list_multicast(&opctx, &pagbyid) + .await + .expect("Should list multicast IP pools"); + assert_eq!(multicast_pools.len(), 1); + assert_eq!(multicast_pools[0].id(), pool.id()); + + // Regular pool listing should also include it + let all_pools = datastore + .ip_pools_list(&opctx, &pagbyid) + .await + .expect("Should list all IP pools"); + assert_eq!(all_pools.len(), 1); + + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_multicast_ip_pool_default_by_type() { + let logctx = + dev::test_setup_log("test_multicast_ip_pool_default_by_type"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + let authz_silo = opctx.authn.silo_required().unwrap(); + + // Initially no default multicast pool + let error = datastore + .ip_pools_fetch_default_by_type(&opctx, IpPoolType::Multicast) + .await + .unwrap_err(); + assert_matches!(error, Error::ObjectNotFound { .. 
}); + + // Create and link a multicast pool as default + let identity = IdentityMetadataCreateParams { + name: "default-multicast-pool".parse().unwrap(), + description: "Default multicast pool".to_string(), + }; + let pool = datastore + .ip_pool_create( + &opctx, + IpPool::new_multicast(&identity, IpVersion::V4), + ) + .await + .expect("Failed to create multicast IP pool"); + + let link = IpPoolResource { + ip_pool_id: pool.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: authz_silo.id(), + is_default: true, + }; + datastore + .ip_pool_link_silo(&opctx, link) + .await + .expect("Could not link multicast pool to silo"); + + // Now should find the default multicast pool + let default_pool = datastore + .ip_pools_fetch_default_by_type(&opctx, IpPoolType::Multicast) + .await + .expect("Should find default multicast pool"); + assert_eq!(default_pool.1.id(), pool.id()); + assert_eq!(default_pool.1.pool_type, IpPoolType::Multicast); + + // Regular default should still fail (no unicast pool) + let error = datastore.ip_pools_fetch_default(&opctx).await.unwrap_err(); + assert_matches!(error, Error::ObjectNotFound { .. }); + + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_multicast_ip_pool_ranges() { + let logctx = dev::test_setup_log("test_multicast_ip_pool_ranges"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + // Create IPv4 multicast IP pool + let ipv4_identity = IdentityMetadataCreateParams { + name: "multicast-ipv4-pool".parse().unwrap(), + description: "Test IPv4 multicast IP pool".to_string(), + }; + let ipv4_pool = datastore + .ip_pool_create( + &opctx, + IpPool::new_multicast(&ipv4_identity, IpVersion::V4), + ) + .await + .expect("Failed to create IPv4 multicast IP pool"); + + let authz_ipv4_pool = authz::IpPool::new( + authz::FLEET, + ipv4_pool.id(), + LookupType::ById(ipv4_pool.id()), + ); + + // Add IPv4 multicast range (224.0.0.0/4) + let ipv4_range = IpRange::V4( + Ipv4Range::new( + Ipv4Addr::new(224, 1, 1, 1), + Ipv4Addr::new(224, 1, 1, 10), + ) + .unwrap(), + ); + datastore + .ip_pool_add_range( + &opctx, + &authz_ipv4_pool, + &ipv4_pool, + &ipv4_range, + ) + .await + .expect("Could not add IPv4 multicast range"); + + // Create IPv6 multicast IP pool + let ipv6_identity = IdentityMetadataCreateParams { + name: "multicast-ipv6-pool".parse().unwrap(), + description: "Test IPv6 multicast IP pool".to_string(), + }; + let ipv6_pool = datastore + .ip_pool_create( + &opctx, + IpPool::new_multicast(&ipv6_identity, IpVersion::V6), + ) + .await + .expect("Failed to create IPv6 multicast IP pool"); + + let authz_ipv6_pool = authz::IpPool::new( + authz::FLEET, + ipv6_pool.id(), + LookupType::ById(ipv6_pool.id()), + ); + + // Add IPv6 multicast range (ff00::/8) + let ipv6_range = IpRange::V6( + Ipv6Range::new( + Ipv6Addr::new(0xff01, 0, 0, 0, 0, 0, 0, 1), + Ipv6Addr::new(0xff01, 0, 0, 0, 0, 0, 0, 10), + ) + .unwrap(), + ); + datastore + .ip_pool_add_range( + &opctx, + &authz_ipv6_pool, + &ipv6_pool, + &ipv6_range, + ) + .await + .expect("Could not add IPv6 multicast range"); + + // Check IPv4 pool capacity + let ipv4_capacity = datastore + .ip_pool_total_capacity(&opctx, &authz_ipv4_pool) + .await + .unwrap(); + assert_eq!(ipv4_capacity, 10); // 224.1.1.1 to 224.1.1.10 + + // Check IPv6 pool capacity + let ipv6_capacity = datastore + .ip_pool_total_capacity(&opctx, &authz_ipv6_pool) + .await + .unwrap(); + assert_eq!(ipv6_capacity, 10); // ff01::1 to ff01::a 
+
+        db.terminate().await;
+        logctx.cleanup_successful();
+    }
 }
diff --git a/nexus/db-queries/src/db/datastore/switch_port.rs b/nexus/db-queries/src/db/datastore/switch_port.rs
index c4093806eda..9a756916e6b 100644
--- a/nexus/db-queries/src/db/datastore/switch_port.rs
+++ b/nexus/db-queries/src/db/datastore/switch_port.rs
@@ -1138,6 +1138,46 @@ impl DataStore {
         Ok(id)
     }
 
+    /// Given a list of switch port UUIDs, return a list of strings in the
+    /// format "<switch_location>.<port_name>". The order of the returned list
+    /// matches the order of the input UUIDs.
+    pub async fn switch_ports_from_ids(
+        &self,
+        opctx: &OpContext,
+        uplink_uuids: &[Uuid],
+    ) -> LookupResult<Vec<String>> {
+        use nexus_db_schema::schema::switch_port::{
+            self, dsl, port_name, switch_location,
+        };
+
+        if uplink_uuids.is_empty() {
+            return Ok(Vec::new());
+        }
+
+        let conn = self.pool_connection_authorized(opctx).await?;
+        let uplink_uuids_vec: Vec<Uuid> = uplink_uuids.to_vec();
+
+        // Maintain the order from the input UUIDs
+        let mut result = Vec::with_capacity(uplink_uuids.len());
+        for uuid in uplink_uuids_vec.iter() {
+            let switch_port_info = dsl::switch_port
+                .filter(switch_port::id.eq(*uuid))
+                .select((switch_location, port_name))
+                .first_async::<(String, String)>(&*conn)
+                .await
+                .map_err(|_| {
+                    Error::internal_error(&format!(
+                        "Switch port UUID {uuid} not found",
+                    ))
+                })?;
+
+            result
+                .push(format!("{}.{}", switch_port_info.0, switch_port_info.1));
+        }
+
+        Ok(result)
+    }
+
     pub async fn switch_ports_with_uplinks(
         &self,
         opctx: &OpContext,
diff --git a/nexus/db-queries/src/db/on_conflict_ext.rs b/nexus/db-queries/src/db/on_conflict_ext.rs
index 5f31eb99fb6..bcf9664b77e 100644
--- a/nexus/db-queries/src/db/on_conflict_ext.rs
+++ b/nexus/db-queries/src/db/on_conflict_ext.rs
@@ -293,7 +293,7 @@
     /// [the `filter` method]:
     ///     https://docs.rs/diesel/2.1.4/diesel/query_dsl/methods/trait.FilterDsl.html#tymethod.filter
     /// [`filter_target` method]:
-    ///     https://docs.rs/diesel/2.1.4/diesel/query_dsl/trait.FilterTarget.html#tymethod.filter_targehttps://docs.rs/diesel/2.1.4/diesel/upsert/trait.DecoratableTarget.html#tymethod.filter_targett
+    ///     https://docs.rs/diesel/2.1.4/diesel/upsert/trait.DecoratableTarget.html#tymethod.filter_target
     /// [`disallowed_methods`]:
     ///     https://rust-lang.github.io/rust-clippy/master/index.html#disallowed_methods
     /// [principle of explosion]:
diff --git a/nexus/db-queries/src/db/pub_test_utils/helpers.rs b/nexus/db-queries/src/db/pub_test_utils/helpers.rs
index 7b9fbc7e00c..2fda16ef2a6 100644
--- a/nexus/db-queries/src/db/pub_test_utils/helpers.rs
+++ b/nexus/db-queries/src/db/pub_test_utils/helpers.rs
@@ -31,6 +31,7 @@ use nexus_db_model::SledUpdate;
 use nexus_db_model::Snapshot;
 use nexus_db_model::SnapshotIdentity;
 use nexus_db_model::SnapshotState;
+use nexus_db_model::Vmm;
 use nexus_types::external_api::params;
 use nexus_types::identity::Resource;
 use omicron_common::api::external;
@@ -40,6 +41,7 @@ use omicron_common::api::external::{
 };
 use omicron_common::update::ArtifactId;
 use omicron_uuid_kinds::GenericUuid;
 use omicron_uuid_kinds::InstanceUuid;
+use omicron_uuid_kinds::PropolisUuid;
 use omicron_uuid_kinds::SledUuid;
 use omicron_uuid_kinds::TufRepoUuid;
 use omicron_uuid_kinds::VolumeUuid;
@@ -503,6 +505,93 @@ pub async fn create_project_image(
         .unwrap()
 }
 
+/// Create a VMM record for testing.
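+///
+/// A minimal usage sketch (helper names are the ones defined below; the
+/// instance, sled, and project setup are assumed to exist elsewhere in the
+/// test):
+///
+/// ```ignore
+/// let vmm_id =
+///     create_vmm_for_instance(&opctx, &datastore, instance_id, sled_id).await;
+/// attach_instance_to_vmm(&opctx, &datastore, &authz_project, instance_id, vmm_id)
+///     .await;
+/// ```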
+pub async fn create_vmm_for_instance( + opctx: &OpContext, + datastore: &DataStore, + instance_id: InstanceUuid, + sled_id: SledUuid, +) -> PropolisUuid { + let vmm_id = PropolisUuid::new_v4(); + let vmm = Vmm::new( + vmm_id, + instance_id, + sled_id, + "127.0.0.1".parse().unwrap(), // Test IP + 12400, // Test port + nexus_db_model::VmmCpuPlatform::SledDefault, // Test CPU platform + ); + datastore.vmm_insert(opctx, vmm).await.expect("Should create VMM"); + vmm_id +} + +/// Update instance runtime to point to a VMM. +pub async fn attach_instance_to_vmm( + opctx: &OpContext, + datastore: &DataStore, + authz_project: &authz::Project, + instance_id: InstanceUuid, + vmm_id: PropolisUuid, +) { + // Fetch current instance to get generation + let authz_instance = authz::Instance::new( + authz_project.clone(), + instance_id.into_untyped_uuid(), + external::LookupType::ById(instance_id.into_untyped_uuid()), + ); + let instance = datastore + .instance_refetch(opctx, &authz_instance) + .await + .expect("Should fetch instance"); + + datastore + .instance_update_runtime( + &instance_id, + &InstanceRuntimeState { + nexus_state: InstanceState::Vmm, + propolis_id: Some(vmm_id.into_untyped_uuid()), + dst_propolis_id: None, + migration_id: None, + gen: Generation::from(instance.runtime().gen.next()), + time_updated: Utc::now(), + time_last_auto_restarted: None, + }, + ) + .await + .expect("Should update instance runtime state"); +} + +/// Create an instance with an associated VMM (convenience function). +pub async fn create_instance_with_vmm( + opctx: &OpContext, + datastore: &DataStore, + authz_project: &authz::Project, + instance_name: &str, + sled_id: SledUuid, +) -> (InstanceUuid, PropolisUuid) { + let instance_id = create_stopped_instance_record( + opctx, + datastore, + authz_project, + instance_name, + ) + .await; + + let vmm_id = + create_vmm_for_instance(opctx, datastore, instance_id, sled_id).await; + + attach_instance_to_vmm( + opctx, + datastore, + authz_project, + instance_id, + vmm_id, + ) + .await; + + (instance_id, vmm_id) +} + pub async fn insert_test_tuf_repo( opctx: &OpContext, datastore: &DataStore, diff --git a/nexus/db-queries/src/db/queries/external_multicast_group.rs b/nexus/db-queries/src/db/queries/external_multicast_group.rs new file mode 100644 index 00000000000..79014b00df4 --- /dev/null +++ b/nexus/db-queries/src/db/queries/external_multicast_group.rs @@ -0,0 +1,281 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Implementation of queries for operating on external multicast groups from IP +//! Pools. +//! +//! Much of this is based on the external IP allocation code, with +//! modifications for multicast group semantics. 
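+//!
+//! As a rough sketch (not the exact SQL emitted), the CTE built by
+//! `NextExternalMulticastGroup` below has this shape:
+//!
+//! ```sql
+//! WITH next_external_multicast_group AS (
+//!     SELECT ..., candidate_ip AS multicast_ip, ...
+//!     FROM (/* address candidates from ip_pool_range */)
+//!     LEFT OUTER JOIN multicast_group
+//!         ON (multicast_ip = candidate_ip AND time_deleted IS NULL)
+//!     WHERE candidate_ip IS NOT NULL AND multicast_ip IS NULL LIMIT 1
+//! ), previously_allocated_group AS (
+//!     SELECT * FROM multicast_group WHERE id = $1 AND time_deleted IS NULL
+//! ), multicast_group AS (
+//!     INSERT INTO multicast_group
+//!     SELECT ... FROM next_external_multicast_group
+//!     WHERE NOT EXISTS (SELECT 1 FROM previously_allocated_group)
+//!     RETURNING ...
+//! )
+//! SELECT ... FROM previously_allocated_group
+//! UNION ALL
+//! SELECT ... FROM multicast_group;
+//! ```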
+
+use chrono::{DateTime, Utc};
+use diesel::pg::Pg;
+use diesel::query_builder::{AstPass, Query, QueryFragment, QueryId};
+use diesel::{Column, QueryResult, RunQueryDsl, sql_types};
+use ipnetwork::IpNetwork;
+use uuid::Uuid;
+
+use nexus_db_lookup::DbConnection;
+use nexus_db_schema::schema;
+
+use crate::db::model::{
+    ExternalMulticastGroup, Generation, IncompleteExternalMulticastGroup,
+    MulticastGroupState, Name, Vni,
+};
+use crate::db::true_or_cast_error::matches_sentinel;
+
+const REALLOCATION_WITH_DIFFERENT_MULTICAST_GROUP_SENTINEL: &'static str =
+    "Reallocation of multicast group with different configuration";
+
+/// Translates a generic multicast group allocation error to an external error.
+pub fn from_diesel(
+    e: diesel::result::Error,
+) -> omicron_common::api::external::Error {
+    let sentinels = [REALLOCATION_WITH_DIFFERENT_MULTICAST_GROUP_SENTINEL];
+    if let Some(sentinel) = matches_sentinel(&e, &sentinels) {
+        match sentinel {
+            REALLOCATION_WITH_DIFFERENT_MULTICAST_GROUP_SENTINEL => {
+                return omicron_common::api::external::Error::invalid_request(
+                    "Re-allocating multicast group with different configuration",
+                );
+            }
+            // Fall-through to the generic error conversion.
+            _ => {}
+        }
+    }
+
+    nexus_db_errors::public_error_from_diesel(
+        e,
+        nexus_db_errors::ErrorHandler::Server,
+    )
+}
+
+/// Query to allocate the next available external multicast group address from
+/// IP pools.
+///
+/// This query follows a similar pattern as
+/// [`super::external_ip::NextExternalIp`] but for multicast addresses.
+///
+/// It handles pool-based allocation, explicit address requests, and
+/// idempotency.
+pub struct NextExternalMulticastGroup {
+    group: IncompleteExternalMulticastGroup,
+    now: DateTime<Utc>,
+}
+
+impl NextExternalMulticastGroup {
+    pub fn new(group: IncompleteExternalMulticastGroup) -> Self {
+        let now = Utc::now();
+        Self { group, now }
+    }
+
+    fn push_next_multicast_ip_subquery<'a>(
+        &'a self,
+        mut out: AstPass<'_, 'a, Pg>,
+    ) -> QueryResult<()> {
+        out.push_sql("SELECT ");
+        out.push_bind_param::<sql_types::Uuid, Uuid>(&self.group.id)?;
+        out.push_sql(" AS id, ");
+
+        // Use provided name (now required via identity pattern)
+        out.push_bind_param::<sql_types::Text, Name>(&self.group.name)?;
+        out.push_sql(" AS name, ");
+
+        // Use provided description (now required via identity pattern)
+        out.push_bind_param::<sql_types::Text, String>(
+            &self.group.description,
+        )?;
+        out.push_sql(" AS description, ");
+
+        out.push_bind_param::<sql_types::Timestamptz, DateTime<Utc>>(
+            &self.now,
+        )?;
+        out.push_sql(" AS time_created, ");
+        out.push_bind_param::<sql_types::Timestamptz, DateTime<Utc>>(
+            &self.now,
+        )?;
+        out.push_sql(" AS time_modified, ");
+
+        out.push_bind_param::<sql_types::Nullable<sql_types::Timestamptz>, Option<DateTime<Utc>>>(&None)?;
+        out.push_sql(" AS time_deleted, ");
+
+        out.push_bind_param::<sql_types::Uuid, Uuid>(&self.group.project_id)?;
+        out.push_sql(" AS project_id, ");
+
+        // Pool ID from the candidates subquery (like external IP)
+        out.push_sql("ip_pool_id, ");
+
+        // Pool range ID from the candidates subquery
+        out.push_sql("ip_pool_range_id, ");
+
+        // VNI
+        out.push_bind_param::<sql_types::Int4, Vni>(&self.group.vni)?;
+        out.push_sql(" AS vni, ");
+
+        // The multicast IP comes from the candidates subquery
+        out.push_sql("candidate_ip AS multicast_ip, ");
+
+        // Handle source IPs array
+        out.push_sql("ARRAY[");
+        for (i, source_ip) in self.group.source_ips.iter().enumerate() {
+            if i > 0 {
+                out.push_sql(", ");
+            }
+            out.push_bind_param::<sql_types::Inet, IpNetwork>(
+                source_ip,
+            )?;
+        }
+        out.push_sql("]::inet[] AS source_ips, ");
+
+        out.push_bind_param::<sql_types::Nullable<sql_types::Uuid>, Option<Uuid>>(&None)?;
+        out.push_sql(" AS underlay_group_id, ");
+
+        out.push_bind_param::<sql_types::Uuid, Uuid>(&self.group.rack_id)?;
+        out.push_sql(" AS rack_id, ");
+
+        out.push_bind_param::<sql_types::Nullable<sql_types::Text>, Option<String>>(&self.group.tag)?;
+        out.push_sql(" AS tag, ");
+
+        // New multicast groups start in "Creating" state (RPW pattern)
+        out.push_bind_param::<nexus_db_schema::enums::MulticastGroupStateEnum, MulticastGroupState>(&MulticastGroupState::Creating)?;
+        out.push_sql(" AS state, ");
+
+        out.push_sql("nextval('omicron.public.multicast_group_version') AS version_added, ");
+        out.push_bind_param::<sql_types::Nullable<sql_types::BigInt>, Option<Generation>>(&None)?;
+        out.push_sql(" AS version_removed");
+
+        // FROM the candidates subquery with LEFT JOIN (like external IP)
+        out.push_sql(" FROM (");
+        self.push_address_candidates_subquery(out.reborrow())?;
+        out.push_sql(") LEFT OUTER JOIN ");
+        schema::multicast_group::table.walk_ast(out.reborrow())?;
+        out.push_sql(
+            " ON (multicast_ip = candidate_ip AND time_deleted IS NULL)",
+        );
+        out.push_sql(
+            " WHERE candidate_ip IS NOT NULL AND multicast_ip IS NULL LIMIT 1",
+        );
+
+        Ok(())
+    }
+
+    fn push_address_candidates_subquery<'a>(
+        &'a self,
+        mut out: AstPass<'_, 'a, Pg>,
+    ) -> QueryResult<()> {
+        use schema::ip_pool_range::dsl;
+
+        out.push_sql("SELECT ");
+        out.push_identifier(dsl::ip_pool_id::NAME)?;
+        out.push_sql(", ");
+        out.push_identifier(dsl::id::NAME)?;
+        out.push_sql(" AS ip_pool_range_id, ");
+
+        // Handle explicit address vs automatic allocation
+        if let Some(explicit_addr) = &self.group.explicit_address {
+            out.push_sql("CASE ");
+            out.push_identifier(dsl::first_address::NAME)?;
+            out.push_sql(" <= ");
+            out.push_bind_param::<sql_types::Inet, IpNetwork>(explicit_addr)?;
+            out.push_sql(" AND ");
+            out.push_bind_param::<sql_types::Inet, IpNetwork>(explicit_addr)?;
+            out.push_sql(" <= ");
+            out.push_identifier(dsl::last_address::NAME)?;
+            out.push_sql(" WHEN TRUE THEN ");
+            out.push_bind_param::<sql_types::Inet, IpNetwork>(explicit_addr)?;
+            out.push_sql(" ELSE NULL END");
+        } else {
+            // Generate series of candidate IPs (like external IP does)
+            out.push_identifier(dsl::first_address::NAME)?;
+            out.push_sql(" + generate_series(0, ");
+            out.push_identifier(dsl::last_address::NAME)?;
+            out.push_sql(" - ");
+            out.push_identifier(dsl::first_address::NAME)?;
+            out.push_sql(")");
+        }
+
+        out.push_sql(" AS candidate_ip FROM ");
+        schema::ip_pool_range::table.walk_ast(out.reborrow())?;
+        out.push_sql(" WHERE ");
+        out.push_identifier(dsl::ip_pool_id::NAME)?;
+        out.push_sql(" = ");
+        out.push_bind_param::<sql_types::Uuid, Uuid>(&self.group.ip_pool_id)?;
+        out.push_sql(" AND ");
+        out.push_identifier(dsl::time_deleted::NAME)?;
+        out.push_sql(" IS NULL");
+
+        // Filter for multicast address ranges (224.0.0.0/4 for IPv4,
+        // ff00::/8 for IPv6)
+        out.push_sql(" AND (");
+        out.push_identifier(dsl::first_address::NAME)?;
+        out.push_sql(" << '224.0.0.0/4'::inet OR ");
+        out.push_identifier(dsl::first_address::NAME)?;
+        out.push_sql(" << 'ff00::/8'::inet)");
+
+        Ok(())
+    }
+
+    fn push_prior_allocation_subquery<'a>(
+        &'a self,
+        mut out: AstPass<'_, 'a, Pg>,
+    ) -> QueryResult<()> {
+        out.push_sql("SELECT * FROM ");
+        schema::multicast_group::table.walk_ast(out.reborrow())?;
+        out.push_sql(" WHERE id = ");
+        out.push_bind_param::<sql_types::Uuid, Uuid>(&self.group.id)?;
+        out.push_sql(" AND time_deleted IS NULL");
+        Ok(())
+    }
+}
+
+impl QueryFragment<Pg> for NextExternalMulticastGroup {
+    fn walk_ast<'a>(
+        &'a self,
+        mut out: AstPass<'_, 'a, Pg>,
+    ) -> diesel::QueryResult<()> {
+        out.unsafe_to_cache_prepared();
+
+        // Create CTE for candidate multicast group
+        out.push_sql("WITH next_external_multicast_group AS (");
+        self.push_next_multicast_ip_subquery(out.reborrow())?;
+        out.push_sql("), ");
+
+        // Check for existing allocation (idempotency)
+        out.push_sql("previously_allocated_group AS (");
+        self.push_prior_allocation_subquery(out.reborrow())?;
+        out.push_sql("), ");
+
+        // Insert new record or return existing one
+        out.push_sql("multicast_group AS (");
+        out.push_sql("INSERT INTO ");
+        schema::multicast_group::table.walk_ast(out.reborrow())?;
+        out.push_sql(
+            " (id, name, description, time_created, time_modified, time_deleted, project_id, ip_pool_id, ip_pool_range_id, vni, multicast_ip, source_ips, underlay_group_id, rack_id, tag, state, version_added, version_removed)
+            SELECT id, name, description, time_created, time_modified, time_deleted, project_id, ip_pool_id, ip_pool_range_id, vni, multicast_ip, source_ips, underlay_group_id, rack_id, tag, state, version_added, version_removed FROM next_external_multicast_group
+            WHERE NOT EXISTS (SELECT 1 FROM previously_allocated_group)
+            RETURNING id, name, description, time_created, time_modified, time_deleted, project_id, ip_pool_id, ip_pool_range_id, vni, multicast_ip, source_ips, underlay_group_id, rack_id, tag, state, version_added, version_removed",
+        );
+        out.push_sql(") ");
+
+        // Return either the newly inserted or previously allocated group
+        out.push_sql(
+            "SELECT id, name, description, time_created, time_modified, time_deleted, project_id, ip_pool_id, ip_pool_range_id, vni, multicast_ip, source_ips, underlay_group_id, rack_id, tag, state, version_added, version_removed FROM previously_allocated_group
+            UNION ALL
+            SELECT id, name, description, time_created, time_modified, time_deleted, project_id, ip_pool_id, ip_pool_range_id, vni, multicast_ip, source_ips, underlay_group_id, rack_id, tag, state, version_added, version_removed FROM multicast_group",
+        );
+
+        Ok(())
+    }
+}
+
+impl QueryId for NextExternalMulticastGroup {
+    type QueryId = ();
+    const HAS_STATIC_QUERY_ID: bool = false;
+}
+
+impl Query for NextExternalMulticastGroup {
+    type SqlType = <<ExternalMulticastGroup as diesel::Selectable<Pg>>::SelectExpression as diesel::Expression>::SqlType;
+}
+
+impl RunQueryDsl<DbConnection> for NextExternalMulticastGroup {}
diff --git a/nexus/db-schema/src/enums.rs b/nexus/db-schema/src/enums.rs
index 5ea98088306..bb10279a629 100644
--- a/nexus/db-schema/src/enums.rs
+++ b/nexus/db-schema/src/enums.rs
@@ -58,6 +58,7 @@ define_enums! {
     IpAttachStateEnum => "ip_attach_state",
     IpKindEnum => "ip_kind",
     IpPoolResourceTypeEnum => "ip_pool_resource_type",
+    IpPoolTypeEnum => "ip_pool_type",
     IpVersionEnum => "ip_version",
     MigrationStateEnum => "migration_state",
     NetworkInterfaceKindEnum => "network_interface_kind",
diff --git a/nexus/db-schema/src/schema.rs b/nexus/db-schema/src/schema.rs
index 1f85f3e8d58..f58f3cca620 100644
--- a/nexus/db-schema/src/schema.rs
+++ b/nexus/db-schema/src/schema.rs
@@ -629,6 +629,7 @@ table! {
         time_modified -> Timestamptz,
         time_deleted -> Nullable<Timestamptz>,
         ip_version -> crate::enums::IpVersionEnum,
+        pool_type -> crate::enums::IpPoolTypeEnum,
         rcgen -> Int8,
     }
 }
diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs
index f51bc8a5541..24c70c89fdf 100644
--- a/nexus/src/app/ip_pool.rs
+++ b/nexus/src/app/ip_pool.rs
@@ -5,10 +5,13 @@
 //! IP Pools, collections of external IP addresses for guest instances
 
 use crate::external_api::params;
-use crate::external_api::shared::IpRange;
+use crate::external_api::shared;
 use ipnetwork::IpNetwork;
 use nexus_db_lookup::LookupPath;
 use nexus_db_lookup::lookup;
+use nexus_db_model::IpPool;
+use nexus_db_model::IpPoolType;
+use nexus_db_model::IpPoolUpdate;
 use nexus_db_model::IpVersion;
 use nexus_db_queries::authz;
 use nexus_db_queries::authz::ApiResource;
@@ -71,15 +74,19 @@
         &self,
         opctx: &OpContext,
         pool_params: &params::IpPoolCreate,
-    ) -> CreateResult<db::model::IpPool> {
-        // https://github.com/oxidecomputer/omicron/issues/8966
+    ) -> CreateResult<IpPool> {
+        // https://github.com/oxidecomputer/omicron/issues/8881
         let ip_version = pool_params.ip_version.into();
-        if matches!(ip_version, IpVersion::V6) {
-            return Err(Error::invalid_request(
-                "IPv6 pools are not yet supported",
-            ));
-        }
-        let pool = db::model::IpPool::new(&pool_params.identity, ip_version);
+
+        let pool = match pool_params.pool_type.clone() {
+            shared::IpPoolType::Unicast => {
+                IpPool::new(&pool_params.identity, ip_version)
+            }
+            shared::IpPoolType::Multicast => {
+                IpPool::new_multicast(&pool_params.identity, ip_version)
+            }
+        };
+
         self.db_datastore.ip_pool_create(opctx, pool).await
     }
 
@@ -281,9 +288,9 @@
             return Err(not_found_from_lookup(pool_lookup));
         }
 
-        self.db_datastore
-            .ip_pool_update(opctx, &authz_pool, updates.clone().into())
-            .await
+        let updates_db = IpPoolUpdate::from(updates.clone());
+
+        self.db_datastore.ip_pool_update(opctx, &authz_pool, updates_db).await
     }
 
     pub(crate) async fn ip_pool_list_ranges(
@@ -308,7 +315,7 @@
         &self,
         opctx: &OpContext,
         pool_lookup: &lookup::IpPool<'_>,
-        range: &IpRange,
+        range: &shared::IpRange,
     ) -> UpdateResult<db::model::IpPoolRange> {
         let (.., authz_pool, db_pool) =
             pool_lookup.fetch_for(authz::Action::Modify).await?;
@@ -326,12 +333,45 @@
         // Disallow V6 ranges until IPv6 is fully supported by the networking
        // subsystem. Instead of changing the API to reflect that (making this
        // endpoint inconsistent with the rest) and changing it back when we
        // pool utilization.
        //
        // See https://github.com/oxidecomputer/omicron/issues/8761.
- if matches!(range, IpRange::V6(_)) { + if matches!(range, shared::IpRange::V6(_)) { return Err(Error::invalid_request( "IPv6 ranges are not allowed yet", )); } + let range_is_multicast = match range { + shared::IpRange::V4(v4_range) => { + let first = v4_range.first_address(); + let last = v4_range.last_address(); + first.is_multicast() && last.is_multicast() + } + shared::IpRange::V6(v6_range) => { + let first = v6_range.first_address(); + let last = v6_range.last_address(); + first.is_multicast() && last.is_multicast() + } + }; + + match db_pool.pool_type { + IpPoolType::Multicast => { + if !range_is_multicast { + return Err(Error::invalid_request( + "Cannot add unicast address range to multicast IP pool", + )); + } + + // For multicast pools, validate ASM/SSM separation + // This validation is done in the datastore layer + } + IpPoolType::Unicast => { + if range_is_multicast { + return Err(Error::invalid_request( + "Cannot add multicast address range to unicast IP pool", + )); + } + } + } + self.db_datastore .ip_pool_add_range(opctx, &authz_pool, &db_pool, range) .await @@ -341,7 +381,7 @@ impl super::Nexus { &self, opctx: &OpContext, pool_lookup: &lookup::IpPool<'_>, - range: &IpRange, + range: &shared::IpRange, ) -> DeleteResult { let (.., authz_pool, _db_pool) = pool_lookup.fetch_for(authz::Action::Modify).await?; @@ -391,8 +431,14 @@ impl super::Nexus { pub(crate) async fn ip_pool_service_add_range( &self, opctx: &OpContext, - range: &IpRange, + range: &shared::IpRange, ) -> UpdateResult { + let (authz_pool, db_pool) = self + .db_datastore + .ip_pools_service_lookup(opctx, range.version().into()) + .await?; + opctx.authorize(authz::Action::Modify, &authz_pool).await?; + // Disallow V6 ranges until IPv6 is fully supported by the networking // subsystem. Instead of changing the API to reflect that (making this // endpoint inconsistent with the rest) and changing it back when we @@ -402,16 +448,43 @@ impl super::Nexus { // pool utilization. // // See https://github.com/oxidecomputer/omicron/issues/8761. - if matches!(range, IpRange::V6(_)) { + if matches!(range, shared::IpRange::V6(_)) { return Err(Error::invalid_request( "IPv6 ranges are not allowed yet", )); } - let (authz_pool, db_pool) = self - .db_datastore - .ip_pools_service_lookup(opctx, range.version().into()) - .await?; - opctx.authorize(authz::Action::Modify, &authz_pool).await?; + + // Validate that the range matches the pool type + let range_is_multicast = match range { + shared::IpRange::V4(v4_range) => { + let first = v4_range.first_address(); + let last = v4_range.last_address(); + first.is_multicast() && last.is_multicast() + } + shared::IpRange::V6(v6_range) => { + let first = v6_range.first_address(); + let last = v6_range.last_address(); + first.is_multicast() && last.is_multicast() + } + }; + + match db_pool.pool_type { + IpPoolType::Multicast => { + if !range_is_multicast { + return Err(Error::invalid_request( + "Cannot add unicast address range to multicast IP pool", + )); + } + } + IpPoolType::Unicast => { + if range_is_multicast { + return Err(Error::invalid_request( + "Cannot add multicast address range to unicast IP pool", + )); + } + } + } + self.db_datastore .ip_pool_add_range(opctx, &authz_pool, &db_pool, range) .await @@ -420,7 +493,7 @@ impl super::Nexus { pub(crate) async fn ip_pool_service_delete_range( &self, opctx: &OpContext, - range: &IpRange, + range: &shared::IpRange, ) -> DeleteResult { let (authz_pool, ..) 
diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs
index a4e940d8a5f..3275acd49e9 100644
--- a/nexus/src/external_api/http_entrypoints.rs
+++ b/nexus/src/external_api/http_entrypoints.rs
@@ -1775,7 +1775,7 @@ impl NexusExternalApi for NexusExternalApiImpl {
             let opctx =
                 crate::context::op_context_for_external_api(&rqctx).await?;
             let pool = nexus.ip_pool_create(&opctx, &pool_params).await?;
-            Ok(HttpResponseCreated(IpPool::from(pool)))
+            Ok(HttpResponseCreated(pool.into()))
         };
         apictx
             .context
diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs
index 71871e932c8..5f9f59b039f 100644
--- a/nexus/test-utils/src/resource_helpers.rs
+++ b/nexus/test-utils/src/resource_helpers.rs
@@ -250,20 +250,17 @@ pub async fn create_ip_pool(
     pool_name: &str,
     ip_range: Option<IpRange>,
 ) -> (IpPool, IpPoolRange) {
-    let pool = object_create(
-        client,
-        "/v1/system/ip-pools",
-        &params::IpPoolCreate {
-            identity: IdentityMetadataCreateParams {
-                name: pool_name.parse().unwrap(),
-                description: String::from("an ip pool"),
-            },
-            ip_version: ip_range
-                .map(|r| r.version())
-                .unwrap_or_else(views::IpVersion::v4),
+    let pool_params = params::IpPoolCreate::new(
+        IdentityMetadataCreateParams {
+            name: pool_name.parse().unwrap(),
+            description: String::from("an ip pool"),
         },
-    )
-    .await;
+        ip_range
+            .as_ref()
+            .map(|r| r.version())
+            .unwrap_or_else(views::IpVersion::v4),
+    );
+    let pool = object_create(client, "/v1/system/ip-pools", &pool_params).await;

     let ip_range = ip_range.unwrap_or_else(|| {
         use std::net::Ipv4Addr;
diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs
index d8c62a95d94..1500e6158ae 100644
--- a/nexus/tests/integration_tests/endpoints.rs
+++ b/nexus/tests/integration_tests/endpoints.rs
@@ -928,12 +928,14 @@ pub const DEMO_IP_POOLS_URL: &'static str = "/v1/system/ip-pools";
 pub static DEMO_IP_POOL_NAME: LazyLock<Name> =
     LazyLock::new(|| "default".parse().unwrap());
 pub static DEMO_IP_POOL_CREATE: LazyLock<params::IpPoolCreate> =
-    LazyLock::new(|| params::IpPoolCreate {
-        identity: IdentityMetadataCreateParams {
-            name: DEMO_IP_POOL_NAME.clone(),
-            description: String::from("an IP pool"),
-        },
-        ip_version: IpVersion::V4,
+    LazyLock::new(|| {
+        params::IpPoolCreate::new(
+            IdentityMetadataCreateParams {
+                name: DEMO_IP_POOL_NAME.clone(),
+                description: String::from("an IP pool"),
+            },
+            IpVersion::V4,
+        )
     });
 pub static DEMO_IP_POOL_PROJ_URL: LazyLock<String> = LazyLock::new(|| {
     format!(
diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs
index 65e5ede0a70..d0061f4c3a2 100644
--- a/nexus/tests/integration_tests/instances.rs
+++ b/nexus/tests/integration_tests/instances.rs
@@ -7163,7 +7163,7 @@ async fn test_instance_ephemeral_ip_no_default_pool_error(
     let url = format!("/v1/instances?project={}", PROJECT_NAME);
     let error =
         object_create_error(client, &url, &body, StatusCode::NOT_FOUND).await;
-    let msg = "not found: default IP pool for current silo".to_string();
+    let msg = "not found: default unicast IP pool for current silo".to_string();
     assert_eq!(error.message, msg);

     // same deal if you specify a pool that doesn't exist
diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs
index fa6fa2839ec..9c16e8cfb4d 100644
--- a/nexus/tests/integration_tests/ip_pools.rs
+++ b/nexus/tests/integration_tests/ip_pools.rs
@@ -43,6 +43,7 @@
 use nexus_types::external_api::params::IpPoolCreate;
 use nexus_types::external_api::params::IpPoolLinkSilo;
 use nexus_types::external_api::params::IpPoolSiloUpdate;
 use nexus_types::external_api::params::IpPoolUpdate;
+use nexus_types::external_api::shared::IpPoolType;
 use nexus_types::external_api::shared::IpRange;
 use nexus_types::external_api::shared::Ipv4Range;
 use nexus_types::external_api::shared::SiloIdentityMode;
@@ -101,13 +102,13 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) {

     // Create the pool, verify we can get it back by either listing or fetching
     // directly
-    let params = IpPoolCreate {
-        identity: IdentityMetadataCreateParams {
-            name: pool_name.parse().unwrap(),
+    let params = IpPoolCreate::new(
+        IdentityMetadataCreateParams {
+            name: String::from(pool_name).parse().unwrap(),
             description: String::from(description),
         },
-        ip_version: IpVersion::V4,
-    };
+        IpVersion::V4,
+    );
     let created_pool: IpPool =
         object_create(client, ip_pools_url, &params).await;
     assert_eq!(created_pool.identity.name, pool_name);
@@ -125,13 +126,13 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) {
     let error = object_create_error(
         client,
         ip_pools_url,
-        &params::IpPoolCreate {
-            identity: IdentityMetadataCreateParams {
+        &params::IpPoolCreate::new(
+            IdentityMetadataCreateParams {
                 name: pool_name.parse().unwrap(),
                 description: String::new(),
             },
-            ip_version: IpVersion::V4,
-        },
+            IpVersion::V4,
+        ),
         StatusCode::BAD_REQUEST,
     )
     .await;
@@ -821,13 +822,13 @@ async fn create_pool(
     name: &str,
     ip_version: IpVersion,
 ) -> IpPool {
-    let params = IpPoolCreate {
-        identity: IdentityMetadataCreateParams {
+    let params = IpPoolCreate::new(
+        IdentityMetadataCreateParams {
             name: Name::try_from(name.to_string()).unwrap(),
             description: "".to_string(),
         },
         ip_version,
-    };
+    );
     NexusRequest::objects_post(client, "/v1/system/ip-pools", &params)
         .authn_as(AuthnMode::PrivilegedUser)
         .execute()
@@ -948,13 +949,14 @@ async fn test_ip_pool_range_overlapping_ranges_fails(
     let ip_pool_add_range_url = format!("{}/add", ip_pool_ranges_url);

     // Create the pool, verify basic properties
-    let params = IpPoolCreate {
-        identity: IdentityMetadataCreateParams {
-            name: pool_name.parse().unwrap(),
+    let params = IpPoolCreate::new(
+        IdentityMetadataCreateParams {
+            name: String::from(pool_name).parse().unwrap(),
             description: String::from(description),
         },
-        ip_version: IpVersion::V4,
-    };
+        IpVersion::V4,
+    );
+
     let created_pool: IpPool =
         object_create(client, ip_pools_url, &params).await;
     assert_eq!(created_pool.identity.name, pool_name);
@@ -1107,13 +1109,13 @@ async fn test_ip_pool_range_pagination(cptestctx: &ControlPlaneTestContext) {
     let ip_pool_add_range_url = format!("{}/add", ip_pool_ranges_url);

     // Create the pool, verify basic properties
-    let params = IpPoolCreate {
-        identity: IdentityMetadataCreateParams {
-            name: pool_name.parse().unwrap(),
+    let params = IpPoolCreate::new(
+        IdentityMetadataCreateParams {
+            name: String::from(pool_name).parse().unwrap(),
             description: String::from(description),
         },
-        ip_version: IpVersion::V4,
-    };
+        IpVersion::V4,
+    );
     let created_pool: IpPool =
         object_create(client, ip_pools_url, &params).await;
     assert_eq!(created_pool.identity.name, pool_name);
@@ -1523,3 +1525,24 @@ fn assert_ranges_eq(first: &IpPoolRange, second: &IpPoolRange) {
     assert_eq!(first.range.first_address(), second.range.first_address());
     assert_eq!(first.range.last_address(), second.range.last_address());
 }
+
+#[nexus_test]
+async fn test_ip_pool_unicast_defaults(cptestctx: &ControlPlaneTestContext) {
+    let client = &cptestctx.external_client;
+
+    // Test that regular IP pool creation uses unicast defaults
+    let pool = create_pool(client, "unicast-test", IpVersion::V4).await;
+    assert_eq!(pool.pool_type, IpPoolType::Unicast);
+
+    // Test that explicitly creating with the default type still works
+    let params = IpPoolCreate::new(
+        IdentityMetadataCreateParams {
+            name: "explicit-unicast".parse().unwrap(),
+            description: "Explicit unicast pool".to_string(),
+        },
+        IpVersion::V4,
+    );
+    let pool: IpPool =
+        object_create(client, "/v1/system/ip-pools", &params).await;
+    assert_eq!(pool.pool_type, IpPoolType::Unicast);
+}
diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs
index 03e93dab5b2..3553dcfdf3a 100644
--- a/nexus/types/src/external_api/params.rs
+++ b/nexus/types/src/external_api/params.rs
@@ -1007,6 +1007,27 @@ pub struct IpPoolCreate {
     /// The default is IPv4.
     #[serde(default = "IpVersion::v4")]
     pub ip_version: IpVersion,
+    /// Type of IP pool (defaults to Unicast for backward compatibility)
+    #[serde(default)]
+    pub pool_type: shared::IpPoolType,
+}
+
+impl IpPoolCreate {
+    /// Create parameters for a unicast IP pool (the default)
+    pub fn new(
+        identity: IdentityMetadataCreateParams,
+        ip_version: IpVersion,
+    ) -> Self {
+        Self { identity, ip_version, pool_type: shared::IpPoolType::Unicast }
+    }
+
+    /// Create parameters for a multicast IP pool
+    pub fn new_multicast(
+        identity: IdentityMetadataCreateParams,
+        ip_version: IpVersion,
+    ) -> Self {
+        Self { identity, ip_version, pool_type: shared::IpPoolType::Multicast }
+    }
 }

 /// Parameters for updating an IP Pool
diff --git a/nexus/types/src/external_api/shared.rs b/nexus/types/src/external_api/shared.rs
index a678d7bb085..f357d0eaf46 100644
--- a/nexus/types/src/external_api/shared.rs
+++ b/nexus/types/src/external_api/shared.rs
@@ -742,3 +742,19 @@ impl RelayState {
             .context("json from relay state string")
     }
 }
+
+/// Type of IP pool
+#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)]
+#[serde(rename_all = "snake_case")]
+pub enum IpPoolType {
+    /// Unicast IP pool for standard IP allocations
+    Unicast,
+    /// Multicast IP pool for multicast group allocations
+    Multicast,
+}
+
+impl Default for IpPoolType {
+    fn default() -> Self {
+        Self::Unicast
+    }
+}
diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs
index f37f0fe97ed..cefe7d1e1b3 100644
--- a/nexus/types/src/external_api/views.rs
+++ b/nexus/types/src/external_api/views.rs
@@ -394,6 +394,8 @@ pub struct IpPool {
     pub identity: IdentityMetadata,
     /// The IP version for the pool.
     pub ip_version: IpVersion,
+    /// Type of IP pool (unicast or multicast)
+    pub pool_type: shared::IpPoolType,
 }

 /// The utilization of IP addresses in a pool.
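Two details worth connecting here: the `#[serde(rename_all = "snake_case")]` on `IpPoolType` is what produces the `"unicast"`/`"multicast"` wire values in the OpenAPI document below, and the ASM/SSM separation mentioned in the range-validation comments is enforced in the datastore layer, which this excerpt does not include. A sketch of how an address can be classified as SSM per RFC 4607 using the constants this change adds in common/src/address.rs (assumes `oxnet::Ipv4Net` exposes a `contains` method; the helper names are illustrative, not from this change):

    use std::net::{Ipv4Addr, Ipv6Addr};

    use omicron_common::address::{IPV4_SSM_SUBNET, IPV6_SSM_FLAG_FIELD};

    /// Illustrative only: IPv4 SSM is 232.0.0.0/8 (RFC 4607).
    fn ipv4_is_ssm(addr: Ipv4Addr) -> bool {
        IPV4_SSM_SUBNET.contains(addr)
    }

    /// Illustrative only: IPv6 SSM addresses are FF3x::/32, i.e. the flags
    /// nibble (the high nibble of the second byte) is 3.
    fn ipv6_is_ssm(addr: Ipv6Addr) -> bool {
        let octets = addr.octets();
        octets[0] == 0xff && (octets[1] >> 4) == IPV6_SSM_FLAG_FIELD
    }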
diff --git a/openapi/nexus.json b/openapi/nexus.json
index f4c4dc182ad..6f86b5d0c4b 100644
--- a/openapi/nexus.json
+++ b/openapi/nexus.json
@@ -21498,6 +21498,14 @@
             }
           ]
         },
+        "pool_type": {
+          "description": "Type of IP pool (unicast or multicast)",
+          "allOf": [
+            {
+              "$ref": "#/components/schemas/IpPoolType"
+            }
+          ]
+        },
         "time_created": {
           "description": "timestamp when this resource was created",
           "type": "string",
@@ -21514,6 +21522,7 @@
           "id",
           "ip_version",
           "name",
+          "pool_type",
           "time_created",
           "time_modified"
         ]
@@ -21536,6 +21545,15 @@
         },
         "name": {
           "$ref": "#/components/schemas/Name"
+        },
+        "pool_type": {
+          "description": "Type of IP pool (defaults to Unicast for backward compatibility)",
+          "default": "unicast",
+          "allOf": [
+            {
+              "$ref": "#/components/schemas/IpPoolType"
+            }
+          ]
         }
       },
       "required": [
@@ -21683,6 +21701,25 @@
         "is_default"
       ]
     },
+    "IpPoolType": {
+      "description": "Type of IP pool",
+      "oneOf": [
+        {
+          "description": "Unicast IP pool for standard IP allocations",
+          "type": "string",
+          "enum": [
+            "unicast"
+          ]
+        },
+        {
+          "description": "Multicast IP pool for multicast group allocations",
+          "type": "string",
+          "enum": [
+            "multicast"
+          ]
+        }
+      ]
+    },
     "IpPoolUpdate": {
       "description": "Parameters for updating an IP Pool",
       "type": "object",
diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql
index a0eb918ee25..6fc94ca29ee 100644
--- a/schema/crdb/dbinit.sql
+++ b/schema/crdb/dbinit.sql
@@ -2162,6 +2162,14 @@ CREATE TYPE IF NOT EXISTS omicron.public.ip_version AS ENUM (
     'v6'
 );

+/*
+ * IP pool types for unicast vs multicast pools
+ */
+CREATE TYPE IF NOT EXISTS omicron.public.ip_pool_type AS ENUM (
+    'unicast',
+    'multicast'
+);
+
 /*
  * An IP Pool, a collection of zero or more IP ranges for external IPs.
  */
@@ -2178,7 +2186,10 @@ CREATE TABLE IF NOT EXISTS omicron.public.ip_pool (
     rcgen INT8 NOT NULL,

     /* The IP version of the ranges contained in this pool. */
-    ip_version omicron.public.ip_version NOT NULL
+    ip_version omicron.public.ip_version NOT NULL,
+
+    /* Pool type for unicast (default) vs multicast pools. */
+    pool_type omicron.public.ip_pool_type NOT NULL DEFAULT 'unicast'
 );

 /*
@@ -2189,6 +2200,14 @@ CREATE UNIQUE INDEX IF NOT EXISTS lookup_pool_by_name ON omicron.public.ip_pool
 ) WHERE
     time_deleted IS NULL;

+/*
+ * Index on pool type for efficient filtering of unicast vs multicast pools.
+ */
+CREATE INDEX IF NOT EXISTS lookup_ip_pool_by_type ON omicron.public.ip_pool (
+    pool_type
+) WHERE
+    time_deleted IS NULL;
+
 -- The order here is most-specific first, and it matters because we use this
 -- fact to select the most specific default in the case where there is both a
 -- silo default and a fleet default. If we were to add a project type, it should
@@ -6771,7 +6790,7 @@ INSERT INTO omicron.public.db_metadata (
     version,
     target_version
 ) VALUES
-    (TRUE, NOW(), NOW(), '197.0.0', NULL)
+    (TRUE, NOW(), NOW(), '198.0.0', NULL)
 ON CONFLICT DO NOTHING;

 COMMIT;
diff --git a/schema/crdb/multicast-pool-support/up01.sql b/schema/crdb/multicast-pool-support/up01.sql
new file mode 100644
index 00000000000..fccfcd2081f
--- /dev/null
+++ b/schema/crdb/multicast-pool-support/up01.sql
@@ -0,0 +1,18 @@
+-- IP Pool multicast support: Add pool types for unicast vs multicast pools
+
+-- Add IP pool type for unicast vs multicast pools
+CREATE TYPE IF NOT EXISTS omicron.public.ip_pool_type AS ENUM (
+    'unicast',
+    'multicast'
+);
+
+-- Add pool type column to ip_pool table
+-- Defaults to 'unicast' for existing pools
+ALTER TABLE omicron.public.ip_pool
+    ADD COLUMN IF NOT EXISTS pool_type omicron.public.ip_pool_type NOT NULL DEFAULT 'unicast';
+
+-- Add index on pool_type for efficient filtering
+CREATE INDEX IF NOT EXISTS lookup_ip_pool_by_type ON omicron.public.ip_pool (
+    pool_type
+) WHERE
+    time_deleted IS NULL;
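A natural companion to `test_ip_pool_unicast_defaults`, sketched here rather than included in the change, would round-trip the multicast constructor through the same helpers. The test name is hypothetical; everything it calls (`IpPoolCreate::new_multicast`, `object_create`, the `pool_type` field on the `IpPool` view) exists in this diff:

    #[nexus_test]
    async fn test_ip_pool_multicast_create(cptestctx: &ControlPlaneTestContext) {
        let client = &cptestctx.external_client;

        // Hypothetical companion test: a pool created via `new_multicast`
        // should come back from the API with the multicast type set.
        let params = IpPoolCreate::new_multicast(
            IdentityMetadataCreateParams {
                name: "multicast-test".parse().unwrap(),
                description: "Explicit multicast pool".to_string(),
            },
            IpVersion::V4,
        );
        let pool: IpPool =
            object_create(client, "/v1/system/ip-pools", &params).await;
        assert_eq!(pool.pool_type, IpPoolType::Multicast);
    }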