diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index d7ad86f092e..01b4828e98a 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -901,6 +901,7 @@ impl JsonSchema for Hostname { // General types used to implement API resources /// Identifies a type of API resource +// NOTE: Please keep this enum in alphabetical order. #[derive( Clone, Copy, @@ -920,77 +921,77 @@ pub enum ResourceType { AddressLotBlock, AffinityGroup, AffinityGroupMember, - AntiAffinityGroup, - AntiAffinityGroupMember, Alert, AlertReceiver, AllowList, + AntiAffinityGroup, + AntiAffinityGroupMember, AuditLogEntry, BackgroundTask, - BgpConfig, BgpAnnounceSet, + BgpConfig, Blueprint, - Fleet, - Silo, - SiloUser, - SiloGroup, - SiloQuotas, - IdentityProvider, - SamlIdentityProvider, - SshKey, Certificate, ConsoleSession, - DeviceAuthRequest, - DeviceAccessToken, - Project, Dataset, + DeviceAccessToken, + DeviceAuthRequest, Disk, + Fleet, + FloatingIp, + IdentityProvider, Image, - SiloImage, - ProjectImage, Instance, - LoopbackAddress, - SiloAuthSettings, - SwitchPortSettings, - SupportBundle, - IpPool, - IpPoolResource, InstanceNetworkInterface, InternetGateway, - InternetGatewayIpPool, InternetGatewayIpAddress, + InternetGatewayIpPool, + IpPool, + IpPoolResource, + LldpLinkConfig, + LoopbackAddress, + MetricProducer, + NatEntry, + Oximeter, PhysicalDisk, + Probe, + ProbeNetworkInterface, + Project, + ProjectImage, Rack, + RoleBuiltin, + RouterRoute, + SagaDbg, + SamlIdentityProvider, Service, ServiceNetworkInterface, + Silo, + SiloAuthSettings, + SiloGroup, + SiloImage, + SiloQuotas, + SiloUser, Sled, SledInstance, SledLedger, - Switch, - SagaDbg, Snapshot, - Volume, - Vpc, - VpcFirewallRule, - VpcSubnet, - VpcRouter, - RouterRoute, - Oximeter, - MetricProducer, - RoleBuiltin, - TufRepo, + SshKey, + SupportBundle, + Switch, + SwitchPort, + SwitchPortSettings, TufArtifact, + TufRepo, TufTrustRoot, - SwitchPort, UserBuiltin, - Zpool, Vmm, - Ipv4NatEntry, - FloatingIp, - Probe, - ProbeNetworkInterface, - LldpLinkConfig, + Volume, + Vpc, + VpcFirewallRule, + VpcRouter, + VpcSubnet, WebhookSecret, + Zpool, } // IDENTITY METADATA diff --git a/dev-tools/omdb/src/bin/omdb/nexus.rs b/dev-tools/omdb/src/bin/omdb/nexus.rs index a5070d2e5de..9fdc54a9209 100644 --- a/dev-tools/omdb/src/bin/omdb/nexus.rs +++ b/dev-tools/omdb/src/bin/omdb/nexus.rs @@ -928,7 +928,7 @@ async fn cmd_nexus_background_tasks_show( "dns_config_external", "dns_servers_external", "dns_propagation_external", - "nat_v4_garbage_collector", + "nat_garbage_collector", "blueprint_loader", "blueprint_executor", ] { diff --git a/dev-tools/omdb/tests/env.out b/dev-tools/omdb/tests/env.out index 846c7d5700b..c0b9f28dfa0 100644 --- a/dev-tools/omdb/tests/env.out +++ b/dev-tools/omdb/tests/env.out @@ -128,8 +128,8 @@ task: "metrics_producer_gc" unregisters Oximeter metrics producers that have not renewed their lease -task: "nat_v4_garbage_collector" - prunes soft-deleted IPV4 NAT entries from ipv4_nat_entry table based on a +task: "nat_garbage_collector" + prunes soft-deleted NAT entries from nat_entry table based on a predetermined retention policy @@ -336,8 +336,8 @@ task: "metrics_producer_gc" unregisters Oximeter metrics producers that have not renewed their lease -task: "nat_v4_garbage_collector" - prunes soft-deleted IPV4 NAT entries from ipv4_nat_entry table based on a +task: "nat_garbage_collector" + prunes soft-deleted NAT entries from nat_entry table based on a predetermined retention policy @@ -531,8 +531,8 
@@ task: "metrics_producer_gc" unregisters Oximeter metrics producers that have not renewed their lease -task: "nat_v4_garbage_collector" - prunes soft-deleted IPV4 NAT entries from ipv4_nat_entry table based on a +task: "nat_garbage_collector" + prunes soft-deleted NAT entries from nat_entry table based on a predetermined retention policy diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index 8e29d085ea9..701561dfebe 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -340,8 +340,8 @@ task: "metrics_producer_gc" unregisters Oximeter metrics producers that have not renewed their lease -task: "nat_v4_garbage_collector" - prunes soft-deleted IPV4 NAT entries from ipv4_nat_entry table based on a +task: "nat_garbage_collector" + prunes soft-deleted NAT entries from nat_entry table based on a predetermined retention policy @@ -481,7 +481,7 @@ task: "dns_propagation_external" [::1]:REDACTED_PORT success -task: "nat_v4_garbage_collector" +task: "nat_garbage_collector" configured period: every s last completed activation: , triggered by a periodic timer firing started at (s ago) and ran for ms @@ -991,7 +991,7 @@ task: "dns_propagation_external" [::1]:REDACTED_PORT success -task: "nat_v4_garbage_collector" +task: "nat_garbage_collector" configured period: every s currently executing: no last completed activation: , triggered by a periodic timer firing diff --git a/nexus/db-model/src/ipnet.rs b/nexus/db-model/src/ipnet.rs new file mode 100644 index 00000000000..03c2d0b1ff6 --- /dev/null +++ b/nexus/db-model/src/ipnet.rs @@ -0,0 +1,113 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+
+use diesel::backend::Backend;
+use diesel::deserialize;
+use diesel::deserialize::FromSql;
+use diesel::pg::Pg;
+use diesel::serialize;
+use diesel::serialize::ToSql;
+use diesel::sql_types;
+use ipnetwork::IpNetwork;
+use serde::Deserialize;
+use serde::Serialize;
+
+#[derive(
+    Clone,
+    Copy,
+    Debug,
+    Eq,
+    PartialEq,
+    AsExpression,
+    FromSqlRow,
+    Serialize,
+    Deserialize,
+)]
+#[diesel(sql_type = sql_types::Inet)]
+pub enum IpNet {
+    V4(crate::Ipv4Net),
+    V6(crate::Ipv6Net),
+}
+
+impl ::std::fmt::Display for IpNet {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            IpNet::V4(inner) => inner.fmt(f),
+            IpNet::V6(inner) => inner.fmt(f),
+        }
+    }
+}
+
+impl From<IpNet> for ::std::net::IpAddr {
+    fn from(value: IpNet) -> Self {
+        match value {
+            IpNet::V4(inner) => ::std::net::IpAddr::V4(inner.addr()),
+            IpNet::V6(inner) => ::std::net::IpAddr::V6(inner.addr()),
+        }
+    }
+}
+
+impl From<ipnetwork::IpNetwork> for IpNet {
+    fn from(value: ipnetwork::IpNetwork) -> Self {
+        match value {
+            IpNetwork::V4(ipv4) => Self::from(oxnet::Ipv4Net::from(ipv4)),
+            IpNetwork::V6(ipv6) => Self::from(oxnet::Ipv6Net::from(ipv6)),
+        }
+    }
+}
+
+impl From<oxnet::Ipv4Net> for IpNet {
+    fn from(value: oxnet::Ipv4Net) -> Self {
+        Self::V4(crate::Ipv4Net::from(value))
+    }
+}
+
+impl From<oxnet::Ipv6Net> for IpNet {
+    fn from(value: oxnet::Ipv6Net) -> Self {
+        Self::V6(crate::Ipv6Net::from(value))
+    }
+}
+
+impl From<oxnet::IpNet> for IpNet {
+    fn from(value: oxnet::IpNet) -> Self {
+        match value {
+            oxnet::IpNet::V4(ipv4_net) => {
+                Self::V4(crate::Ipv4Net::from(ipv4_net))
+            }
+            oxnet::IpNet::V6(ipv6_net) => {
+                Self::V6(crate::Ipv6Net::from(ipv6_net))
+            }
+        }
+    }
+}
+
+impl ToSql<sql_types::Inet, Pg> for IpNet {
+    fn to_sql<'a>(
+        &'a self,
+        out: &mut serialize::Output<'a, '_, Pg>,
+    ) -> serialize::Result {
+        let inner = match self {
+            IpNet::V4(inner) => IpNetwork::V4(inner.0.into()),
+            IpNet::V6(inner) => IpNetwork::V6(inner.0.into()),
+        };
+        <IpNetwork as ToSql<sql_types::Inet, Pg>>::to_sql(
+            &inner,
+            &mut out.reborrow(),
+        )
+    }
+}
+
+impl<DB> FromSql<sql_types::Inet, DB> for IpNet
+where
+    DB: Backend,
+    IpNetwork: FromSql<sql_types::Inet, DB>,
+{
+    fn from_sql(bytes: DB::RawValue<'_>) -> deserialize::Result<Self> {
+        let inet = IpNetwork::from_sql(bytes)?;
+        match inet {
+            IpNetwork::V4(net) => Ok(Self::V4(crate::Ipv4Net(net.into()))),
+            IpNetwork::V6(net) => Ok(Self::V6(crate::Ipv6Net(net.into()))),
+        }
+    }
+}
diff --git a/nexus/db-model/src/ipv4_nat_entry.rs b/nexus/db-model/src/ipv4_nat_entry.rs
deleted file mode 100644
index eb8d3214379..00000000000
--- a/nexus/db-model/src/ipv4_nat_entry.rs
+++ /dev/null
@@ -1,76 +0,0 @@
-use super::MacAddr;
-use crate::{Ipv4Net, Ipv6Net, SqlU16, Vni};
-use chrono::{DateTime, Utc};
-use nexus_db_schema::schema::{ipv4_nat_changes, ipv4_nat_entry};
-use nexus_types::internal_api::views::Ipv4NatEntryView;
-use serde::Deserialize;
-use serde::Serialize;
-use uuid::Uuid;
-
-/// Values used to create an Ipv4NatEntry
-#[derive(Insertable, Debug, Clone, Eq, PartialEq)]
-#[diesel(table_name = ipv4_nat_entry)]
-pub struct Ipv4NatValues {
-    pub external_address: Ipv4Net,
-    pub first_port: SqlU16,
-    pub last_port: SqlU16,
-    pub sled_address: Ipv6Net,
-    pub vni: Vni,
-    pub mac: MacAddr,
-}
-
-/// Database representation of an Ipv4 NAT Entry.
-#[derive(Queryable, Debug, Clone, Selectable, Serialize, Deserialize)] -#[diesel(table_name = ipv4_nat_entry)] -pub struct Ipv4NatEntry { - pub id: Uuid, - pub external_address: Ipv4Net, - pub first_port: SqlU16, - pub last_port: SqlU16, - pub sled_address: Ipv6Net, - pub vni: Vni, - pub mac: MacAddr, - pub version_added: i64, - pub version_removed: Option, - pub time_created: DateTime, - pub time_deleted: Option>, -} - -impl Ipv4NatEntry { - pub fn first_port(&self) -> u16 { - self.first_port.into() - } - - pub fn last_port(&self) -> u16 { - self.last_port.into() - } -} - -/// Summary of changes to ipv4 nat entries. -#[derive(Queryable, Debug, Clone, Selectable, Serialize, Deserialize)] -#[diesel(table_name = ipv4_nat_changes)] -pub struct Ipv4NatChange { - pub external_address: Ipv4Net, - pub first_port: SqlU16, - pub last_port: SqlU16, - pub sled_address: Ipv6Net, - pub vni: Vni, - pub mac: MacAddr, - pub version: i64, - pub deleted: bool, -} - -impl From for Ipv4NatEntryView { - fn from(value: Ipv4NatChange) -> Self { - Self { - external_address: value.external_address.addr(), - first_port: value.first_port.into(), - last_port: value.last_port.into(), - sled_address: value.sled_address.addr(), - vni: value.vni.0, - mac: *value.mac, - gen: value.version, - deleted: value.deleted, - } - } -} diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs index 92ab5c14e15..e2084433915 100644 --- a/nexus/db-model/src/lib.rs +++ b/nexus/db-model/src/lib.rs @@ -49,6 +49,7 @@ mod instance_state; mod internet_gateway; mod inventory; mod ip_pool; +mod ipnet; mod ipv4net; pub mod ipv6; mod ipv6net; @@ -85,7 +86,7 @@ mod webhook_rx; // for join-based marker trait generation. mod deployment; mod ereport; -mod ipv4_nat_entry; +pub mod nat_entry; mod omicron_zone_config; mod quota; mod rack; @@ -187,7 +188,7 @@ pub use instance_state::*; pub use internet_gateway::*; pub use inventory::*; pub use ip_pool::*; -pub use ipv4_nat_entry::*; +pub use ipnet::*; pub use ipv4net::*; pub use ipv6::*; pub use ipv6net::*; @@ -195,6 +196,7 @@ pub use l4_port_range::*; pub use migration::*; pub use migration_state::*; pub use name::*; +pub use nat_entry::*; pub use network_interface::*; pub use oximeter_info::*; pub use oximeter_read_policy::*; diff --git a/nexus/db-model/src/nat_entry.rs b/nexus/db-model/src/nat_entry.rs new file mode 100644 index 00000000000..d5252ed6d85 --- /dev/null +++ b/nexus/db-model/src/nat_entry.rs @@ -0,0 +1,94 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! NAT entries mapping external IP addresses to their hosting sled. +//! +//! The data here represents the "Dendrite side" of NAT for external +//! addresses. It maps the external IP to information about the host sled, OPTE +//! instance, and VNI for the external address, but notably does not contain the +//! actual internal or private address that the external address is translated +//! to. +//! +//! That address is provided to OPTE instead, which implements the "other side" +//! of the address translation. It decapsulates the packet and possibly rewrites +//! the IP address on the way to its guest or public Oxide service. The data +//! here is what we need to tell Dendrite how to encapsulate the packet so that +//! it traverses the underlay and OPTE knows how to decap it. 
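For reference, a minimal sketch of how rows for this table are built under the renamed types, mirroring the constructions used in the datastore tests later in this diff. The addresses, port ranges, and MAC below are arbitrary example values, and `external::Vni::random()` is assumed to be available as it is in those tests; nothing here is new API beyond what the diff introduces.

```rust
use macaddr::MacAddr6;
use nexus_db_model::{MacAddr, NatEntryValues, Vni};
use omicron_common::api::external;

/// Example only: one IPv4 and one IPv6 NAT mapping for the same sled.
fn example_nat_entries() -> Vec<NatEntryValues> {
    let sled_address =
        oxnet::Ipv6Net::host_net("fd00:1122:3344:104::1".parse().unwrap());

    // IPv4 external address; `IpNet: From<oxnet::Ipv4Net>` does the conversion.
    let v4 = NatEntryValues {
        external_address: oxnet::Ipv4Net::host_net(
            "10.0.0.100".parse().unwrap(),
        )
        .into(),
        first_port: 0.into(),
        last_port: 999.into(),
        sled_address: sled_address.into(),
        // `Vni::random()` is what the existing datastore tests use.
        vni: Vni(external::Vni::random()),
        mac: MacAddr(external::MacAddr(MacAddr6::new(
            0xa8, 0x40, 0x25, 0xf5, 0x00, 0x01,
        ))),
    };

    // An IPv6 external address now flows through the same column.
    let v6 = NatEntryValues {
        external_address: oxnet::Ipv6Net::host_net(
            "2001:db8::1".parse().unwrap(),
        )
        .into(),
        first_port: 1000.into(),
        last_port: 1999.into(),
        ..v4.clone()
    };

    vec![v4, v6]
}
```

The only difference callers see relative to the old `Ipv4NatValues` is that `external_address` now accepts either address family through `IpNet`.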
+ +use super::MacAddr; +use crate::{IpNet, Ipv6Net, SqlU16, Vni}; +use chrono::{DateTime, Utc}; +use nexus_db_schema::schema::{nat_changes, nat_entry}; +use nexus_types::internal_api::views::NatEntryView; +use serde::Deserialize; +use serde::Serialize; +use uuid::Uuid; + +/// Values used to create a `NatEntry` +#[derive(Insertable, Debug, Clone, Eq, PartialEq)] +#[diesel(table_name = nat_entry)] +pub struct NatEntryValues { + pub external_address: IpNet, + pub first_port: SqlU16, + pub last_port: SqlU16, + pub sled_address: Ipv6Net, + pub vni: Vni, + pub mac: MacAddr, +} + +/// Database representation of a NAT Entry. +#[derive(Queryable, Debug, Clone, Selectable, Serialize, Deserialize)] +#[diesel(table_name = nat_entry)] +pub struct NatEntry { + pub id: Uuid, + pub external_address: IpNet, + pub first_port: SqlU16, + pub last_port: SqlU16, + pub sled_address: Ipv6Net, + pub vni: Vni, + pub mac: MacAddr, + pub version_added: i64, + pub version_removed: Option, + pub time_created: DateTime, + pub time_deleted: Option>, +} + +impl NatEntry { + pub fn first_port(&self) -> u16 { + self.first_port.into() + } + + pub fn last_port(&self) -> u16 { + self.last_port.into() + } +} + +/// Summary of changes to NAT entries. +#[derive(Queryable, Debug, Clone, Selectable, Serialize, Deserialize)] +#[diesel(table_name = nat_changes)] +pub struct NatChange { + pub external_address: IpNet, + pub first_port: SqlU16, + pub last_port: SqlU16, + pub sled_address: Ipv6Net, + pub vni: Vni, + pub mac: MacAddr, + pub version: i64, + pub deleted: bool, +} + +impl From for NatEntryView { + fn from(value: NatChange) -> Self { + Self { + external_address: ::std::net::IpAddr::from(value.external_address), + first_port: value.first_port.into(), + last_port: value.last_port.into(), + sled_address: value.sled_address.addr(), + vni: value.vni.0, + mac: *value.mac, + gen: value.version, + deleted: value.deleted, + } + } +} diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs index 24407614c53..42c1755e13a 100644 --- a/nexus/db-model/src/schema_versions.rs +++ b/nexus/db-model/src/schema_versions.rs @@ -16,7 +16,7 @@ use std::{collections::BTreeMap, sync::LazyLock}; /// /// This must be updated when you change the database schema. Refer to /// schema/crdb/README.adoc in the root of this repository for details. -pub const SCHEMA_VERSION: Version = Version::new(180, 0, 0); +pub const SCHEMA_VERSION: Version = Version::new(181, 0, 0); /// List of all past database schema versions, in *reverse* order /// @@ -28,6 +28,7 @@ static KNOWN_VERSIONS: LazyLock> = LazyLock::new(|| { // | leaving the first copy as an example for the next person. 
// v // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"), + KnownVersion::new(181, "rename-nat-table"), KnownVersion::new(180, "sled-cpu-family"), KnownVersion::new(179, "add-pending-mgs-updates-host-phase-1"), KnownVersion::new(178, "change-lldp-management-ip-to-inet"), diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index 24375165ec4..1d148c74200 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -73,10 +73,10 @@ mod image; pub mod instance; mod inventory; mod ip_pool; -mod ipv4_nat_entry; mod lldp; mod lookup_interface; mod migration; +mod nat_entry; mod network_interface; mod oximeter; mod oximeter_read_policy; diff --git a/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs b/nexus/db-queries/src/db/datastore/nat_entry.rs similarity index 72% rename from nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs rename to nexus/db-queries/src/db/datastore/nat_entry.rs index 5603ee3c269..d0a7b5f242a 100644 --- a/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs +++ b/nexus/db-queries/src/db/datastore/nat_entry.rs @@ -1,6 +1,15 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Queries for manipulating the NAT entry tables. +//! +//! See [`nexus_db_model::nat_entry`] for a discussion of the use of this +//! data. + use super::DataStore; use crate::context::OpContext; -use crate::db::model::{Ipv4NatEntry, Ipv4NatValues}; +use crate::db::model::{NatEntry, NatEntryValues}; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::{DateTime, Utc}; use diesel::prelude::*; @@ -8,8 +17,8 @@ use diesel::sql_types::BigInt; use nexus_db_errors::ErrorHandler; use nexus_db_errors::public_error_from_diesel; use nexus_db_model::ExternalIp; -use nexus_db_model::Ipv4NatChange; -use nexus_types::internal_api::views::Ipv4NatEntryView; +use nexus_db_model::NatChange; +use nexus_types::internal_api::views::NatEntryView; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; @@ -23,19 +32,19 @@ impl DataStore { /// Currently used to ensure that a NAT entry exists for an Instance. /// This SHOULD NOT be directly used to create service zone nat entries, /// as they are updated via a background task. - pub async fn ensure_ipv4_nat_entry( + pub async fn ensure_nat_entry( &self, opctx: &OpContext, - nat_entry: Ipv4NatValues, - ) -> CreateResult { + nat_entry: NatEntryValues, + ) -> CreateResult { use diesel::sql_types; - use nexus_db_schema::schema::ipv4_nat_entry::dsl; + use nexus_db_schema::schema::nat_entry::dsl; // Look up any NAT entries that already have the exact parameters // we're trying to INSERT. // We want to return any existing entry, but not to mask the UniqueViolation // when trying to use an existing IP + port range with a different target. 
- let matching_entry_subquery = dsl::ipv4_nat_entry + let matching_entry_subquery = dsl::nat_entry .filter(dsl::external_address.eq(nat_entry.external_address)) .filter(dsl::first_port.eq(nat_entry.first_port)) .filter(dsl::last_port.eq(nat_entry.last_port)) @@ -64,7 +73,7 @@ impl DataStore { )) .filter(diesel::dsl::not(diesel::dsl::exists(matching_entry_subquery))); - let out = diesel::insert_into(dsl::ipv4_nat_entry) + let out = diesel::insert_into(dsl::nat_entry) .values(new_entry_subquery) .into_columns(( dsl::external_address, @@ -74,7 +83,7 @@ impl DataStore { dsl::vni, dsl::mac, )) - .returning(Ipv4NatEntry::as_returning()) + .returning(NatEntry::as_returning()) .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await; @@ -88,7 +97,7 @@ impl DataStore { // - Can't ON CONFLICT with a partial constraint, so we can't // do a no-op write and return the row that way either. // So, we do another lookup. - self.ipv4_nat_find_by_values(opctx, nat_entry).await + self.nat_find_by_values(opctx, nat_entry).await } Err(e) => Err(public_error_from_diesel(e, ErrorHandler::Server)), } @@ -101,20 +110,20 @@ impl DataStore { /// Creates missing entries idempotently. /// /// returns the number of records added - pub async fn ipv4_nat_sync_service_zones( + pub async fn nat_sync_service_zones( &self, opctx: &OpContext, - nat_entries: &[Ipv4NatValues], + nat_entries: &[NatEntryValues], ) -> CreateResult { - use nexus_db_schema::schema::ipv4_nat_entry::dsl; + use nexus_db_schema::schema::nat_entry::dsl; let vni = nexus_db_model::Vni(Vni::SERVICES_VNI); // find all active nat entries with the services vni - let result: Vec = dsl::ipv4_nat_entry + let result: Vec = dsl::nat_entry .filter(dsl::vni.eq(vni)) .filter(dsl::version_removed.is_null()) - .select(Ipv4NatEntry::as_select()) + .select(NatEntry::as_select()) .load_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; @@ -124,7 +133,7 @@ impl DataStore { let mut delete: Vec<_> = vec![]; for db_entry in result.iter() { - let values = Ipv4NatValues { + let values = NatEntryValues { external_address: db_entry.external_address, first_port: db_entry.first_port, last_port: db_entry.last_port, @@ -142,7 +151,7 @@ impl DataStore { // delete entries that are not present in requested entries for entry in delete { - if let Err(e) = self.ipv4_nat_delete(opctx, entry).await { + if let Err(e) = self.nat_delete(opctx, entry).await { error!( opctx.log, "failed to delete service zone nat entry"; @@ -159,9 +168,7 @@ impl DataStore { // insert nat_entries for entry in add { - if let Err(e) = - self.ensure_ipv4_nat_entry(opctx, entry.clone()).await - { + if let Err(e) = self.ensure_nat_entry(opctx, entry.clone()).await { error!( opctx.log, "failed to ensure service zone nat entry"; @@ -176,16 +183,21 @@ impl DataStore { Ok(count) } - pub async fn ipv4_nat_delete( + /// Mark the provided NAT entry as removed in the database. + /// + /// This soft-deletes the entry and sets the `version_removed` column. The + /// garbage collection background task is responsible for hard-deleting + /// stale entries. 
+    pub async fn nat_delete(
         &self,
         opctx: &OpContext,
-        nat_entry: &Ipv4NatEntry,
+        nat_entry: &NatEntry,
     ) -> DeleteResult {
-        use nexus_db_schema::schema::ipv4_nat_entry::dsl;
+        use nexus_db_schema::schema::nat_entry::dsl;
 
-        let updated_rows = diesel::update(dsl::ipv4_nat_entry)
+        let updated_rows = diesel::update(dsl::nat_entry)
             .set((
-                dsl::version_removed.eq(ipv4_nat_next_version().nullable()),
+                dsl::version_removed.eq(nat_next_version().nullable()),
                 dsl::time_deleted.eq(Utc::now()),
             ))
             .filter(dsl::time_deleted.is_null())
@@ -198,7 +210,7 @@ impl DataStore {
 
         if updated_rows == 0 {
             return Err(Error::ObjectNotFound {
-                type_name: ResourceType::Ipv4NatEntry,
+                type_name: ResourceType::NatEntry,
                 lookup_type: LookupType::ByCompositeId(
                     "id, version_added".to_string(),
                 ),
@@ -207,38 +219,22 @@ impl DataStore {
         Ok(())
     }
 
-    pub async fn ipv4_nat_find_by_id(
-        &self,
-        opctx: &OpContext,
-        id: uuid::Uuid,
-    ) -> LookupResult<Ipv4NatEntry> {
-        use nexus_db_schema::schema::ipv4_nat_entry::dsl;
-
-        let result = dsl::ipv4_nat_entry
-            .filter(dsl::id.eq(id))
-            .select(Ipv4NatEntry::as_select())
-            .limit(1)
-            .load_async(&*self.pool_connection_authorized(opctx).await?)
-            .await
-            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
-
-        if let Some(nat_entry) = result.first() {
-            Ok(nat_entry.clone())
-        } else {
-            Err(Error::invalid_request("no matching records"))
-        }
-    }
-
-    pub async fn ipv4_nat_delete_by_external_ip(
+    /// Mark the NAT entry for the provided external IP address as removed in
+    /// the database.
+    ///
+    /// This soft-deletes the entry and sets the `version_removed` column. The
+    /// garbage collection background task is responsible for hard-deleting
+    /// stale entries.
+    pub async fn nat_delete_by_external_ip(
         &self,
         opctx: &OpContext,
         external_ip: &ExternalIp,
     ) -> DeleteResult {
-        use nexus_db_schema::schema::ipv4_nat_entry::dsl;
+        use nexus_db_schema::schema::nat_entry::dsl;
 
-        let updated_rows = diesel::update(dsl::ipv4_nat_entry)
+        let updated_rows = diesel::update(dsl::nat_entry)
             .set((
-                dsl::version_removed.eq(ipv4_nat_next_version().nullable()),
+                dsl::version_removed.eq(nat_next_version().nullable()),
                 dsl::time_deleted.eq(Utc::now()),
             ))
             .filter(dsl::time_deleted.is_null())
@@ -252,7 +248,7 @@ impl DataStore {
 
         if updated_rows == 0 {
             return Err(Error::ObjectNotFound {
-                type_name: ResourceType::Ipv4NatEntry,
+                type_name: ResourceType::NatEntry,
                 lookup_type: LookupType::ByCompositeId(
                     "external_ip, first_port, last_port".to_string(),
                 ),
@@ -261,13 +257,14 @@ impl DataStore {
         Ok(())
     }
 
-    pub async fn ipv4_nat_find_by_values(
+    /// Look up a NAT entry by its associated data.
+    pub async fn nat_find_by_values(
         &self,
         opctx: &OpContext,
-        values: Ipv4NatValues,
-    ) -> LookupResult<Ipv4NatEntry> {
-        use nexus_db_schema::schema::ipv4_nat_entry::dsl;
-        let result = dsl::ipv4_nat_entry
+        values: NatEntryValues,
+    ) -> LookupResult<NatEntry> {
+        use nexus_db_schema::schema::nat_entry::dsl;
+        let result = dsl::nat_entry
             .filter(dsl::external_address.eq(values.external_address))
             .filter(dsl::first_port.eq(values.first_port))
             .filter(dsl::last_port.eq(values.last_port))
@@ -275,7 +272,7 @@ impl DataStore {
             .filter(dsl::sled_address.eq(values.sled_address))
             .filter(dsl::vni.eq(values.vni))
             .filter(dsl::time_deleted.is_null())
-            .select(Ipv4NatEntry::as_select())
+            .select(NatEntry::as_select())
             .limit(1)
             .load_async(&*self.pool_connection_authorized(opctx).await?)
             .await
@@ -288,22 +285,24 @@ impl DataStore {
         }
     }
 
-    pub async fn ipv4_nat_list_since_version(
+    /// List all NAT entries whose `version_added` or `version_removed` is
+    /// strictly greater than `version`.
+    pub async fn nat_list_since_version(
         &self,
         opctx: &OpContext,
         version: i64,
         limit: u32,
-    ) -> ListResultVec<Ipv4NatEntry> {
-        use nexus_db_schema::schema::ipv4_nat_entry::dsl;
+    ) -> ListResultVec<NatEntry> {
+        use nexus_db_schema::schema::nat_entry::dsl;
 
-        let list = dsl::ipv4_nat_entry
+        let list = dsl::nat_entry
             .filter(
                 dsl::version_added
                     .gt(version)
                     .or(dsl::version_removed.gt(version)),
             )
             .limit(i64::from(limit))
-            .select(Ipv4NatEntry::as_select())
+            .select(NatEntry::as_select())
             .load_async(&*self.pool_connection_authorized(opctx).await?)
             .await
             .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
@@ -311,35 +310,38 @@ impl DataStore {
         Ok(list)
     }
 
-    pub async fn ipv4_nat_changeset(
+    /// Return a page of NAT changes, ordered by change version, whose
+    /// `version` is strictly greater than the provided value.
+    pub async fn nat_changeset(
         &self,
         opctx: &OpContext,
         version: i64,
         limit: u32,
-    ) -> ListResultVec<Ipv4NatEntryView> {
-        use nexus_db_schema::schema::ipv4_nat_changes::dsl;
+    ) -> ListResultVec<NatEntryView> {
+        use nexus_db_schema::schema::nat_changes::dsl;
 
-        let nat_changes = dsl::ipv4_nat_changes
+        let nat_changes = dsl::nat_changes
             .filter(dsl::version.gt(version))
             .limit(i64::from(limit))
             .order_by(dsl::version)
-            .select(Ipv4NatChange::as_select())
+            .select(NatChange::as_select())
             .load_async(&*self.pool_connection_authorized(opctx).await?)
             .await
             .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
-        let nat_entries: Vec<Ipv4NatEntryView> =
+        let nat_entries: Vec<NatEntryView> =
             nat_changes.iter().map(|e| e.clone().into()).collect();
         Ok(nat_entries)
     }
 
-    pub async fn ipv4_nat_current_version(
+    /// Return the _latest_ version across all entries in the NAT table.
+    pub async fn nat_current_version(
         &self,
         opctx: &OpContext,
     ) -> LookupResult<i64> {
-        use nexus_db_schema::schema::ipv4_nat_version::dsl;
+        use nexus_db_schema::schema::nat_version::dsl;
 
-        let latest: Option<i64> = dsl::ipv4_nat_version
+        let latest: Option<i64> = dsl::nat_version
            .select(diesel::dsl::max(dsl::last_value))
            .first_async(&*self.pool_connection_authorized(opctx).await?)
            .await
@@ -351,15 +353,18 @@ impl DataStore {
         }
     }
 
-    pub async fn ipv4_nat_cleanup(
+    /// Hard-delete NAT table entries whose `version_removed` is strictly less
+    /// than `version` and `time_deleted` is strictly earlier than
+    /// `before_timestamp`.
+    pub async fn nat_cleanup(
         &self,
         opctx: &OpContext,
         version: i64,
         before_timestamp: DateTime<Utc>,
     ) -> DeleteResult {
-        use nexus_db_schema::schema::ipv4_nat_entry::dsl;
+        use nexus_db_schema::schema::nat_entry::dsl;
 
-        diesel::delete(dsl::ipv4_nat_entry)
+        diesel::delete(dsl::nat_entry)
            .filter(dsl::version_removed.lt(version))
            .filter(dsl::time_deleted.lt(before_timestamp))
            .execute_async(&*self.pool_connection_authorized(opctx).await?)
@@ -370,8 +375,9 @@
     }
 }
-fn ipv4_nat_next_version() -> diesel::expression::SqlLiteral<BigInt> {
-    diesel::dsl::sql::<BigInt>("nextval('omicron.public.ipv4_nat_version')")
+/// Return the next version for the NAT table state.
+fn nat_next_version() -> diesel::expression::SqlLiteral { + diesel::dsl::sql::("nextval('omicron.public.nat_version')") } #[cfg(test)] @@ -380,7 +386,7 @@ mod test { use crate::db::pub_test_utils::TestDatabase; use chrono::Utc; - use nexus_db_model::{Ipv4NatEntry, Ipv4NatValues, MacAddr, Vni}; + use nexus_db_model::{MacAddr, NatEntry, NatEntryValues, Vni}; use omicron_common::api::external; use omicron_test_utils::dev; use rand::seq::IteratorRandom; @@ -388,31 +394,28 @@ mod test { // Test our ability to track additions and deletions since a given version number #[tokio::test] async fn nat_version_tracking() { - let logctx = dev::test_setup_log("test_nat_version_tracking"); + let logctx = dev::test_setup_log("nat_version_tracking"); let db = TestDatabase::new_with_datastore(&logctx.log).await; let (opctx, datastore) = (db.opctx(), db.datastore()); // We should not have any NAT entries at this moment let initial_state = - datastore.ipv4_nat_list_since_version(&opctx, 0, 10).await.unwrap(); + datastore.nat_list_since_version(&opctx, 0, 10).await.unwrap(); assert!(initial_state.is_empty()); - assert_eq!( - datastore.ipv4_nat_current_version(&opctx).await.unwrap(), - 0 - ); + assert_eq!(datastore.nat_current_version(&opctx).await.unwrap(), 0); // Each change (creation / deletion) to the NAT table should increment the // version number of the row in the NAT table - let external_address = + let external_address1 = oxnet::Ipv4Net::host_net("10.0.0.100".parse().unwrap()); let sled_address = oxnet::Ipv6Net::host_net("fd00:1122:3344:104::1".parse().unwrap()); // Add a nat entry. - let nat1 = Ipv4NatValues { - external_address: external_address.into(), + let nat1 = NatEntryValues { + external_address: external_address1.into(), first_port: 0.into(), last_port: 999.into(), sled_address: sled_address.into(), @@ -422,25 +425,24 @@ mod test { ), }; - datastore.ensure_ipv4_nat_entry(&opctx, nat1.clone()).await.unwrap(); + datastore.ensure_nat_entry(&opctx, nat1.clone()).await.unwrap(); let first_entry = - datastore.ipv4_nat_find_by_values(&opctx, nat1).await.unwrap(); + datastore.nat_find_by_values(&opctx, nat1).await.unwrap(); let nat_entries = - datastore.ipv4_nat_list_since_version(&opctx, 0, 10).await.unwrap(); + datastore.nat_list_since_version(&opctx, 0, 10).await.unwrap(); // The NAT table has undergone one change. One entry has been added, // none deleted, so we should be at version 1. assert_eq!(nat_entries.len(), 1); assert_eq!(nat_entries.last().unwrap().version_added, 1); - assert_eq!( - datastore.ipv4_nat_current_version(&opctx).await.unwrap(), - 1 - ); + assert_eq!(datastore.nat_current_version(&opctx).await.unwrap(), 1); // Add another nat entry. - let nat2 = Ipv4NatValues { - external_address: external_address.into(), + let external_address2 = + oxnet::Ipv6Net::host_net("2001:db8::1".parse().unwrap()); + let nat2 = NatEntryValues { + external_address: external_address2.into(), first_port: 1000.into(), last_port: 1999.into(), sled_address: sled_address.into(), @@ -450,10 +452,10 @@ mod test { ), }; - datastore.ensure_ipv4_nat_entry(&opctx, nat2).await.unwrap(); + datastore.ensure_nat_entry(&opctx, nat2).await.unwrap(); let nat_entries = - datastore.ipv4_nat_list_since_version(&opctx, 0, 10).await.unwrap(); + datastore.nat_list_since_version(&opctx, 0, 10).await.unwrap(); // The NAT table has undergone two changes. Two entries have been // added, none deleted, so we should be at version 2. 
@@ -461,32 +463,26 @@ mod test { nat_entries.iter().find(|e| e.version_added == 2).unwrap(); assert_eq!(nat_entries.len(), 2); assert_eq!(nat_entry.version_added, 2); - assert_eq!( - datastore.ipv4_nat_current_version(&opctx).await.unwrap(), - 2 - ); + assert_eq!(datastore.nat_current_version(&opctx).await.unwrap(), 2); // Test Cleanup logic // Cleanup should only perma-delete entries that are older than a // specified version number and whose `time_deleted` field is // older than a specified age. let time_cutoff = Utc::now(); - datastore.ipv4_nat_cleanup(&opctx, 2, time_cutoff).await.unwrap(); + datastore.nat_cleanup(&opctx, 2, time_cutoff).await.unwrap(); // Nothing should have changed (no records currently marked for deletion) let nat_entries = - datastore.ipv4_nat_list_since_version(&opctx, 0, 10).await.unwrap(); + datastore.nat_list_since_version(&opctx, 0, 10).await.unwrap(); assert_eq!(nat_entries.len(), 2); - assert_eq!( - datastore.ipv4_nat_current_version(&opctx).await.unwrap(), - 2 - ); + assert_eq!(datastore.nat_current_version(&opctx).await.unwrap(), 2); // Delete the first nat entry. It should show up as a later version number. - datastore.ipv4_nat_delete(&opctx, &first_entry).await.unwrap(); + datastore.nat_delete(&opctx, &first_entry).await.unwrap(); let nat_entries = - datastore.ipv4_nat_list_since_version(&opctx, 0, 10).await.unwrap(); + datastore.nat_list_since_version(&opctx, 0, 10).await.unwrap(); // The NAT table has undergone three changes. Two entries have been // added, one deleted, so we should be at version 3. Since the @@ -497,44 +493,35 @@ mod test { assert_eq!(nat_entries.len(), 2); assert_eq!(nat_entry.version_removed, Some(3)); assert_eq!(nat_entry.id, first_entry.id); - assert_eq!( - datastore.ipv4_nat_current_version(&opctx).await.unwrap(), - 3 - ); + assert_eq!(datastore.nat_current_version(&opctx).await.unwrap(), 3); // Try cleaning up with the old version and time cutoff values - datastore.ipv4_nat_cleanup(&opctx, 2, time_cutoff).await.unwrap(); + datastore.nat_cleanup(&opctx, 2, time_cutoff).await.unwrap(); // Try cleaning up with a greater version and old time cutoff values - datastore.ipv4_nat_cleanup(&opctx, 6, time_cutoff).await.unwrap(); + datastore.nat_cleanup(&opctx, 6, time_cutoff).await.unwrap(); // Try cleaning up with a older version and newer time cutoff values - datastore.ipv4_nat_cleanup(&opctx, 2, Utc::now()).await.unwrap(); + datastore.nat_cleanup(&opctx, 2, Utc::now()).await.unwrap(); // Both records should still exist (soft deleted record is newer than cutoff // values ) let nat_entries = - datastore.ipv4_nat_list_since_version(&opctx, 0, 10).await.unwrap(); + datastore.nat_list_since_version(&opctx, 0, 10).await.unwrap(); assert_eq!(nat_entries.len(), 2); - assert_eq!( - datastore.ipv4_nat_current_version(&opctx).await.unwrap(), - 3 - ); + assert_eq!(datastore.nat_current_version(&opctx).await.unwrap(), 3); // Try cleaning up with a both cutoff values increased - datastore.ipv4_nat_cleanup(&opctx, 4, Utc::now()).await.unwrap(); + datastore.nat_cleanup(&opctx, 4, Utc::now()).await.unwrap(); // Soft deleted NAT entry should be removed from the table let nat_entries = - datastore.ipv4_nat_list_since_version(&opctx, 0, 10).await.unwrap(); + datastore.nat_list_since_version(&opctx, 0, 10).await.unwrap(); assert_eq!(nat_entries.len(), 1); // version should be unchanged - assert_eq!( - datastore.ipv4_nat_current_version(&opctx).await.unwrap(), - 3 - ); + assert_eq!(datastore.nat_current_version(&opctx).await.unwrap(), 3); 
db.terminate().await; logctx.cleanup_successful(); @@ -545,19 +532,17 @@ mod test { /// set of properties, but allow multiple deleted nat entries for the same set /// of properties. async fn table_allows_unique_active_multiple_deleted() { - let logctx = dev::test_setup_log("test_nat_version_tracking"); + let logctx = + dev::test_setup_log("table_allows_unique_active_multiple_deleted"); let db = TestDatabase::new_with_datastore(&logctx.log).await; let (opctx, datastore) = (db.opctx(), db.datastore()); // We should not have any NAT entries at this moment let initial_state = - datastore.ipv4_nat_list_since_version(&opctx, 0, 10).await.unwrap(); + datastore.nat_list_since_version(&opctx, 0, 10).await.unwrap(); assert!(initial_state.is_empty()); - assert_eq!( - datastore.ipv4_nat_current_version(&opctx).await.unwrap(), - 0 - ); + assert_eq!(datastore.nat_current_version(&opctx).await.unwrap(), 0); // Each change (creation / deletion) to the NAT table should increment the // version number of the row in the NAT table @@ -568,7 +553,7 @@ mod test { oxnet::Ipv6Net::host_net("fd00:1122:3344:104::1".parse().unwrap()); // Add a nat entry. - let nat1 = Ipv4NatValues { + let nat1 = NatEntryValues { external_address: external_address.into(), first_port: 0.into(), last_port: 999.into(), @@ -579,106 +564,92 @@ mod test { ), }; - datastore.ensure_ipv4_nat_entry(&opctx, nat1.clone()).await.unwrap(); + datastore.ensure_nat_entry(&opctx, nat1.clone()).await.unwrap(); // Try to add it again. It should still only result in a single entry. - datastore.ensure_ipv4_nat_entry(&opctx, nat1.clone()).await.unwrap(); - let first_entry = datastore - .ipv4_nat_find_by_values(&opctx, nat1.clone()) - .await - .unwrap(); + datastore.ensure_nat_entry(&opctx, nat1.clone()).await.unwrap(); + let first_entry = + datastore.nat_find_by_values(&opctx, nat1.clone()).await.unwrap(); let nat_entries = - datastore.ipv4_nat_list_since_version(&opctx, 0, 10).await.unwrap(); + datastore.nat_list_since_version(&opctx, 0, 10).await.unwrap(); // The NAT table has undergone one change. One entry has been added, // none deleted, so we should be at version 1. assert_eq!(nat_entries.len(), 1); assert_eq!(nat_entries.last().unwrap().version_added, 1); - assert_eq!( - datastore.ipv4_nat_current_version(&opctx).await.unwrap(), - 1 - ); + assert_eq!(datastore.nat_current_version(&opctx).await.unwrap(), 1); - datastore.ipv4_nat_delete(&opctx, &first_entry).await.unwrap(); + datastore.nat_delete(&opctx, &first_entry).await.unwrap(); // The NAT table has undergone two changes. One entry has been added, // then deleted, so we should be at version 2. let nat_entries = datastore - .ipv4_nat_list_since_version(&opctx, 0, 10) + .nat_list_since_version(&opctx, 0, 10) .await .unwrap() .into_iter(); - let active: Vec = nat_entries + let active: Vec = nat_entries .clone() .filter(|entry| entry.version_removed.is_none()) .collect(); - let inactive: Vec = nat_entries + let inactive: Vec = nat_entries .filter(|entry| entry.version_removed.is_some()) .collect(); assert!(active.is_empty()); assert_eq!(inactive.len(), 1); - assert_eq!( - datastore.ipv4_nat_current_version(&opctx).await.unwrap(), - 2 - ); + assert_eq!(datastore.nat_current_version(&opctx).await.unwrap(), 2); // Add the same entry back. This simulates the behavior we will see // when stopping and then restarting an instance. 
- datastore.ensure_ipv4_nat_entry(&opctx, nat1.clone()).await.unwrap(); + datastore.ensure_nat_entry(&opctx, nat1.clone()).await.unwrap(); // The NAT table has undergone three changes. let nat_entries = datastore - .ipv4_nat_list_since_version(&opctx, 0, 10) + .nat_list_since_version(&opctx, 0, 10) .await .unwrap() .into_iter(); - let active: Vec = nat_entries + let active: Vec = nat_entries .clone() .filter(|entry| entry.version_removed.is_none()) .collect(); - let inactive: Vec = nat_entries + let inactive: Vec = nat_entries .filter(|entry| entry.version_removed.is_some()) .collect(); assert_eq!(active.len(), 1); assert_eq!(inactive.len(), 1); - assert_eq!( - datastore.ipv4_nat_current_version(&opctx).await.unwrap(), - 3 - ); + assert_eq!(datastore.nat_current_version(&opctx).await.unwrap(), 3); let second_entry = - datastore.ipv4_nat_find_by_values(&opctx, nat1).await.unwrap(); - datastore.ipv4_nat_delete(&opctx, &second_entry).await.unwrap(); + datastore.nat_find_by_values(&opctx, nat1).await.unwrap(); + datastore.nat_delete(&opctx, &second_entry).await.unwrap(); // The NAT table has undergone four changes let nat_entries = datastore - .ipv4_nat_list_since_version(&opctx, 0, 10) + .nat_list_since_version(&opctx, 0, 10) .await .unwrap() .into_iter(); - let active: Vec = nat_entries + let active: Vec = nat_entries .clone() .filter(|entry| entry.version_removed.is_none()) .collect(); - let inactive: Vec = nat_entries + let inactive: Vec = nat_entries .filter(|entry| entry.version_removed.is_some()) .collect(); assert_eq!(active.len(), 0); assert_eq!(inactive.len(), 2); - assert_eq!( - datastore.ipv4_nat_current_version(&opctx).await.unwrap(), - 4 - ); + assert_eq!(datastore.nat_current_version(&opctx).await.unwrap(), 4); db.terminate().await; logctx.cleanup_successful(); @@ -686,20 +657,17 @@ mod test { // Test our ability to reconcile a set of service zone nat entries #[tokio::test] - async fn ipv4_nat_sync_service_zones() { - let logctx = dev::test_setup_log("ipv4_nat_sync_service_zones"); + async fn nat_sync_service_zones() { + let logctx = dev::test_setup_log("nat_sync_service_zones"); let db = TestDatabase::new_with_datastore(&logctx.log).await; let (opctx, datastore) = (db.opctx(), db.datastore()); // We should not have any NAT entries at this moment let initial_state = - datastore.ipv4_nat_list_since_version(&opctx, 0, 10).await.unwrap(); + datastore.nat_list_since_version(&opctx, 0, 10).await.unwrap(); assert!(initial_state.is_empty()); - assert_eq!( - datastore.ipv4_nat_current_version(&opctx).await.unwrap(), - 0 - ); + assert_eq!(datastore.nat_current_version(&opctx).await.unwrap(), 0); // create two nat entries: // 1. an entry should be deleted during the next sync @@ -712,7 +680,7 @@ mod test { oxnet::Ipv6Net::host_net("fd00:1122:3344:104::1".parse().unwrap()); // Add a nat entry. 
- let nat1 = Ipv4NatValues { + let nat1 = NatEntryValues { external_address: external_address.into(), first_port: 0.into(), last_port: 999.into(), @@ -723,17 +691,17 @@ mod test { ), }; - let nat2 = Ipv4NatValues { + let nat2 = NatEntryValues { first_port: 1000.into(), last_port: 1999.into(), ..nat1 }; - datastore.ensure_ipv4_nat_entry(&opctx, nat1.clone()).await.unwrap(); - datastore.ensure_ipv4_nat_entry(&opctx, nat2.clone()).await.unwrap(); + datastore.ensure_nat_entry(&opctx, nat1.clone()).await.unwrap(); + datastore.ensure_nat_entry(&opctx, nat2.clone()).await.unwrap(); let db_entries = - datastore.ipv4_nat_list_since_version(&opctx, 0, 10).await.unwrap(); + datastore.nat_list_since_version(&opctx, 0, 10).await.unwrap(); assert_eq!(db_entries.len(), 2); @@ -741,14 +709,14 @@ mod test { // 1. a nat entry that already exists // 2. a nat entry that does not already exist - let nat3 = Ipv4NatValues { + let nat3 = NatEntryValues { first_port: 2000.into(), last_port: 2999.into(), ..nat2 }; datastore - .ipv4_nat_sync_service_zones(&opctx, &[nat2.clone(), nat3.clone()]) + .nat_sync_service_zones(&opctx, &[nat2.clone(), nat3.clone()]) .await .unwrap(); @@ -757,7 +725,7 @@ mod test { // 2. the old one that "survived" the last sync // 3. a new one that was added during the last sync let db_entries = - datastore.ipv4_nat_list_since_version(&opctx, 0, 10).await.unwrap(); + datastore.nat_list_since_version(&opctx, 0, 10).await.unwrap(); assert_eq!(db_entries.len(), 3); @@ -781,7 +749,7 @@ mod test { // add nat1 back // this simulates a zone leaving and then returning, i.e. when a sled gets restarted datastore - .ipv4_nat_sync_service_zones( + .nat_sync_service_zones( &opctx, &[nat1.clone(), nat2.clone(), nat3.clone()], ) @@ -790,7 +758,7 @@ mod test { // we should have four nat entries in the db let db_entries = - datastore.ipv4_nat_list_since_version(&opctx, 0, 10).await.unwrap(); + datastore.nat_list_since_version(&opctx, 0, 10).await.unwrap(); assert_eq!(db_entries.len(), 4); @@ -808,24 +776,21 @@ mod test { // Test our ability to return all changes interleaved in the correct order #[tokio::test] - async fn ipv4_nat_changeset() { - let logctx = dev::test_setup_log("test_nat_version_tracking"); + async fn nat_changeset() { + let logctx = dev::test_setup_log("nat_changeset"); let db = TestDatabase::new_with_datastore(&logctx.log).await; let (opctx, datastore) = (db.opctx(), db.datastore()); // We should not have any NAT entries at this moment let initial_state = - datastore.ipv4_nat_list_since_version(&opctx, 0, 10).await.unwrap(); + datastore.nat_list_since_version(&opctx, 0, 10).await.unwrap(); assert!(initial_state.is_empty()); - assert_eq!( - datastore.ipv4_nat_current_version(&opctx).await.unwrap(), - 0 - ); + assert_eq!(datastore.nat_current_version(&opctx).await.unwrap(), 0); let addresses = (0..=255).map(|i| { let addr = Ipv4Addr::new(10, 0, 0, i); - let net = oxnet::Ipv4Net::new(addr, 32).unwrap(); + let net = oxnet::Ipv4Net::host_net(addr); net }); @@ -834,7 +799,7 @@ mod test { let nat_entries = addresses.map(|external_address| { // build a bunch of nat entries - Ipv4NatValues { + NatEntryValues { external_address: external_address.into(), first_port: u16::MIN.into(), last_port: u16::MAX.into(), @@ -849,7 +814,7 @@ mod test { // create the nat entries for entry in nat_entries { let result = datastore - .ensure_ipv4_nat_entry(&opctx, entry.clone()) + .ensure_nat_entry(&opctx, entry.clone()) .await .unwrap(); @@ -860,15 +825,13 @@ mod test { for entry in 
db_records.iter().choose_multiple(&mut rand::thread_rng(), 50) { - datastore.ipv4_nat_delete(&opctx, entry).await.unwrap(); + datastore.nat_delete(&opctx, entry).await.unwrap(); } // get the new state of all nat entries // note that this is not the method under test - let db_records = datastore - .ipv4_nat_list_since_version(&opctx, 0, 300) - .await - .unwrap(); + let db_records = + datastore.nat_list_since_version(&opctx, 0, 300).await.unwrap(); // Count the actual number of changes seen. // This check is required because we _were_ getting changes in ascending order, @@ -881,7 +844,7 @@ mod test { let mut version = 0; let limit = 100; let mut changes = - datastore.ipv4_nat_changeset(&opctx, version, limit).await.unwrap(); + datastore.nat_changeset(&opctx, version, limit).await.unwrap(); while !changes.is_empty() { // check ordering @@ -901,7 +864,7 @@ mod test { .expect("did not find a deleted nat entry with a matching version number"); assert_eq!( - deleted_nat.external_address.addr(), + std::net::IpAddr::from(deleted_nat.external_address), change.external_address ); assert_eq!( @@ -926,7 +889,7 @@ mod test { assert!(added_nat.version_removed.is_none()); assert_eq!( - added_nat.external_address.addr(), + std::net::IpAddr::from(added_nat.external_address), change.external_address ); assert_eq!(added_nat.first_port, change.first_port.into()); @@ -944,10 +907,8 @@ mod test { total_changes += changes.len(); version = changes.last().unwrap().gen; - changes = datastore - .ipv4_nat_changeset(&opctx, version, limit) - .await - .unwrap(); + changes = + datastore.nat_changeset(&opctx, version, limit).await.unwrap(); } // did we see everything? diff --git a/nexus/db-schema/src/schema.rs b/nexus/db-schema/src/schema.rs index 0cea5de1cd8..c0f2fe5f833 100644 --- a/nexus/db-schema/src/schema.rs +++ b/nexus/db-schema/src/schema.rs @@ -653,7 +653,7 @@ table! { } table! { - ipv4_nat_entry (id) { + nat_entry (id) { id -> Uuid, external_address -> Inet, first_port -> Int4, @@ -668,9 +668,9 @@ table! { } } -// View used for summarizing changes to ipv4_nat_entry +// View used for summarizing changes to nat_entry table! { - ipv4_nat_changes (version) { + nat_changes (version) { external_address -> Inet, first_port -> Int4, last_port -> Int4, @@ -683,9 +683,9 @@ table! { } // This is the sequence used for the version number -// in ipv4_nat_entry. +// in nat_entry. table! 
{ - ipv4_nat_version (last_value) { + nat_version (last_value) { last_value -> Int8, log_cnt -> Int8, is_called -> Bool, diff --git a/nexus/internal-api/src/lib.rs b/nexus/internal-api/src/lib.rs index 6932e223839..b7daffb5af4 100644 --- a/nexus/internal-api/src/lib.rs +++ b/nexus/internal-api/src/lib.rs @@ -28,7 +28,7 @@ use nexus_types::{ SledAgentInfo, SwitchPutRequest, SwitchPutResponse, }, views::{ - BackgroundTask, DemoSaga, Ipv4NatEntryView, MgsUpdateDriverStatus, + BackgroundTask, DemoSaga, MgsUpdateDriverStatus, NatEntryView, QuiesceStatus, Saga, UpdateStatus, }, }, @@ -395,7 +395,7 @@ pub trait NexusInternalApi { rqctx: RequestContext, path_params: Path, query_params: Query, - ) -> Result>, HttpError>; + ) -> Result>, HttpError>; // APIs for managing blueprints // diff --git a/nexus/src/app/background/init.rs b/nexus/src/app/background/init.rs index 83917c0c43d..51b6d1d8658 100644 --- a/nexus/src/app/background/init.rs +++ b/nexus/src/app/background/init.rs @@ -380,9 +380,8 @@ impl BackgroundTasksInitializer { } driver.register(TaskDefinition { - name: "nat_v4_garbage_collector", - description: - "prunes soft-deleted IPV4 NAT entries from ipv4_nat_entry \ + name: "nat_garbage_collector", + description: "prunes soft-deleted NAT entries from nat_entry \ table based on a predetermined retention policy", period: config.nat_cleanup.period_secs, task_impl: Box::new(nat_cleanup::Ipv4NatGarbageCollector::new( diff --git a/nexus/src/app/background/tasks/nat_cleanup.rs b/nexus/src/app/background/tasks/nat_cleanup.rs index 5374a264eed..6f658fb0261 100644 --- a/nexus/src/app/background/tasks/nat_cleanup.rs +++ b/nexus/src/app/background/tasks/nat_cleanup.rs @@ -2,7 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! Background task for garbage collecting ipv4_nat_entry table. +//! Background task for garbage collecting nat_entry table. //! Responsible for cleaning up soft deleted entries once they //! have been propagated to running dpd instances. 
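As context for the collector below: soft deletion happens through `nat_delete` / `nat_delete_by_external_ip`, and this task only hard-deletes entries that fall below both cutoffs via `nat_cleanup`. A rough sketch of how those inputs compose, under stated assumptions: `min_gen` stands in for the minimum generation already propagated to the switches, and the 30-minute retention window is a placeholder, not the task's configured policy.

```rust
use chrono::{Duration, Utc};
use nexus_db_queries::context::OpContext;
use nexus_db_queries::db::DataStore;
use omicron_common::api::external::Error;

/// Prune NAT entries that every switch has already seen as deleted.
/// `min_gen` would normally come from querying the switches; here it is
/// just a parameter, and the retention window is an example value.
async fn prune_nat_entries(
    datastore: &DataStore,
    opctx: &OpContext,
    min_gen: i64,
) -> Result<(), Error> {
    // Entries soft-deleted before this timestamp and below `min_gen` are
    // eligible for hard deletion.
    let retention_threshold = Utc::now() - Duration::minutes(30);
    datastore.nat_cleanup(opctx, min_gen, retention_threshold).await
}
```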
@@ -20,7 +20,7 @@ use serde_json::json; use std::sync::Arc; /// Background task that periodically prunes soft-deleted entries -/// from ipv4_nat_entry table +/// from nat_entry table pub struct Ipv4NatGarbageCollector { datastore: Arc, resolver: Resolver, @@ -43,7 +43,7 @@ impl BackgroundTask for Ipv4NatGarbageCollector { async { let log = &opctx.log; - let result = self.datastore.ipv4_nat_current_version(opctx).await; + let result = self.datastore.nat_current_version(opctx).await; let mut min_gen = match result { Ok(gen) => gen, @@ -111,7 +111,7 @@ impl BackgroundTask for Ipv4NatGarbageCollector { let result = match self .datastore - .ipv4_nat_cleanup(opctx, min_gen, retention_threshold) + .nat_cleanup(opctx, min_gen, retention_threshold) .await { Ok(v) => v, Err(e) => { diff --git a/nexus/src/app/background/tasks/sync_service_zone_nat.rs b/nexus/src/app/background/tasks/sync_service_zone_nat.rs index 9646ba0ec9a..3164657e8ad 100644 --- a/nexus/src/app/background/tasks/sync_service_zone_nat.rs +++ b/nexus/src/app/background/tasks/sync_service_zone_nat.rs @@ -14,14 +14,13 @@ use futures::FutureExt; use futures::future::BoxFuture; use internal_dns_resolver::Resolver; use nexus_db_lookup::LookupPath; -use nexus_db_model::Ipv4NatValues; +use nexus_db_model::NatEntryValues; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::DataStore; use nexus_sled_agent_shared::inventory::OmicronZoneType; use omicron_common::address::{MAX_PORT, MIN_PORT}; use omicron_uuid_kinds::GenericUuid; use serde_json::json; -use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; // Minumum number of boundary NTP zones that should be present in a valid @@ -99,7 +98,7 @@ impl BackgroundTask for ServiceZoneNatTracker { } }; - let mut ipv4_nat_values: Vec = vec![]; + let mut nat_values: Vec = vec![]; let mut ntp_count = 0; let mut nexus_count = 0; let mut dns_count = 0; @@ -144,27 +143,13 @@ impl BackgroundTask for ServiceZoneNatTracker { OmicronZoneType::BoundaryNtp { nic, snat_cfg, .. } => { - let external_ip = match snat_cfg.ip { - IpAddr::V4(addr) => addr, - IpAddr::V6(_) => { - error!( - &log, - "ipv6 addresses for service zone nat not implemented"; - ); - continue; - } - }; - - let external_address = - oxnet::Ipv4Net::new(external_ip, 32) - .unwrap(); - + let external_address = nexus_db_model::IpNet::from( + oxnet::IpNet::host_net(snat_cfg.ip) + ); let (snat_first_port, snat_last_port) = snat_cfg.port_range_raw(); - let nat_value = Ipv4NatValues { - external_address: nexus_db_model::Ipv4Net( - external_address, - ), + let nat_value = NatEntryValues { + external_address, first_port: snat_first_port.into(), last_port: snat_last_port.into(), sled_address: sled_address.into(), @@ -172,30 +157,16 @@ impl BackgroundTask for ServiceZoneNatTracker { mac: nexus_db_model::MacAddr(nic.mac), }; - // Append ipv4 nat entry - ipv4_nat_values.push(nat_value); + // Append NAT entry + nat_values.push(nat_value); ntp_count += 1; } OmicronZoneType::Nexus { nic, external_ip, .. 
} => { - let external_ip = match external_ip { - IpAddr::V4(addr) => addr, - IpAddr::V6(_) => { - error!( - &log, - "ipv6 addresses for service zone nat not implemented"; - ); - continue; - } - }; - - let external_address = - oxnet::Ipv4Net::new(external_ip, 32) - .unwrap(); - - let nat_value = Ipv4NatValues { - external_address: nexus_db_model::Ipv4Net( - external_address, - ), + let external_address = nexus_db_model::IpNet::from( + oxnet::IpNet::host_net(external_ip) + ); + let nat_value = NatEntryValues { + external_address, first_port: MIN_PORT.into(), last_port: MAX_PORT.into(), sled_address: sled_address.into(), @@ -203,32 +174,16 @@ impl BackgroundTask for ServiceZoneNatTracker { mac: nexus_db_model::MacAddr(nic.mac), }; - // Append ipv4 nat entry - ipv4_nat_values.push(nat_value); + // Append NAT entry + nat_values.push(nat_value); nexus_count += 1; }, OmicronZoneType::ExternalDns { nic, dns_address, .. } => { - let external_ip = match dns_address { - SocketAddr::V4(v4) => { - *v4.ip() - }, - SocketAddr::V6(_) => { - error!( - &log, - "ipv6 addresses for service zone nat not implemented"; - ); - continue; - }, - }; - - let external_address = - oxnet::Ipv4Net::new(external_ip, 32) - .unwrap(); - - let nat_value = Ipv4NatValues { - external_address: nexus_db_model::Ipv4Net( - external_address, - ), + let external_address = nexus_db_model::IpNet::from( + oxnet::IpNet::host_net(dns_address.ip()) + ); + let nat_value = NatEntryValues { + external_address, first_port: MIN_PORT.into(), last_port: MAX_PORT.into(), sled_address: sled_address.into(), @@ -236,8 +191,8 @@ impl BackgroundTask for ServiceZoneNatTracker { mac: nexus_db_model::MacAddr(nic.mac), }; - // Append ipv4 nat entry - ipv4_nat_values.push(nat_value); + // Append NAT entry + nat_values.push(nat_value); dns_count += 1; }, // we explictly list all cases instead of using a wildcard, @@ -260,7 +215,7 @@ impl BackgroundTask for ServiceZoneNatTracker { // if we make it this far this should not be empty: // * nexus is running so we should at least have generated a nat value for it // * nexus requies other services zones that require nat to come up first - if ipv4_nat_values.is_empty() { + if nat_values.is_empty() { error!( &log, "nexus is running but no service zone nat values could be generated from inventory"; @@ -303,7 +258,7 @@ impl BackgroundTask for ServiceZoneNatTracker { } // reconcile service zone nat entries - let result = match self.datastore.ipv4_nat_sync_service_zones(opctx, &ipv4_nat_values).await { + let result = match self.datastore.nat_sync_service_zones(opctx, &nat_values).await { Ok(num) => num, Err(e) => { error!( diff --git a/nexus/src/app/instance_network.rs b/nexus/src/app/instance_network.rs index 8c61b2e3936..ad4e91e029a 100644 --- a/nexus/src/app/instance_network.rs +++ b/nexus/src/app/instance_network.rs @@ -5,13 +5,12 @@ //! Routines that manage instance-related networking state. 
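One effect of the changes to this file shown below is that an external IP no longer has to pass a v4-only `match` before it can be written to the NAT table; any `ipnetwork::IpNetwork` converts directly into the new `nexus_db_model::IpNet`. A small illustration of that conversion path, with example addresses only:

```rust
use ipnetwork::IpNetwork;
use nexus_db_model::IpNet;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};

// Both address families take the same path into the database wrapper, so the
// old v4-only handling is no longer needed.
fn to_db_ipnet(ip: IpNetwork) -> IpNet {
    IpNet::from(ip)
}

fn example() -> (IpNet, IpNet) {
    // Example addresses only; the real code path reads `ExternalIp::ip`.
    let v4 = IpNetwork::from(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 10)));
    let v6 = IpNetwork::from(IpAddr::V6(Ipv6Addr::LOCALHOST));
    (to_db_ipnet(v4), to_db_ipnet(v6))
}
```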
use crate::app::switch_port;
-use ipnetwork::IpNetwork;
use nexus_background_task_interface::BackgroundTasks;
use nexus_db_lookup::LookupPath;
use nexus_db_model::ExternalIp;
use nexus_db_model::IpAttachState;
-use nexus_db_model::Ipv4NatEntry;
-use nexus_db_model::Ipv4NatValues;
+use nexus_db_model::NatEntry;
+use nexus_db_model::NatEntryValues;
use nexus_db_model::Vni as DbVni;
use nexus_db_queries::authz;
use nexus_db_queries::context::OpContext;
@@ -21,7 +20,6 @@ use omicron_common::api::internal::shared::NetworkInterface;
use omicron_common::api::internal::shared::SwitchLocation;
use omicron_uuid_kinds::GenericUuid;
use omicron_uuid_kinds::InstanceUuid;
-use oxnet::Ipv4Net;
use oxnet::Ipv6Net;
use std::collections::HashSet;
use std::str::FromStr;
@@ -67,7 +65,7 @@ impl Nexus {
        instance_id: InstanceUuid,
        sled_ip_address: &std::net::SocketAddrV6,
        ip_filter: Option,
-    ) -> Result<Vec<Ipv4NatEntry>, Error> {
+    ) -> Result<Vec<NatEntry>, Error> {
        instance_ensure_dpd_config(
            &self.db_datastore,
            &self.log,
@@ -170,7 +168,7 @@ impl Nexus {
    pub(crate) async fn delete_dpd_config_by_entry(
        &self,
        opctx: &OpContext,
-        nat_entry: &Ipv4NatEntry,
+        nat_entry: &NatEntry,
    ) -> Result<(), Error> {
        delete_dpd_config_by_entry(
            &self.db_datastore,
@@ -267,22 +265,23 @@ pub(crate) async fn instance_ensure_dpd_config(
    instance_id: InstanceUuid,
    sled_ip_address: &std::net::SocketAddrV6,
    ip_filter: Option,
-) -> Result<Vec<Ipv4NatEntry>, Error> {
-    info!(log, "looking up instance's primary network interface";
-          "instance_id" => %instance_id);
+) -> Result<Vec<NatEntry>, Error> {
+    info!(
+        log,
+        "looking up instance's primary network interface";
+        "instance_id" => %instance_id
+    );

    let (.., authz_instance) = LookupPath::new(opctx, datastore)
        .instance_id(instance_id.into_untyped_uuid())
        .lookup_for(authz::Action::ListChildren)
        .await?;

-    // XXX: Need to abstract over v6 and v4 entries here.
-    let mut nat_entries = vec![];
-
    // All external IPs map to the primary network interface, so find that
    // interface. If there is no such interface, there's no way to route
    // traffic destined to those IPs, so there's nothing to configure and
    // it's safe to return early.
+    let mut nat_entries = vec![];
    let network_interface = match datastore
        .derive_guest_network_interface_info(&opctx, &authz_instance)
        .await?
@@ -291,22 +290,20 @@ pub(crate) async fn instance_ensure_dpd_config(
    {
        Some(interface) => interface,
        None => {
-            info!(log, "Instance has no primary network interface";
-                  "instance_id" => %instance_id);
+            info!(
+                log,
+                "Instance has no primary network interface";
+                "instance_id" => %instance_id
+            );
            return Ok(nat_entries);
        }
    };

-    let mac_address =
-        macaddr::MacAddr6::from_str(&network_interface.mac.to_string())
-            .map_err(|e| {
-                Error::internal_error(&format!(
-                    "failed to convert mac address: {e}"
-                ))
-            })?;
-
-    info!(log, "looking up instance's external IPs";
-          "instance_id" => %instance_id);
+    info!(
+        log,
+        "looking up instance's external IPs";
+        "instance_id" => %instance_id
+    );

    let ips =
        datastore.instance_lookup_external_ips(&opctx, instance_id).await?;
@@ -336,8 +333,6 @@ pub(crate) async fn instance_ensure_dpd_config(
        ));
    }

-    let sled_address = Ipv6Net::host_net(*sled_ip_address.ip());
-
    // If all of our IPs are attached or are guaranteed to be owned
    // by the saga calling this fn, then we need to disregard and
    // remove conflicting rows. No other instance/service should be
@@ -345,6 +340,8 @@ pub(crate) async fn instance_ensure_dpd_config(
    // the case where we have a concurrent stop -> detach followed
    // by an attach to another instance, or other ongoing attach saga
    // cleanup.
+    let sled_address = Ipv6Net::host_net(*sled_ip_address.ip());
+    let mac_address = network_interface.mac.0;
    let mut err_and_limit = None;
    for (i, external_ip) in ips_of_interest.iter().enumerate() {
        // For each external ip, add a nat entry to the database
@@ -589,7 +586,7 @@ pub(crate) async fn probe_delete_dpd_config(
    let mut errors = vec![];
    for entry in external_ips {
        // Soft delete the NAT entry
-        match datastore.ipv4_nat_delete_by_external_ip(&opctx, &entry).await {
+        match datastore.nat_delete_by_external_ip(&opctx, &entry).await {
            Ok(_) => Ok(()),
            Err(err) => match err {
                Error::ObjectNotFound { .. } => {
@@ -663,13 +660,16 @@ pub(crate) async fn delete_dpd_config_by_entry(
    log: &slog::Logger,
    opctx: &OpContext,
    opctx_alloc: &OpContext,
-    nat_entry: &Ipv4NatEntry,
+    nat_entry: &NatEntry,
) -> Result<(), Error> {
-    info!(log, "deleting individual NAT entry from dpd configuration";
-          "id" => ?nat_entry.id,
-          "version_added" => %nat_entry.external_address.0);
+    info!(
+        log,
+        "deleting individual NAT entry from dpd configuration";
+        "id" => ?nat_entry.id,
+        "version_added" => %nat_entry.external_address,
+    );

-    match datastore.ipv4_nat_delete(&opctx, nat_entry).await {
+    match datastore.nat_delete(&opctx, nat_entry).await {
        Ok(_) => {}
        Err(err) => match err {
            Error::ObjectNotFound { .. } => {
@@ -704,7 +704,7 @@ async fn external_ip_delete_dpd_config_inner(
    external_ip: &ExternalIp,
) -> Result<(), Error> {
    // Soft delete the NAT entry
-    match datastore.ipv4_nat_delete_by_external_ip(&opctx, external_ip).await {
+    match datastore.nat_delete_by_external_ip(&opctx, external_ip).await {
        Ok(_) => Ok(()),
        Err(err) => match err {
            Error::ObjectNotFound { .. } => {
@@ -788,26 +788,16 @@ async fn ensure_nat_entry(
    network_interface: &NetworkInterface,
    mac_address: macaddr::MacAddr6,
    opctx: &OpContext,
-) -> Result<Ipv4NatEntry, Error> {
-    match target_ip.ip {
-        IpNetwork::V4(v4net) => {
-            let nat_entry = Ipv4NatValues {
-                external_address: Ipv4Net::from(v4net).into(),
-                first_port: target_ip.first_port,
-                last_port: target_ip.last_port,
-                sled_address: sled_address.into(),
-                vni: DbVni(network_interface.vni),
-                mac: nexus_db_model::MacAddr(
-                    omicron_common::api::external::MacAddr(mac_address),
-                ),
-            };
-            Ok(datastore.ensure_ipv4_nat_entry(opctx, nat_entry).await?)
-        }
-        IpNetwork::V6(_v6net) => {
-            // TODO: implement handling of v6 nat.
-            return Err(Error::InternalError {
-                internal_message: "ipv6 nat is not yet implemented".into(),
-            });
-        }
-    }
+) -> Result<NatEntry, Error> {
+    let nat_entry = NatEntryValues {
+        external_address: nexus_db_model::IpNet::from(target_ip.ip),
+        first_port: target_ip.first_port,
+        last_port: target_ip.last_port,
+        sled_address: nexus_db_model::Ipv6Net::from(sled_address),
+        vni: DbVni(network_interface.vni),
+        mac: nexus_db_model::MacAddr(omicron_common::api::external::MacAddr(
+            mac_address,
+        )),
+    };
+    datastore.ensure_nat_entry(opctx, nat_entry).await
}
diff --git a/nexus/src/app/sagas/instance_common.rs b/nexus/src/app/sagas/instance_common.rs
index b4644201e0c..cdeb10d2c33 100644
--- a/nexus/src/app/sagas/instance_common.rs
+++ b/nexus/src/app/sagas/instance_common.rs
@@ -9,7 +9,7 @@ use std::net::{IpAddr, Ipv6Addr};
use crate::Nexus;
use nexus_db_lookup::LookupPath;
use nexus_db_model::{
-    ByteCount, ExternalIp, InstanceState, IpAttachState, Ipv4NatEntry,
+    ByteCount, ExternalIp, InstanceState, IpAttachState, NatEntry,
    SledReservationConstraints, SledResourceVmm, VmmState,
};
use nexus_db_queries::authz;
@@ -333,7 +333,7 @@ pub async fn instance_ip_add_nat(
    authz_instance: &authz::Instance,
    sled_uuid: Option,
    target_ip: ModifyStateForExternalIp,
-) -> Result<Option<Ipv4NatEntry>, ActionError> {
+) -> Result<Option<NatEntry>, ActionError> {
    let osagactx = sagactx.user_data();
    let datastore = osagactx.datastore();
    let opctx =
diff --git a/nexus/src/app/sagas/instance_ip_attach.rs b/nexus/src/app/sagas/instance_ip_attach.rs
index f34d63b9ec6..11a0a18f19a 100644
--- a/nexus/src/app/sagas/instance_ip_attach.rs
+++ b/nexus/src/app/sagas/instance_ip_attach.rs
@@ -10,7 +10,7 @@ use super::instance_common::{
use super::{ActionRegistry, NexusActionContext, NexusSaga};
use crate::app::sagas::declare_saga_actions;
use crate::app::{authn, authz};
-use nexus_db_model::{IpAttachState, Ipv4NatEntry};
+use nexus_db_model::{IpAttachState, NatEntry};
use nexus_types::external_api::views;
use omicron_common::api::external::Error;
use omicron_uuid_kinds::{GenericUuid, InstanceUuid};
@@ -175,7 +175,7 @@ async fn siia_get_instance_state(
// XXX: Need to abstract over v4 and v6 NAT entries when the time comes.
async fn siia_nat(
    sagactx: NexusActionContext,
-) -> Result<Option<Ipv4NatEntry>, ActionError> {
+) -> Result<Option<NatEntry>, ActionError> {
    let params = sagactx.saga_params::()?;
    let sled_id = sagactx
        .lookup::>("instance_state")?
@@ -198,7 +198,7 @@ async fn siia_nat_undo(
    let log = sagactx.user_data().log();
    let osagactx = sagactx.user_data();
    let params = sagactx.saga_params::()?;
-    let nat_entry = sagactx.lookup::<Option<Ipv4NatEntry>>("nat_entry")?;
+    let nat_entry = sagactx.lookup::<Option<NatEntry>>("nat_entry")?;
    let opctx = crate::context::op_context_for_saga_action(
        &sagactx,
        &params.serialized_authn,
diff --git a/nexus/src/internal_api/http_entrypoints.rs b/nexus/src/internal_api/http_entrypoints.rs
index 8841d8ec3a1..8604ed5e712 100644
--- a/nexus/src/internal_api/http_entrypoints.rs
+++ b/nexus/src/internal_api/http_entrypoints.rs
@@ -47,8 +47,8 @@ use nexus_types::internal_api::params::SwitchPutRequest;
use nexus_types::internal_api::params::SwitchPutResponse;
use nexus_types::internal_api::views::BackgroundTask;
use nexus_types::internal_api::views::DemoSaga;
-use nexus_types::internal_api::views::Ipv4NatEntryView;
use nexus_types::internal_api::views::MgsUpdateDriverStatus;
+use nexus_types::internal_api::views::NatEntryView;
use nexus_types::internal_api::views::QuiesceStatus;
use nexus_types::internal_api::views::Saga;
use nexus_types::internal_api::views::UpdateStatus;
@@ -682,7 +682,7 @@ impl NexusInternalApi for NexusInternalApiImpl {
        rqctx: RequestContext,
        path_params: Path,
        query_params: Query,
-    ) -> Result<HttpResponseOk<Vec<Ipv4NatEntryView>>, HttpError> {
+    ) -> Result<HttpResponseOk<Vec<NatEntryView>>, HttpError> {
        let apictx = &rqctx.context().context;
        let handler = async {
            let opctx =
@@ -692,7 +692,7 @@ impl NexusInternalApi for NexusInternalApiImpl {
            let query = query_params.into_inner();
            let mut changeset = nexus
                .datastore()
-                .ipv4_nat_changeset(&opctx, path.from_gen, query.limit)
+                .nat_changeset(&opctx, path.from_gen, query.limit)
                .await?;
            changeset.sort_by_key(|e| e.gen);
            Ok(HttpResponseOk(changeset))
diff --git a/nexus/types/src/internal_api/views.rs b/nexus/types/src/internal_api/views.rs
index 131dee8d955..96c3ba0cf9d 100644
--- a/nexus/types/src/internal_api/views.rs
+++ b/nexus/types/src/internal_api/views.rs
@@ -34,7 +34,7 @@ use serde::Serialize;
use std::collections::BTreeMap;
use std::collections::VecDeque;
use std::fmt::Display;
-use std::net::Ipv4Addr;
+use std::net::IpAddr;
use std::net::Ipv6Addr;
use std::sync::Arc;
use std::time::Duration;
@@ -334,9 +334,12 @@ pub struct LastResultCompleted {
}

/// NAT Record
+///
+/// A NAT record maps an external IP address, used by an instance or
+/// externally-facing service like Nexus, to the hosting sled.
#[derive(Clone, Debug, Serialize, JsonSchema)]
-pub struct Ipv4NatEntryView {
-    pub external_address: Ipv4Addr,
+pub struct NatEntryView {
+    pub external_address: IpAddr,
    pub first_port: u16,
    pub last_port: u16,
    pub sled_address: Ipv6Addr,
diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json
index 36f5222a93c..9ed4a369133 100644
--- a/openapi/nexus-internal.json
+++ b/openapi/nexus-internal.json
@@ -1471,10 +1471,10 @@
          "content": {
            "application/json": {
              "schema": {
-                "title": "Array_of_Ipv4NatEntryView",
+                "title": "Array_of_NatEntryView",
                "type": "array",
                "items": {
-                  "$ref": "#/components/schemas/Ipv4NatEntryView"
+                  "$ref": "#/components/schemas/NatEntryView"
                }
              }
            }
@@ -5169,53 +5169,6 @@
            }
          ]
        },
-      "Ipv4NatEntryView": {
-        "description": "NAT Record",
-        "type": "object",
-        "properties": {
-          "deleted": {
-            "type": "boolean"
-          },
-          "external_address": {
-            "type": "string",
-            "format": "ipv4"
-          },
-          "first_port": {
-            "type": "integer",
-            "format": "uint16",
-            "minimum": 0
-          },
-          "gen": {
-            "type": "integer",
-            "format": "int64"
-          },
-          "last_port": {
-            "type": "integer",
-            "format": "uint16",
-            "minimum": 0
-          },
-          "mac": {
-            "$ref": "#/components/schemas/MacAddr"
-          },
-          "sled_address": {
-            "type": "string",
-            "format": "ipv6"
-          },
-          "vni": {
-            "$ref": "#/components/schemas/Vni"
-          }
-        },
-        "required": [
-          "deleted",
-          "external_address",
-          "first_port",
-          "gen",
-          "last_port",
-          "mac",
-          "sled_address",
-          "vni"
-        ]
-      },
      "Ipv4Net": {
        "example": "192.168.1.0/24",
        "title": "An IPv4 subnet",
@@ -5561,6 +5514,53 @@
        "minLength": 1,
        "maxLength": 63
      },
+      "NatEntryView": {
+        "description": "NAT Record\n\nA NAT record maps an external IP address, used by an instance or externally-facing service like Nexus, to the hosting sled.",
+        "type": "object",
+        "properties": {
+          "deleted": {
+            "type": "boolean"
+          },
+          "external_address": {
+            "type": "string",
+            "format": "ip"
+          },
+          "first_port": {
+            "type": "integer",
+            "format": "uint16",
+            "minimum": 0
+          },
+          "gen": {
+            "type": "integer",
+            "format": "int64"
+          },
+          "last_port": {
+            "type": "integer",
+            "format": "uint16",
+            "minimum": 0
+          },
+          "mac": {
+            "$ref": "#/components/schemas/MacAddr"
+          },
+          "sled_address": {
+            "type": "string",
+            "format": "ipv6"
+          },
+          "vni": {
+            "$ref": "#/components/schemas/Vni"
+          }
+        },
+        "required": [
+          "deleted",
+          "external_address",
+          "first_port",
+          "gen",
+          "last_port",
+          "mac",
+          "sled_address",
+          "vni"
+        ]
+      },
      "NetworkInterface": {
        "description": "Information required to construct a virtual network interface",
        "type": "object",
diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql
index 8463bdba2b2..f87d11e0903 100644
--- a/schema/crdb/dbinit.sql
+++ b/schema/crdb/dbinit.sql
@@ -5149,9 +5149,9 @@ FROM
WHERE
    instance.time_deleted IS NULL AND vmm.time_deleted IS NULL;

-CREATE SEQUENCE IF NOT EXISTS omicron.public.ipv4_nat_version START 1 INCREMENT 1;
+CREATE SEQUENCE IF NOT EXISTS omicron.public.nat_version START 1 INCREMENT 1;

-CREATE TABLE IF NOT EXISTS omicron.public.ipv4_nat_entry (
+CREATE TABLE IF NOT EXISTS omicron.public.nat_entry (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    external_address INET NOT NULL,
    first_port INT4 NOT NULL,
@@ -5159,13 +5159,13 @@ CREATE TABLE IF NOT EXISTS omicron.public.ipv4_nat_entry (
    sled_address INET NOT NULL,
    vni INT4 NOT NULL,
    mac INT8 NOT NULL,
-    version_added INT8 NOT NULL DEFAULT nextval('omicron.public.ipv4_nat_version'),
+    version_added INT8 NOT NULL DEFAULT nextval('omicron.public.nat_version'),
    version_removed INT8,
    time_created TIMESTAMPTZ NOT NULL DEFAULT now(),
    time_deleted TIMESTAMPTZ
);

-CREATE UNIQUE INDEX IF NOT EXISTS ipv4_nat_version_added ON omicron.public.ipv4_nat_entry (
+CREATE UNIQUE INDEX IF NOT EXISTS nat_version_added ON omicron.public.nat_entry (
    version_added
)
STORING (
@@ -5179,15 +5179,15 @@ STORING (
    time_deleted
);

-CREATE UNIQUE INDEX IF NOT EXISTS overlapping_ipv4_nat_entry ON omicron.public.ipv4_nat_entry (
+CREATE UNIQUE INDEX IF NOT EXISTS overlapping_nat_entry ON omicron.public.nat_entry (
    external_address,
    first_port,
    last_port
) WHERE time_deleted IS NULL;

-CREATE INDEX IF NOT EXISTS ipv4_nat_lookup ON omicron.public.ipv4_nat_entry (external_address, first_port, last_port, sled_address, vni, mac);
+CREATE INDEX IF NOT EXISTS nat_lookup ON omicron.public.nat_entry (external_address, first_port, last_port, sled_address, vni, mac);

-CREATE UNIQUE INDEX IF NOT EXISTS ipv4_nat_version_removed ON omicron.public.ipv4_nat_entry (
+CREATE UNIQUE INDEX IF NOT EXISTS nat_version_removed ON omicron.public.nat_entry (
    version_removed
)
STORING (
@@ -5201,31 +5201,7 @@ STORING (
    time_deleted
);

-CREATE TYPE IF NOT EXISTS omicron.public.bfd_mode AS ENUM (
-    'single_hop',
-    'multi_hop'
-);
-
-CREATE TABLE IF NOT EXISTS omicron.public.bfd_session (
-    id UUID PRIMARY KEY,
-    local INET,
-    remote INET NOT NULL,
-    detection_threshold INT8 NOT NULL,
-    required_rx INT8 NOT NULL,
-    switch TEXT NOT NULL,
-    mode omicron.public.bfd_mode,
-
-    time_created TIMESTAMPTZ NOT NULL,
-    time_modified TIMESTAMPTZ NOT NULL,
-    time_deleted TIMESTAMPTZ
-);
-
-CREATE UNIQUE INDEX IF NOT EXISTS lookup_bfd_session ON omicron.public.bfd_session (
-    remote,
-    switch
-) WHERE time_deleted IS NULL;
-
-CREATE INDEX IF NOT EXISTS ipv4_nat_lookup_by_vni ON omicron.public.ipv4_nat_entry (
+CREATE INDEX IF NOT EXISTS nat_lookup_by_vni ON omicron.public.nat_entry (
    vni
)
STORING (
@@ -5244,7 +5220,7 @@ STORING (
 * A view of the ipv4 nat change history
 * used to summarize changes for external viewing
 */
-CREATE VIEW IF NOT EXISTS omicron.public.ipv4_nat_changes
+CREATE VIEW IF NOT EXISTS omicron.public.nat_changes
AS
-- Subquery:
-- We need to be able to order partial changesets. ORDER BY on separate columns
@@ -5265,7 +5241,7 @@ WITH interleaved_versions AS (
    -- create a new virtual column, boolean value representing whether or not
    -- the record has been soft deleted
    (version_removed IS NOT NULL) as deleted
-  FROM omicron.public.ipv4_nat_entry
+  FROM omicron.public.nat_entry
  WHERE version_removed IS NULL

  -- combine the datasets, unifying the version_added and version_removed
@@ -5285,7 +5261,7 @@ WITH interleaved_versions AS (
    -- create a new virtual column, boolean value representing whether or not
    -- the record has been soft deleted
    (version_removed IS NOT NULL) as deleted
-  FROM omicron.public.ipv4_nat_entry
+  FROM omicron.public.nat_entry
  WHERE version_removed IS NOT NULL
)
-- this is our new "table"
@@ -5301,6 +5277,31 @@ SELECT
  deleted
FROM interleaved_versions;

+CREATE TYPE IF NOT EXISTS omicron.public.bfd_mode AS ENUM (
+    'single_hop',
+    'multi_hop'
+);
+
+CREATE TABLE IF NOT EXISTS omicron.public.bfd_session (
+    id UUID PRIMARY KEY,
+    local INET,
+    remote INET NOT NULL,
+    detection_threshold INT8 NOT NULL,
+    required_rx INT8 NOT NULL,
+    switch TEXT NOT NULL,
+    mode omicron.public.bfd_mode,
+
+    time_created TIMESTAMPTZ NOT NULL,
+    time_modified TIMESTAMPTZ NOT NULL,
+    time_deleted TIMESTAMPTZ
+);
+
+CREATE UNIQUE INDEX IF NOT EXISTS lookup_bfd_session ON omicron.public.bfd_session (
+    remote,
+    switch
+) WHERE time_deleted IS NULL;
+
+
CREATE TABLE IF NOT EXISTS omicron.public.probe (
    id UUID NOT NULL PRIMARY KEY,
    name STRING(63) NOT NULL,
@@ -6549,7 +6550,7 @@ INSERT INTO omicron.public.db_metadata (
    version,
    target_version
) VALUES
-    (TRUE, NOW(), NOW(), '180.0.0', NULL)
+    (TRUE, NOW(), NOW(), '181.0.0', NULL)
ON CONFLICT DO NOTHING;

COMMIT;
diff --git a/schema/crdb/rename-nat-table/up01.sql b/schema/crdb/rename-nat-table/up01.sql
new file mode 100644
index 00000000000..354480c0c99
--- /dev/null
+++ b/schema/crdb/rename-nat-table/up01.sql
@@ -0,0 +1 @@
+DROP VIEW IF EXISTS omicron.public.ipv4_nat_changes;
diff --git a/schema/crdb/rename-nat-table/up02.sql b/schema/crdb/rename-nat-table/up02.sql
new file mode 100644
index 00000000000..810b2739701
--- /dev/null
+++ b/schema/crdb/rename-nat-table/up02.sql
@@ -0,0 +1 @@
+ALTER SEQUENCE IF EXISTS omicron.public.ipv4_nat_version RENAME TO omicron.public.nat_version;
diff --git a/schema/crdb/rename-nat-table/up03.sql b/schema/crdb/rename-nat-table/up03.sql
new file mode 100644
index 00000000000..26c051558be
--- /dev/null
+++ b/schema/crdb/rename-nat-table/up03.sql
@@ -0,0 +1 @@
+ALTER TABLE IF EXISTS omicron.public.ipv4_nat_entry RENAME TO omicron.public.nat_entry;
diff --git a/schema/crdb/rename-nat-table/up04.sql b/schema/crdb/rename-nat-table/up04.sql
new file mode 100644
index 00000000000..5f7c37ecca3
--- /dev/null
+++ b/schema/crdb/rename-nat-table/up04.sql
@@ -0,0 +1,4 @@
+ALTER TABLE IF EXISTS
+omicron.public.nat_entry
+ALTER COLUMN version_added
+SET DEFAULT nextval('omicron.public.nat_version');
diff --git a/schema/crdb/rename-nat-table/up05.sql b/schema/crdb/rename-nat-table/up05.sql
new file mode 100644
index 00000000000..8709daeae4b
--- /dev/null
+++ b/schema/crdb/rename-nat-table/up05.sql
@@ -0,0 +1,37 @@
+CREATE VIEW IF NOT EXISTS omicron.public.nat_changes
+AS
+WITH interleaved_versions AS (
+  SELECT
+    external_address,
+    first_port,
+    last_port,
+    sled_address,
+    vni,
+    mac,
+    version_added AS version,
+    (version_removed IS NOT NULL) as deleted
+  FROM omicron.public.nat_entry
+  WHERE version_removed IS NULL
+  UNION
+  SELECT
+    external_address,
+    first_port,
+    last_port,
+    sled_address,
+    vni,
+    mac,
+    version_removed AS version,
+    (version_removed IS NOT NULL) as deleted
+  FROM omicron.public.nat_entry
+  WHERE version_removed IS NOT NULL
+)
+SELECT
+  external_address,
+  first_port,
+  last_port,
+  sled_address,
+  vni,
+  mac,
+  version,
+  deleted
+FROM interleaved_versions;
diff --git a/schema/crdb/rename-nat-table/up06.sql b/schema/crdb/rename-nat-table/up06.sql
new file mode 100644
index 00000000000..2a1054550a2
--- /dev/null
+++ b/schema/crdb/rename-nat-table/up06.sql
@@ -0,0 +1 @@
+ALTER INDEX IF EXISTS ipv4_nat_version_added RENAME TO nat_version_added;
diff --git a/schema/crdb/rename-nat-table/up07.sql b/schema/crdb/rename-nat-table/up07.sql
new file mode 100644
index 00000000000..570bdf423e0
--- /dev/null
+++ b/schema/crdb/rename-nat-table/up07.sql
@@ -0,0 +1 @@
+ALTER INDEX IF EXISTS overlapping_ipv4_nat_entry RENAME TO overlapping_nat_entry;
diff --git a/schema/crdb/rename-nat-table/up08.sql b/schema/crdb/rename-nat-table/up08.sql
new file mode 100644
index 00000000000..6a157266e1e
--- /dev/null
+++ b/schema/crdb/rename-nat-table/up08.sql
@@ -0,0 +1 @@
+ALTER INDEX IF EXISTS ipv4_nat_lookup RENAME TO nat_lookup;
diff --git a/schema/crdb/rename-nat-table/up09.sql b/schema/crdb/rename-nat-table/up09.sql
new file mode 100644
index 00000000000..fa918dfb21c
--- /dev/null
+++ b/schema/crdb/rename-nat-table/up09.sql
@@ -0,0 +1 @@
+ALTER INDEX IF EXISTS ipv4_nat_version_removed RENAME TO nat_version_removed;
diff --git a/schema/crdb/rename-nat-table/up10.sql b/schema/crdb/rename-nat-table/up10.sql
new file mode 100644
index 00000000000..9e3a436351f
--- /dev/null
+++ b/schema/crdb/rename-nat-table/up10.sql
@@ -0,0 +1 @@
+ALTER INDEX IF EXISTS ipv4_nat_lookup_by_vni RENAME TO nat_lookup_by_vni;
diff --git a/schema/crdb/rename-nat-table/up11.sql b/schema/crdb/rename-nat-table/up11.sql
new file mode 100644
index 00000000000..8d7127e5572
--- /dev/null
+++ b/schema/crdb/rename-nat-table/up11.sql
@@ -0,0 +1 @@
+ALTER INDEX IF EXISTS ipv4_nat_entry_pkey RENAME TO nat_entry_pkey;
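
Illustrative sketch (not part of the patch above): the heart of this change is that a NAT entry's external address is no longer IPv4-only. In the hunks above, `oxnet::IpNet::host_net` wraps a bare `IpAddr` in a host-width prefix so one code path builds `NatEntryValues` for either address family, and the renamed nat_entry table stores that prefix in its INET external_address column. The standalone Rust sketch below mirrors that conversion using only the standard library; `host_prefix_len` and the sample addresses are illustrative names, not identifiers from this diff.

use std::net::IpAddr;

// Mirrors the intent of `oxnet::IpNet::host_net` as used in the service-zone
// NAT tracker hunk: a bare address becomes a host-width network, /32 for IPv4
// and /128 for IPv6, so a single code path can populate the external address
// of a NAT entry for either family. (`host_prefix_len` is an illustrative
// helper, not part of the patch.)
fn host_prefix_len(addr: IpAddr) -> u8 {
    match addr {
        IpAddr::V4(_) => 32,
        IpAddr::V6(_) => 128,
    }
}

fn main() {
    let nexus_external: IpAddr = "192.0.2.10".parse().unwrap();
    let dns_external: IpAddr = "2001:db8::53".parse().unwrap();

    for addr in [nexus_external, dns_external] {
        // A host-width prefix like this is what lands in the renamed
        // nat_entry table's INET external_address column.
        println!("{}/{}", addr, host_prefix_len(addr));
    }
}

On the schema side, the migration in schema/crdb/rename-nat-table follows the usual rename sequence: drop the old ipv4_nat_changes view (up01), rename the sequence and table in place (up02, up03), repoint the version_added default at the renamed sequence (up04), recreate the view as nat_changes against nat_entry (up05), rename the indexes (up06 through up11), and bump db_metadata in dbinit.sql to schema version 181.0.0.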