Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 18 additions & 1 deletion Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 2 additions & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ members = [
"dpdk-sys",
"dpdk-sysroot-helper",
"errno",
"flow-filter",
"flow-info",
"hardware",
"id",
Expand Down Expand Up @@ -56,6 +57,7 @@ dpdk-sys = { path = "./dpdk-sys", package = "dataplane-dpdk-sys", features = []
dpdk-sysroot-helper = { path = "./dpdk-sysroot-helper", package = "dataplane-dpdk-sysroot-helper", features = [] }
dplane-rpc = { git = "https://github.com/githedgehog/dplane-rpc.git", rev = "e8fc33db10e1d00785f2a2b90cbadcad7900f200", features = [] }
errno = { path = "./errno", package = "dataplane-errno", features = [] }
flow-filter = { path = "./flow-filter", package = "dataplane-flow-filter", features = [] }
flow-info = { path = "./flow-info", package = "dataplane-flow-info", features = [] }
hardware = { path = "./hardware", package = "dataplane-hardware", features = [] }
id = { path = "./id", package = "dataplane-id", features = [] }
Expand Down
28 changes: 28 additions & 0 deletions config/src/external/overlay/vpcpeering.rs
Original file line number Diff line number Diff line change
Expand Up @@ -166,6 +166,20 @@ impl VpcExpose {
self.ips.insert(prefix);
self
}
/// Adds `prefix` to the set of public prefixes for this expose.
///
/// When NAT is configured with a non-empty `as_range`, the public prefixes
/// are those of the NAT range, so the prefix is inserted there. Otherwise no
/// address translation applies and the "ips" list itself is public, so the
/// prefix is inserted into "ips" instead.
#[must_use]
pub fn insert_public_ip(mut self, prefix: PrefixWithOptionalPorts) -> Self {
    match self.nat.as_mut() {
        Some(nat) if !nat.as_range.is_empty() => {
            nat.as_range.insert(prefix);
        }
        _ => {
            self.ips.insert(prefix);
        }
    }
    self
}
#[must_use]
pub fn not(mut self, prefix: PrefixWithOptionalPorts) -> Self {
self.nots.insert(prefix);
Expand Down Expand Up @@ -207,6 +221,20 @@ impl VpcExpose {
&nat.as_range
}
}
/// Returns a mutable reference to the current set of public prefixes for
/// this `VpcExpose`.
///
/// When NAT is configured with a non-empty `as_range`, that range holds the
/// public prefixes and is returned. Otherwise there is no NAT for the
/// expose, the "ips" list itself is public, and "ips" is returned instead.
// NOTE: the two arms borrow disjoint fields (`self.nat` vs `self.ips`), so
// returning either reference borrow-checks.
#[must_use]
pub fn public_ips_mut(&mut self) -> &mut BTreeSet<PrefixWithOptionalPorts> {
    match self.nat.as_mut() {
        Some(nat) if !nat.as_range.is_empty() => &mut nat.as_range,
        _ => &mut self.ips,
    }
}
// Same as public_ips, but returns the list of excluded prefixes
#[must_use]
pub fn public_excludes(&self) -> &BTreeSet<PrefixWithOptionalPorts> {
Expand Down
1 change: 1 addition & 0 deletions dataplane/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ concurrency = { workspace = true }
ctrlc = { workspace = true, features = ["termination"] }
dpdk = { workspace = true }
dyn-iter = { workspace = true }
flow-filter = { workspace = true }
futures = { workspace = true }
hyper = { workspace = true }
hyper-util = { workspace = true }
Expand Down
2 changes: 1 addition & 1 deletion dataplane/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -152,7 +152,7 @@ fn main() {
vpcmapw: setup.vpcmapw,
nattablesw: setup.nattablesw,
natallocatorw: setup.natallocatorw,
vpcdtablesw: setup.vpcdtablesw,
flowfilterw: setup.flowfiltertablesw,
vpc_stats_store: setup.vpc_stats_store,
},
})
Expand Down
14 changes: 7 additions & 7 deletions dataplane/src/packet_processor/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ use super::packet_processor::ipforward::IpForwarder;

use concurrency::sync::Arc;

use pkt_meta::dst_vpcd_lookup::{DstVpcdLookup, VpcDiscTablesWriter};
use flow_filter::{FlowFilter, FlowFilterTableWriter};
use pkt_meta::flow_table::{ExpirationsNF, FlowTable, LookupNF};

use nat::stateful::NatAllocatorWriter;
Expand All @@ -38,7 +38,7 @@ where
pub vpcmapw: VpcMapWriter<VpcMapName>,
pub nattablesw: NatTablesWriter,
pub natallocatorw: NatAllocatorWriter,
pub vpcdtablesw: VpcDiscTablesWriter,
pub flowfiltertablesw: FlowFilterTableWriter,
pub stats: StatsCollector,
pub vpc_stats_store: Arc<VpcStatsStore>,
}
Expand All @@ -49,7 +49,7 @@ pub(crate) fn start_router<Buf: PacketBufferMut>(
) -> Result<InternalSetup<Buf>, RouterError> {
let nattablesw = NatTablesWriter::new();
let natallocatorw = NatAllocatorWriter::new();
let vpcdtablesw = VpcDiscTablesWriter::new();
let flowfiltertablesw = FlowFilterTableWriter::new();
let router = Router::new(params)?;
let vpcmapw = VpcMapWriter::<VpcMapName>::new();

Expand All @@ -65,7 +65,7 @@ pub(crate) fn start_router<Buf: PacketBufferMut>(

let iftr_factory = router.get_iftabler_factory();
let fibtr_factory = router.get_fibtr_factory();
let vpcdtablesr_factory = vpcdtablesw.get_reader_factory();
let flowfiltertablesr_factory = flowfiltertablesw.get_reader_factory();
let atabler_factory = router.get_atabler_factory();
let nattabler_factory = nattablesw.get_reader_factory();
let natallocator_factory = natallocatorw.get_reader_factory();
Expand All @@ -74,7 +74,6 @@ pub(crate) fn start_router<Buf: PacketBufferMut>(
// Build network functions
let stage_ingress = Ingress::new("Ingress", iftr_factory.handle());
let stage_egress = Egress::new("Egress", iftr_factory.handle(), atabler_factory.handle());
let dst_vpcd_lookup = DstVpcdLookup::new("dst-vni-lookup", vpcdtablesr_factory.handle());
let iprouter1 = IpForwarder::new("IP-Forward-1", fibtr_factory.handle());
let iprouter2 = IpForwarder::new("IP-Forward-2", fibtr_factory.handle());
let stateless_nat = StatelessNat::with_reader("stateless-NAT", nattabler_factory.handle());
Expand All @@ -85,6 +84,7 @@ pub(crate) fn start_router<Buf: PacketBufferMut>(
);
let pktdump = PacketDumper::new("pipeline-end", true, None);
let stats_stage = Stats::new("stats", writer.clone());
let flow_filter = FlowFilter::new("flow-filter", flowfiltertablesr_factory.handle());
let flow_lookup_nf = LookupNF::new("flow-lookup", flow_table.clone());
let flow_expirations_nf = ExpirationsNF::new(flow_table.clone());

Expand All @@ -93,7 +93,7 @@ pub(crate) fn start_router<Buf: PacketBufferMut>(
DynPipeline::new()
.add_stage(stage_ingress)
.add_stage(iprouter1)
.add_stage(dst_vpcd_lookup)
.add_stage(flow_filter)
.add_stage(flow_lookup_nf)
.add_stage(stateless_nat)
.add_stage(stateful_nat)
Expand All @@ -110,7 +110,7 @@ pub(crate) fn start_router<Buf: PacketBufferMut>(
vpcmapw,
nattablesw,
natallocatorw,
vpcdtablesw,
flowfiltertablesw,
stats,
vpc_stats_store,
})
Expand Down
20 changes: 20 additions & 0 deletions flow-filter/Cargo.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
[package]
name = "dataplane-flow-filter"
edition.workspace = true
license.workspace = true
publish.workspace = true
version.workspace = true

[dependencies]
config = { workspace = true }
left-right = { workspace = true }
linkme = { workspace = true }
lpm = { workspace = true }
net = { workspace = true }
pipeline = { workspace = true }
tracectl = { workspace = true }
tracing = { workspace = true }

[dev-dependencies]
lpm = { workspace = true, features = ["testing"] }
tracing-test = { workspace = true, features = [] }
80 changes: 80 additions & 0 deletions flow-filter/src/filter_rw.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright Open Network Fabric Authors

//! Left-right integration for [`FlowFilterTable`]

use crate::tables::FlowFilterTable;
use left_right::{Absorb, ReadGuard, ReadHandle, ReadHandleFactory, WriteHandle, new_from_empty};
use tracing::debug;

/// An update operation applied to the shared [`FlowFilterTable`] through the
/// left-right write handle.
#[derive(Debug)]
pub(crate) enum FlowFilterTableChange {
    // Replace the whole table with the provided one.
    UpdateFlowFilterTable(FlowFilterTable),
}

impl Absorb<FlowFilterTableChange> for FlowFilterTable {
    // Applies a pending change to one copy of the table.
    //
    // left-right applies every operation to BOTH internal copies (this method
    // on the first, then the defaulted `absorb_second` — which delegates back
    // here — on the second), so the change must be left intact: clone the
    // payload rather than moving it out.
    fn absorb_first(&mut self, change: &mut FlowFilterTableChange, _: &Self) {
        match change {
            FlowFilterTableChange::UpdateFlowFilterTable(table) => {
                *self = table.clone();
            }
        }
    }
    // Nothing to reclaim when the retired copy is dropped.
    fn drop_first(self: Box<Self>) {}
    // Brings a fresh second copy up to date by cloning the first wholesale.
    fn sync_with(&mut self, first: &Self) {
        *self = first.clone();
    }
}

/// Read-side handle to the shared [`FlowFilterTable`].
#[derive(Debug)]
pub struct FlowFilterTableReader(ReadHandle<FlowFilterTable>);

impl FlowFilterTableReader {
    /// Enters a read epoch, returning a guard that dereferences to the
    /// current published table. Returns `None` once the corresponding
    /// writer has been dropped.
    pub(crate) fn enter(&self) -> Option<ReadGuard<'_, FlowFilterTable>> {
        self.0.enter()
    }

    /// Builds a factory from which further readers can be created
    /// (e.g. one reader per worker thread).
    #[must_use]
    pub fn factory(&self) -> FlowFilterTableReaderFactory {
        FlowFilterTableReaderFactory(self.0.factory())
    }
}

/// Factory for creating additional [`FlowFilterTableReader`]s attached to
/// the same underlying table (typically one per worker thread).
#[derive(Debug)]
pub struct FlowFilterTableReaderFactory(ReadHandleFactory<FlowFilterTable>);

impl FlowFilterTableReaderFactory {
    /// Creates a new reader over the shared table.
    #[must_use]
    pub fn handle(&self) -> FlowFilterTableReader {
        FlowFilterTableReader(self.0.handle())
    }
}

/// Write-side handle to the shared [`FlowFilterTable`].
///
/// There is a single writer; readers observe a new table only after
/// [`Self::update_flow_filter_table`] publishes it.
#[derive(Debug)]
pub struct FlowFilterTableWriter(WriteHandle<FlowFilterTable, FlowFilterTableChange>);

impl FlowFilterTableWriter {
    /// Creates a writer over a fresh, empty [`FlowFilterTable`].
    #[must_use]
    pub fn new() -> FlowFilterTableWriter {
        let (w, _r) =
            new_from_empty::<FlowFilterTable, FlowFilterTableChange>(FlowFilterTable::new());
        FlowFilterTableWriter(w)
    }

    /// Returns a reader attached to this writer's table.
    // `WriteHandle` is not `Clone`; the call derefs to the inner
    // `ReadHandle` and clones that.
    #[must_use]
    pub fn get_reader(&self) -> FlowFilterTableReader {
        FlowFilterTableReader(self.0.clone())
    }

    /// Returns a factory from which per-thread readers can be created.
    #[must_use]
    pub fn get_reader_factory(&self) -> FlowFilterTableReaderFactory {
        self.get_reader().factory()
    }

    /// Replaces the table seen by all readers with `table`, publishing the
    /// change immediately so readers pick it up on their next `enter()`.
    pub fn update_flow_filter_table(&mut self, table: FlowFilterTable) {
        self.0
            .append(FlowFilterTableChange::UpdateFlowFilterTable(table));
        self.0.publish();
        debug!("Updated flow filter table");
    }
}

// Implement `Default` instead of suppressing `clippy::new_without_default`:
// a parameterless `new()` should be reachable through the standard trait.
impl Default for FlowFilterTableWriter {
    fn default() -> Self {
        Self::new()
    }
}
Loading
Loading