diff --git a/policy-test/src/grpc.rs b/policy-test/src/grpc.rs index b640c0b26724c..949c5afde2f23 100644 --- a/policy-test/src/grpc.rs +++ b/policy-test/src/grpc.rs @@ -291,22 +291,6 @@ impl OutboundPolicyClient { Ok(rsp.into_inner()) } - pub async fn watch( - &mut self, - ns: &str, - svc: &k8s::Service, - port: u16, - ) -> Result, tonic::Status> { - let address = svc - .spec - .as_ref() - .expect("Service must have a spec") - .cluster_ip - .as_ref() - .expect("Service must have a cluster ip"); - self.watch_ip(ns, address, port).await - } - pub async fn watch_ip( &mut self, ns: &str, diff --git a/policy-test/src/lib.rs b/policy-test/src/lib.rs index da69d55a10d40..6706f94ecf634 100644 --- a/policy-test/src/lib.rs +++ b/policy-test/src/lib.rs @@ -6,6 +6,7 @@ pub mod bb; pub mod curl; pub mod grpc; pub mod outbound_api; +pub mod test_route; pub mod web; use kube::runtime::wait::Condition; @@ -15,6 +16,7 @@ use linkerd_policy_controller_k8s_api::{ ResourceExt, }; use maplit::{btreemap, convert_args}; +use test_route::TestRoute; use tokio::time; use tracing::Instrument; @@ -206,24 +208,25 @@ pub async fn await_pod_ip(client: &kube::Client, ns: &str, name: &str) -> std::n .expect("pod IP must be valid") } -// Waits until an HttpRoute with the given namespace and name has a status set -// on it, then returns the generic route status representation. -pub async fn await_route_status( - client: &kube::Client, - ns: &str, - name: &str, -) -> k8s::policy::httproute::RouteStatus { - use k8s::policy::httproute as api; - let route_status = await_condition(client, ns, name, |obj: Option<&api::HttpRoute>| -> bool { - obj.and_then(|route| route.status.as_ref()).is_some() - }) - .await - .expect("must fetch route") - .status - .expect("route must contain a status representation") - .inner; - tracing::trace!(?route_status, name, ns, "got route status"); - route_status +// Waits until an HttpRoute with the given namespace and name has been accepted. 
+pub async fn await_route_accepted(client: &kube::Client, route: &R) { + await_condition( + client, + &route.namespace().unwrap(), + &route.name_unchecked(), + |obj: Option<&R>| -> bool { + obj.map_or(false, |route| { + let conditions = route + .conditions() + .unwrap_or_default() + .into_iter() + .cloned() + .collect::>(); + is_status_accepted(&conditions) + }) + }, + ) + .await; } // Waits until an HttpRoute with the given namespace and name has a status set @@ -591,17 +594,25 @@ pub fn mk_egress_net(ns: &str, name: &str) -> k8s::policy::EgressNetwork { } #[track_caller] -pub fn assert_resource_meta(meta: &Option, resource: &Resource, port: u16) { +pub fn assert_resource_meta( + meta: &Option, + parent_ref: ParentReference, + port: u16, +) { println!("meta: {:?}", meta); - tracing::debug!(?meta, ?resource, port, "Asserting service metadata"); + tracing::debug!(?meta, ?parent_ref, port, "Asserting parent metadata"); + let mut group = parent_ref.group.unwrap(); + if group.is_empty() { + group = "core".to_string(); + } assert_eq!( meta, &Some(grpc::meta::Metadata { kind: Some(grpc::meta::metadata::Kind::Resource(grpc::meta::Resource { - group: resource.group(), - kind: resource.kind(), - name: resource.name(), - namespace: resource.namespace(), + group, + kind: parent_ref.kind.unwrap(), + name: parent_ref.name, + namespace: parent_ref.namespace.unwrap(), section: "".to_string(), port: port.into() })), diff --git a/policy-test/src/outbound_api.rs b/policy-test/src/outbound_api.rs index d8ee90e2189a2..35a7dd7cc870b 100644 --- a/policy-test/src/outbound_api.rs +++ b/policy-test/src/outbound_api.rs @@ -1,27 +1,22 @@ -use crate::{assert_resource_meta, grpc, Resource}; -use kube::ResourceExt; +use crate::{grpc, test_route::TestRoute}; +use k8s_gateway_api::ParentReference; use std::time::Duration; use tokio::time; pub async fn retry_watch_outbound_policy( client: &kube::Client, ns: &str, - resource: &Resource, + ip: &str, port: u16, ) -> tonic::Streaming { // Port-forward 
to the control plane and start watching the service's // outbound policy. let mut policy_api = grpc::OutboundPolicyClient::port_forwarded(client).await; loop { - match policy_api.watch_ip(ns, &resource.ip(), port).await { + match policy_api.watch_ip(ns, ip, port).await { Ok(rx) => return rx, Err(error) => { - tracing::error!( - ?error, - ns, - resource = resource.name(), - "failed to watch outbound policy for port 4191" - ); + tracing::error!(?error, ns, ip, port, "failed to watch outbound policy"); time::sleep(Duration::from_secs(1)).await; } } @@ -171,300 +166,47 @@ pub fn failure_accrual_consecutive( } #[track_caller] -pub fn route_backends_first_available( - route: &grpc::outbound::HttpRoute, -) -> &[grpc::outbound::http_route::RouteBackend] { - let kind = assert_singleton(&route.rules) - .backends - .as_ref() - .expect("Rule must have backends") - .kind - .as_ref() - .expect("Backend must have kind"); - match kind { - grpc::outbound::http_route::distribution::Kind::FirstAvailable(fa) => &fa.backends, - _ => panic!("Distribution must be FirstAvailable"), - } -} - -#[track_caller] -pub fn tls_route_backends_first_available( - route: &grpc::outbound::TlsRoute, -) -> &[grpc::outbound::tls_route::RouteBackend] { - let kind = assert_singleton(&route.rules) - .backends - .as_ref() - .expect("Rule must have backends") - .kind - .as_ref() - .expect("Backend must have kind"); - match kind { - grpc::outbound::tls_route::distribution::Kind::FirstAvailable(fa) => &fa.backends, - _ => panic!("Distribution must be FirstAvailable"), - } -} - -#[track_caller] -pub fn route_backends_random_available( - route: &grpc::outbound::HttpRoute, -) -> &[grpc::outbound::http_route::WeightedRouteBackend] { - let kind = assert_singleton(&route.rules) - .backends - .as_ref() - .expect("Rule must have backends") - .kind - .as_ref() - .expect("Backend must have kind"); - match kind { - grpc::outbound::http_route::distribution::Kind::RandomAvailable(dist) => &dist.backends, - _ => 
panic!("Distribution must be RandomAvailable"), - } -} - -#[track_caller] -pub fn tls_route_backends_random_available( - route: &grpc::outbound::TlsRoute, -) -> &[grpc::outbound::tls_route::WeightedRouteBackend] { - let kind = assert_singleton(&route.rules) - .backends - .as_ref() - .expect("Rule must have backends") - .kind - .as_ref() - .expect("Backend must have kind"); - match kind { - grpc::outbound::tls_route::distribution::Kind::RandomAvailable(dist) => &dist.backends, - _ => panic!("Distribution must be RandomAvailable"), - } -} - -#[track_caller] -pub fn tcp_route_backends_random_available( - route: &grpc::outbound::OpaqueRoute, -) -> &[grpc::outbound::opaque_route::WeightedRouteBackend] { - let kind = assert_singleton(&route.rules) - .backends - .as_ref() - .expect("Rule must have backends") - .kind - .as_ref() - .expect("Backend must have kind"); - match kind { - grpc::outbound::opaque_route::distribution::Kind::RandomAvailable(dist) => &dist.backends, - _ => panic!("Distribution must be RandomAvailable"), - } -} - -#[track_caller] -pub fn route_name(route: &grpc::outbound::HttpRoute) -> &str { - match route.metadata.as_ref().unwrap().kind.as_ref().unwrap() { - grpc::meta::metadata::Kind::Resource(grpc::meta::Resource { ref name, .. }) => name, - _ => panic!("route must be a resource kind"), - } -} - -#[track_caller] -pub fn tls_route_name(route: &grpc::outbound::TlsRoute) -> &str { - match route.metadata.as_ref().unwrap().kind.as_ref().unwrap() { - grpc::meta::metadata::Kind::Resource(grpc::meta::Resource { ref name, .. }) => name, - _ => panic!("route must be a resource kind"), - } -} - -#[track_caller] -pub fn tcp_route_name(route: &grpc::outbound::OpaqueRoute) -> &str { - match route.metadata.as_ref().unwrap().kind.as_ref().unwrap() { - grpc::meta::metadata::Kind::Resource(grpc::meta::Resource { ref name, .. 
}) => name, - _ => panic!("route must be a resource kind"), - } -} - -#[track_caller] -pub fn assert_backend_has_failure_filter( - backend: &grpc::outbound::http_route::WeightedRouteBackend, +pub fn assert_route_is_default( + route: &R::Route, + parent: &ParentReference, + port: u16, ) { - let filter = assert_singleton(&backend.backend.as_ref().unwrap().filters); - match filter.kind.as_ref().unwrap() { - grpc::outbound::http_route::filter::Kind::FailureInjector(_) => {} - _ => panic!("backend must have FailureInjector filter"), - }; -} - -#[track_caller] -pub fn assert_route_is_default(route: &grpc::outbound::HttpRoute, parent: &Resource, port: u16) { - let kind = route.metadata.as_ref().unwrap().kind.as_ref().unwrap(); - match kind { - grpc::meta::metadata::Kind::Default(_) => {} - grpc::meta::metadata::Kind::Resource(r) => { - panic!("route expected to be default but got resource {r:?}") - } - } - - let backends = route_backends_first_available(route); - let backend = assert_singleton(backends); - assert_backend_matches_parent(backend, parent, port); - - let rule = assert_singleton(&route.rules); - let route_match = assert_singleton(&rule.matches); - let path_match = route_match.path.as_ref().unwrap().kind.as_ref().unwrap(); - assert_eq!( - *path_match, - grpc::http_route::path_match::Kind::Prefix("/".to_string()) - ); -} + let rules = &R::rules_first_available(route); + let backends = assert_singleton(rules); + let backend = R::backend(*assert_singleton(backends)); + assert_backend_matches_reference(backend, parent, port); -#[track_caller] -pub fn assert_tls_route_is_default(route: &grpc::outbound::TlsRoute, parent: &Resource, port: u16) { - let kind = route.metadata.as_ref().unwrap().kind.as_ref().unwrap(); - match kind { + let route_meta = R::extract_meta(route); + match route_meta.kind.as_ref().unwrap() { grpc::meta::metadata::Kind::Default(_) => {} grpc::meta::metadata::Kind::Resource(r) => { panic!("route expected to be default but got resource {r:?}") } } 
- - let backends = tls_route_backends_first_available(route); - let backend = assert_singleton(backends); - assert_tls_backend_matches_parent(backend, parent, port); - assert_singleton(&route.rules); } #[track_caller] -pub fn assert_backend_matches_parent( - backend: &grpc::outbound::http_route::RouteBackend, - parent: &Resource, +pub fn assert_backend_matches_reference( + backend: &grpc::outbound::Backend, + obj_ref: &ParentReference, port: u16, ) { - let backend = backend.backend.as_ref().unwrap(); - - match parent { - Resource::Service(svc) => { - let dst = match backend.kind.as_ref().unwrap() { - grpc::outbound::backend::Kind::Balancer(balance) => { - let kind = balance.discovery.as_ref().unwrap().kind.as_ref().unwrap(); - match kind { - grpc::outbound::backend::endpoint_discovery::Kind::Dst(dst) => &dst.path, - } - } - grpc::outbound::backend::Kind::Forward(_) => { - panic!("service default route backend must be Balancer") - } - }; - assert_eq!( - *dst, - format!( - "{}.{}.svc.{}:{}", - svc.name_unchecked(), - svc.namespace().unwrap(), - "cluster.local", - port - ) - ); + let mut group = obj_ref.group.as_deref(); + if group == Some("") { + group = Some("core"); + } + match backend.metadata.as_ref().unwrap().kind.as_ref().unwrap() { + grpc::meta::metadata::Kind::Resource(resource) => { + assert_eq!(resource.name, obj_ref.name); + assert_eq!(Some(&resource.namespace), obj_ref.namespace.as_ref()); + assert_eq!(Some(resource.group.as_str()), group); + assert_eq!(Some(&resource.kind), obj_ref.kind.as_ref()); + assert_eq!(resource.port, u32::from(port)); } - - Resource::EgressNetwork(_) => { - match backend.kind.as_ref().unwrap() { - grpc::outbound::backend::Kind::Forward(_) => {} - grpc::outbound::backend::Kind::Balancer(_) => { - panic!("egress net default route backend must be Forward") - } - }; + grpc::meta::metadata::Kind::Default(_) => { + panic!("backend expected to be resource but got default") } } - - assert_resource_meta(&backend.metadata, parent, port) -} 
- -#[track_caller] -pub fn assert_tls_backend_matches_parent( - backend: &grpc::outbound::tls_route::RouteBackend, - parent: &Resource, - port: u16, -) { - let backend = backend.backend.as_ref().unwrap(); - - match parent { - Resource::Service(svc) => { - let dst = match backend.kind.as_ref().unwrap() { - grpc::outbound::backend::Kind::Balancer(balance) => { - let kind = balance.discovery.as_ref().unwrap().kind.as_ref().unwrap(); - match kind { - grpc::outbound::backend::endpoint_discovery::Kind::Dst(dst) => &dst.path, - } - } - grpc::outbound::backend::Kind::Forward(_) => { - panic!("service default route backend must be Balancer") - } - }; - assert_eq!( - *dst, - format!( - "{}.{}.svc.{}:{}", - svc.name_unchecked(), - svc.namespace().unwrap(), - "cluster.local", - port - ) - ); - } - - Resource::EgressNetwork(_) => { - match backend.kind.as_ref().unwrap() { - grpc::outbound::backend::Kind::Forward(_) => {} - grpc::outbound::backend::Kind::Balancer(_) => { - panic!("egress net default route backend must be Forward") - } - }; - } - } - - assert_resource_meta(&backend.metadata, parent, port) -} - -#[track_caller] -pub fn assert_tcp_backend_matches_parent( - backend: &grpc::outbound::opaque_route::RouteBackend, - parent: &Resource, - port: u16, -) { - let backend = backend.backend.as_ref().unwrap(); - - match parent { - Resource::Service(svc) => { - let dst = match backend.kind.as_ref().unwrap() { - grpc::outbound::backend::Kind::Balancer(balance) => { - let kind = balance.discovery.as_ref().unwrap().kind.as_ref().unwrap(); - match kind { - grpc::outbound::backend::endpoint_discovery::Kind::Dst(dst) => &dst.path, - } - } - grpc::outbound::backend::Kind::Forward(_) => { - panic!("service default route backend must be Balancer") - } - }; - assert_eq!( - *dst, - format!( - "{}.{}.svc.{}:{}", - svc.name_unchecked(), - svc.namespace().unwrap(), - "cluster.local", - port - ) - ); - } - - Resource::EgressNetwork(_) => { - match backend.kind.as_ref().unwrap() { - 
grpc::outbound::backend::Kind::Forward(_) => {} - grpc::outbound::backend::Kind::Balancer(_) => { - panic!("egress net default route backend must be Forward") - } - }; - } - } - - assert_resource_meta(&backend.metadata, parent, port) } #[track_caller] @@ -472,40 +214,3 @@ pub fn assert_singleton(ts: &[T]) -> &T { assert_eq!(ts.len(), 1); ts.first().unwrap() } - -#[track_caller] -pub fn assert_route_attached<'a, T>(routes: &'a [T], parent: &Resource) -> &'a T { - match parent { - Resource::EgressNetwork(_) => { - assert_eq!(routes.len(), 2); - routes.first().unwrap() - } - Resource::Service(_) => assert_singleton(routes), - } -} - -#[track_caller] -pub fn assert_route_name_eq(route: &grpc::outbound::HttpRoute, name: &str) { - assert_name_eq(route.metadata.as_ref().unwrap(), name) -} - -#[track_caller] -pub fn assert_tls_route_name_eq(route: &grpc::outbound::TlsRoute, name: &str) { - assert_name_eq(route.metadata.as_ref().unwrap(), name) -} - -#[track_caller] -pub fn assert_tcp_route_name_eq(route: &grpc::outbound::OpaqueRoute, name: &str) { - assert_name_eq(route.metadata.as_ref().unwrap(), name) -} - -#[track_caller] -pub fn assert_name_eq(meta: &grpc::meta::Metadata, name: &str) { - let kind = meta.kind.as_ref().unwrap(); - match kind { - grpc::meta::metadata::Kind::Default(d) => { - panic!("route expected to not be default, but got default {d:?}") - } - grpc::meta::metadata::Kind::Resource(resource) => assert_eq!(resource.name, *name), - } -} diff --git a/policy-test/src/test_route.rs b/policy-test/src/test_route.rs new file mode 100644 index 0000000000000..7f895d82cbe43 --- /dev/null +++ b/policy-test/src/test_route.rs @@ -0,0 +1,831 @@ +use k8s_gateway_api::{self as gateway, BackendRef, ParentReference}; +use k8s_openapi::Resource; +use linkerd2_proxy_api::{meta, meta::Metadata, outbound}; +use linkerd_policy_controller_k8s_api::{ + self as k8s, policy, Condition, Resource as _, ResourceExt, +}; + +use crate::outbound_api::{detect_http_routes, grpc_routes, 
tcp_routes, tls_routes}; + +pub trait TestRoute: + kube::Resource + + serde::Serialize + + serde::de::DeserializeOwned + + Clone + + std::fmt::Debug + + Send + + Sync + + 'static +{ + type Route; + type Backend; + type Filter; + + fn make_route( + ns: impl ToString, + parents: Vec, + rules: Vec>, + ) -> Self; + fn routes(config: &outbound::OutboundPolicy, f: F) + where + F: Fn(&[Self::Route]); + fn parents_mut(&mut self) -> Vec<&mut ParentReference>; + fn extract_meta(route: &Self::Route) -> &Metadata; + fn backend_filters(backend: &Self::Backend) -> Vec<&Self::Filter>; + fn rules_first_available(route: &Self::Route) -> Vec>; + fn rules_random_available(route: &Self::Route) -> Vec>; + fn backend(backend: &Self::Backend) -> &outbound::Backend; + fn conditions(&self) -> Option>; + fn is_failure_filter(filter: &Self::Filter) -> bool; + + fn meta_eq(&self, meta: &Metadata) -> bool { + let meta = match &meta.kind { + Some(meta::metadata::Kind::Resource(r)) => r, + _ => return false, + }; + let dt = Default::default(); + self.meta().name.as_ref() == Some(&meta.name) + && self.meta().namespace.as_ref() == Some(&meta.namespace) + && Self::kind(&dt) == meta.kind + && Self::group(&dt) == meta.group + } +} + +#[allow(async_fn_in_trait)] +pub trait TestParent: + kube::Resource + + serde::Serialize + + serde::de::DeserializeOwned + + Clone + + std::fmt::Debug + + Send + + Sync +{ + fn make_parent(ns: impl ToString) -> Self; + fn make_backend(ns: impl ToString) -> Option; + fn conditions(&self) -> Vec<&Condition>; + fn obj_ref(&self) -> ParentReference; + fn backend_ref(&self, port: u16) -> gateway::BackendRef { + let dt = Default::default(); + gateway::BackendRef { + weight: None, + inner: gateway::BackendObjectReference { + group: Some(Self::group(&dt).to_string()), + kind: Some(Self::kind(&dt).to_string()), + name: self.name_unchecked(), + namespace: self.namespace(), + port: Some(port), + }, + } + } + fn ip(&self) -> &str; +} + +impl TestRoute for gateway::HttpRoute { + type 
Route = outbound::HttpRoute; + type Backend = outbound::http_route::RouteBackend; + type Filter = outbound::http_route::Filter; + + fn make_route( + ns: impl ToString, + parents: Vec, + rules: Vec>, + ) -> Self { + let rules = rules + .into_iter() + .map(|backends| { + let backends = backends + .into_iter() + .map(|backend| gateway::HttpBackendRef { + backend_ref: Some(backend), + filters: None, + }) + .collect(); + gateway::HttpRouteRule { + matches: Some(vec![]), + filters: None, + backend_refs: Some(backends), + } + }) + .collect(); + gateway::HttpRoute { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some("foo-route".to_string()), + ..Default::default() + }, + spec: gateway::HttpRouteSpec { + inner: gateway::CommonRouteSpec { + parent_refs: Some(parents), + }, + hostnames: None, + rules: Some(rules), + }, + status: None, + } + } + + fn routes(config: &outbound::OutboundPolicy, f: F) + where + F: Fn(&[outbound::HttpRoute]), + { + detect_http_routes(config, f); + } + + fn extract_meta(route: &outbound::HttpRoute) -> &Metadata { + route.metadata.as_ref().unwrap() + } + + fn backend_filters( + backend: &outbound::http_route::RouteBackend, + ) -> Vec<&outbound::http_route::Filter> { + backend.filters.iter().collect() + } + + fn rules_first_available( + route: &outbound::HttpRoute, + ) -> Vec> { + route + .rules + .iter() + .map( + |rule| match rule.backends.as_ref().unwrap().kind.as_ref().unwrap() { + outbound::http_route::distribution::Kind::FirstAvailable(first_available) => { + first_available.backends.iter().collect() + } + _ => panic!("unexpected distribution kind"), + }, + ) + .collect() + } + + fn rules_random_available( + route: &outbound::HttpRoute, + ) -> Vec> { + route + .rules + .iter() + .map( + |rule| match rule.backends.as_ref().unwrap().kind.as_ref().unwrap() { + outbound::http_route::distribution::Kind::RandomAvailable(random_available) => { + random_available + .backends + .iter() + .map(|backend| 
backend.backend.as_ref().unwrap()) + .collect() + } + _ => panic!("unexpected distribution kind"), + }, + ) + .collect() + } + + fn backend(backend: &outbound::http_route::RouteBackend) -> &outbound::Backend { + backend.backend.as_ref().unwrap() + } + + fn conditions(&self) -> Option> { + self.status.as_ref().map(|status| { + status + .inner + .parents + .iter() + .flat_map(|parent_status| &parent_status.conditions) + .collect() + }) + } + + fn is_failure_filter(filter: &outbound::http_route::Filter) -> bool { + matches!( + filter.kind.as_ref().unwrap(), + outbound::http_route::filter::Kind::FailureInjector(_) + ) + } + + fn parents_mut(&mut self) -> Vec<&mut ParentReference> { + self.spec + .inner + .parent_refs + .as_mut() + .unwrap() + .iter_mut() + .collect() + } +} + +impl TestRoute for policy::HttpRoute { + type Route = outbound::HttpRoute; + type Backend = outbound::http_route::RouteBackend; + type Filter = outbound::http_route::Filter; + + fn make_route( + ns: impl ToString, + parents: Vec, + rules: Vec>, + ) -> Self { + let rules = rules + .into_iter() + .map(|backends| { + let backends = backends + .into_iter() + .map(|backend| gateway::HttpBackendRef { + backend_ref: Some(backend), + filters: None, + }) + .collect(); + policy::httproute::HttpRouteRule { + matches: Some(vec![]), + filters: None, + timeouts: None, + backend_refs: Some(backends), + } + }) + .collect(); + policy::HttpRoute { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some("foo-route".to_string()), + ..Default::default() + }, + spec: policy::HttpRouteSpec { + inner: gateway::CommonRouteSpec { + parent_refs: Some(parents), + }, + hostnames: None, + rules: Some(rules), + }, + status: None, + } + } + + fn routes(config: &outbound::OutboundPolicy, f: F) + where + F: Fn(&[outbound::HttpRoute]), + { + detect_http_routes(config, f); + } + + fn extract_meta(route: &outbound::HttpRoute) -> &Metadata { + route.metadata.as_ref().unwrap() + } + + fn backend_filters( + 
backend: &outbound::http_route::RouteBackend, + ) -> Vec<&outbound::http_route::Filter> { + backend.filters.iter().collect() + } + + fn rules_first_available( + route: &outbound::HttpRoute, + ) -> Vec> { + route + .rules + .iter() + .map( + |rule| match rule.backends.as_ref().unwrap().kind.as_ref().unwrap() { + outbound::http_route::distribution::Kind::FirstAvailable(first_available) => { + first_available.backends.iter().collect() + } + _ => panic!("unexpected distribution kind"), + }, + ) + .collect() + } + + fn rules_random_available( + route: &outbound::HttpRoute, + ) -> Vec> { + route + .rules + .iter() + .map( + |rule| match rule.backends.as_ref().unwrap().kind.as_ref().unwrap() { + outbound::http_route::distribution::Kind::RandomAvailable(random_available) => { + random_available + .backends + .iter() + .map(|backend| backend.backend.as_ref().unwrap()) + .collect() + } + _ => panic!("unexpected distribution kind"), + }, + ) + .collect() + } + + fn backend(backend: &outbound::http_route::RouteBackend) -> &outbound::Backend { + backend.backend.as_ref().unwrap() + } + + fn conditions(&self) -> Option> { + self.status.as_ref().map(|status| { + status + .inner + .parents + .iter() + .flat_map(|parent_status| &parent_status.conditions) + .collect() + }) + } + + fn is_failure_filter(filter: &outbound::http_route::Filter) -> bool { + matches!( + filter.kind.as_ref().unwrap(), + outbound::http_route::filter::Kind::FailureInjector(_) + ) + } + + fn parents_mut(&mut self) -> Vec<&mut ParentReference> { + self.spec + .inner + .parent_refs + .as_mut() + .unwrap() + .iter_mut() + .collect() + } +} + +impl TestRoute for gateway::GrpcRoute { + type Route = outbound::GrpcRoute; + type Backend = outbound::grpc_route::RouteBackend; + type Filter = outbound::grpc_route::Filter; + + fn make_route( + ns: impl ToString, + parents: Vec, + rules: Vec>, + ) -> Self { + let rules = rules + .into_iter() + .map(|backends| { + let backends = backends + .into_iter() + .map(|backend| 
gateway::GrpcRouteBackendRef { + filters: None, + inner: backend.inner, + weight: None, + }) + .collect(); + gateway::GrpcRouteRule { + matches: Some(vec![]), + filters: None, + backend_refs: Some(backends), + } + }) + .collect(); + gateway::GrpcRoute { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some("foo-route".to_string()), + ..Default::default() + }, + spec: gateway::GrpcRouteSpec { + inner: gateway::CommonRouteSpec { + parent_refs: Some(parents), + }, + hostnames: None, + rules: Some(rules), + }, + status: None, + } + } + + fn routes(config: &outbound::OutboundPolicy, f: F) + where + F: Fn(&[outbound::GrpcRoute]), + { + f(grpc_routes(config)); + } + + fn extract_meta(route: &outbound::GrpcRoute) -> &Metadata { + route.metadata.as_ref().unwrap() + } + + fn backend_filters( + backend: &outbound::grpc_route::RouteBackend, + ) -> Vec<&outbound::grpc_route::Filter> { + backend.filters.iter().collect() + } + + fn rules_first_available( + route: &outbound::GrpcRoute, + ) -> Vec> { + route + .rules + .iter() + .map( + |rule| match rule.backends.as_ref().unwrap().kind.as_ref().unwrap() { + outbound::grpc_route::distribution::Kind::FirstAvailable(first_available) => { + first_available.backends.iter().collect() + } + _ => panic!("unexpected distribution kind"), + }, + ) + .collect() + } + + fn rules_random_available( + route: &outbound::GrpcRoute, + ) -> Vec> { + route + .rules + .iter() + .map( + |rule| match rule.backends.as_ref().unwrap().kind.as_ref().unwrap() { + outbound::grpc_route::distribution::Kind::RandomAvailable(random_available) => { + random_available + .backends + .iter() + .map(|backend| backend.backend.as_ref().unwrap()) + .collect() + } + _ => panic!("unexpected distribution kind"), + }, + ) + .collect() + } + + fn backend(backend: &outbound::grpc_route::RouteBackend) -> &outbound::Backend { + backend.backend.as_ref().unwrap() + } + + fn conditions(&self) -> Option> { + self.status.as_ref().map(|status| { + status + .inner 
+ .parents + .iter() + .flat_map(|parent_status| &parent_status.conditions) + .collect() + }) + } + + fn is_failure_filter(filter: &outbound::grpc_route::Filter) -> bool { + matches!( + filter.kind.as_ref().unwrap(), + outbound::grpc_route::filter::Kind::FailureInjector(_) + ) + } + + fn parents_mut(&mut self) -> Vec<&mut ParentReference> { + self.spec + .inner + .parent_refs + .as_mut() + .unwrap() + .iter_mut() + .collect() + } +} + +impl TestRoute for gateway::TlsRoute { + type Route = outbound::TlsRoute; + type Backend = outbound::tls_route::RouteBackend; + type Filter = outbound::tls_route::Filter; + + fn make_route( + ns: impl ToString, + parents: Vec, + rules: Vec>, + ) -> Self { + let rules = rules + .into_iter() + .map(|backends| gateway::TlsRouteRule { + backend_refs: backends, + }) + .collect(); + gateway::TlsRoute { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some("foo-route".to_string()), + ..Default::default() + }, + spec: gateway::TlsRouteSpec { + inner: gateway::CommonRouteSpec { + parent_refs: Some(parents), + }, + hostnames: None, + rules, + }, + status: None, + } + } + + fn routes(config: &outbound::OutboundPolicy, f: F) + where + F: Fn(&[outbound::TlsRoute]), + { + f(tls_routes(config)); + } + + fn extract_meta(route: &outbound::TlsRoute) -> &Metadata { + route.metadata.as_ref().unwrap() + } + + fn backend_filters( + backend: &outbound::tls_route::RouteBackend, + ) -> Vec<&outbound::tls_route::Filter> { + backend.filters.iter().collect() + } + + fn rules_first_available( + route: &outbound::TlsRoute, + ) -> Vec> { + route + .rules + .iter() + .map( + |rule| match rule.backends.as_ref().unwrap().kind.as_ref().unwrap() { + outbound::tls_route::distribution::Kind::FirstAvailable(first_available) => { + first_available.backends.iter().collect() + } + _ => panic!("unexpected distribution kind"), + }, + ) + .collect() + } + + fn rules_random_available( + route: &outbound::TlsRoute, + ) -> Vec> { + route + .rules + .iter() 
+ .map( + |rule| match rule.backends.as_ref().unwrap().kind.as_ref().unwrap() { + outbound::tls_route::distribution::Kind::RandomAvailable(random_available) => { + random_available + .backends + .iter() + .map(|backend| backend.backend.as_ref().unwrap()) + .collect() + } + _ => panic!("unexpected distribution kind"), + }, + ) + .collect() + } + + fn backend(backend: &outbound::tls_route::RouteBackend) -> &outbound::Backend { + backend.backend.as_ref().unwrap() + } + + fn conditions(&self) -> Option> { + self.status.as_ref().map(|status| { + status + .inner + .parents + .iter() + .flat_map(|parent_status| &parent_status.conditions) + .collect() + }) + } + + fn is_failure_filter(filter: &outbound::tls_route::Filter) -> bool { + matches!( + filter.kind.as_ref().unwrap(), + outbound::tls_route::filter::Kind::Invalid(_) + ) + } + + fn parents_mut(&mut self) -> Vec<&mut ParentReference> { + self.spec + .inner + .parent_refs + .as_mut() + .unwrap() + .iter_mut() + .collect() + } +} + +impl TestRoute for gateway::TcpRoute { + type Route = outbound::OpaqueRoute; + type Backend = outbound::opaque_route::RouteBackend; + type Filter = outbound::opaque_route::Filter; + + fn make_route( + ns: impl ToString, + parents: Vec, + rules: Vec>, + ) -> Self { + let rules = rules + .into_iter() + .map(|backends| gateway::TcpRouteRule { + backend_refs: backends, + }) + .collect(); + gateway::TcpRoute { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some("foo-route".to_string()), + ..Default::default() + }, + spec: gateway::TcpRouteSpec { + inner: gateway::CommonRouteSpec { + parent_refs: Some(parents), + }, + rules, + }, + status: None, + } + } + + fn routes(config: &outbound::OutboundPolicy, f: F) + where + F: Fn(&[outbound::OpaqueRoute]), + { + f(tcp_routes(config)); + } + + fn extract_meta(route: &outbound::OpaqueRoute) -> &Metadata { + route.metadata.as_ref().unwrap() + } + + fn backend_filters( + backend: &outbound::opaque_route::RouteBackend, + ) -> 
Vec<&outbound::opaque_route::Filter> { + backend.filters.iter().collect() + } + + fn rules_first_available( + route: &outbound::OpaqueRoute, + ) -> Vec> { + route + .rules + .iter() + .map( + |rule| match rule.backends.as_ref().unwrap().kind.as_ref().unwrap() { + outbound::opaque_route::distribution::Kind::FirstAvailable(first_available) => { + first_available.backends.iter().collect() + } + _ => panic!("unexpected distribution kind"), + }, + ) + .collect() + } + + fn rules_random_available( + route: &outbound::OpaqueRoute, + ) -> Vec> { + route + .rules + .iter() + .map( + |rule| match rule.backends.as_ref().unwrap().kind.as_ref().unwrap() { + outbound::opaque_route::distribution::Kind::RandomAvailable( + random_available, + ) => random_available + .backends + .iter() + .map(|backend| backend.backend.as_ref().unwrap()) + .collect(), + _ => panic!("unexpected distribution kind"), + }, + ) + .collect() + } + + fn backend(backend: &outbound::opaque_route::RouteBackend) -> &outbound::Backend { + backend.backend.as_ref().unwrap() + } + + fn conditions(&self) -> Option> { + self.status.as_ref().map(|status| { + status + .inner + .parents + .iter() + .flat_map(|parent_status| &parent_status.conditions) + .collect() + }) + } + + fn is_failure_filter(filter: &outbound::opaque_route::Filter) -> bool { + matches!( + filter.kind.as_ref().unwrap(), + outbound::opaque_route::filter::Kind::Invalid(_) + ) + } + + fn parents_mut(&mut self) -> Vec<&mut ParentReference> { + self.spec + .inner + .parent_refs + .as_mut() + .unwrap() + .iter_mut() + .collect() + } +} + +impl TestParent for k8s::Service { + fn make_parent(ns: impl ToString) -> Self { + k8s::Service { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some("my-svc".to_string()), + ..Default::default() + }, + spec: Some(k8s::ServiceSpec { + ports: Some(vec![k8s::ServicePort { + port: 4191, + ..Default::default() + }]), + ..Default::default() + }), + ..k8s::Service::default() + } + } + + fn 
make_backend(ns: impl ToString) -> Option { + let service = k8s::Service { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some("backend".to_string()), + ..Default::default() + }, + spec: Some(k8s::ServiceSpec { + ports: Some(vec![k8s::ServicePort { + port: 4191, + ..Default::default() + }]), + ..Default::default() + }), + ..k8s::Service::default() + }; + Some(service) + } + + fn conditions(&self) -> Vec<&Condition> { + self.status + .as_ref() + .unwrap() + .conditions + .as_ref() + .unwrap() + .iter() + .collect() + } + + fn obj_ref(&self) -> ParentReference { + ParentReference { + kind: Some(k8s::Service::KIND.to_string()), + name: self.name_unchecked(), + namespace: self.namespace(), + group: Some(k8s::Service::GROUP.to_string()), + section_name: None, + port: Some(4191), + } + } + + fn ip(&self) -> &str { + self.spec.as_ref().unwrap().cluster_ip.as_ref().unwrap() + } +} + +impl TestParent for policy::EgressNetwork { + fn make_parent(ns: impl ToString) -> Self { + policy::EgressNetwork { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some("my-egress".to_string()), + ..Default::default() + }, + spec: policy::EgressNetworkSpec { + networks: None, + traffic_policy: policy::egress_network::TrafficPolicy::Allow, + }, + status: None, + } + } + + fn make_backend(_ns: impl ToString) -> Option { + None + } + + fn conditions(&self) -> Vec<&Condition> { + self.status.as_ref().unwrap().conditions.iter().collect() + } + + fn obj_ref(&self) -> ParentReference { + ParentReference { + kind: Some(policy::EgressNetwork::kind(&()).to_string()), + name: self.name_unchecked(), + namespace: self.namespace(), + group: Some(policy::EgressNetwork::group(&()).to_string()), + section_name: None, + port: Some(4191), + } + } + + fn ip(&self) -> &str { + // For EgressNetwork, we can just return a non-private + // IP address as our default cluster setup dictates that + // all non-private networks are considered egress. 
Since + // we do not modify this setting in tests for the time being, + // returning 1.1.1.1 is fine. + "1.1.1.1" + } +} diff --git a/policy-test/tests/inbound_http_route_status.rs b/policy-test/tests/inbound_http_route_status.rs index 51166aeed0c6e..b37030c05e50a 100644 --- a/policy-test/tests/inbound_http_route_status.rs +++ b/policy-test/tests/inbound_http_route_status.rs @@ -1,8 +1,7 @@ use kube::ResourceExt; use linkerd_policy_controller_k8s_api as k8s; use linkerd_policy_test::{ - await_condition, await_route_status, create, find_route_condition, mk_route, update, - with_temp_ns, + await_condition, create, find_route_condition, mk_route, update, with_temp_ns, }; #[tokio::test(flavor = "current_thread")] @@ -349,3 +348,23 @@ async fn inbound_accepted_reconcile_parent_delete() { }) .await; } + +// Waits until an HttpRoute with the given namespace and name has a status set +// on it, then returns the generic route status representation. +async fn await_route_status( + client: &kube::Client, + ns: &str, + name: &str, +) -> k8s::policy::httproute::RouteStatus { + use k8s::policy::httproute as api; + let route_status = await_condition(client, ns, name, |obj: Option<&api::HttpRoute>| -> bool { + obj.and_then(|route| route.status.as_ref()).is_some() + }) + .await + .expect("must fetch route") + .status + .expect("route must contain a status representation") + .inner; + tracing::trace!(?route_status, name, ns, "got route status"); + route_status +} diff --git a/policy-test/tests/outbound_api.rs b/policy-test/tests/outbound_api.rs new file mode 100644 index 0000000000000..9de3fe820663d --- /dev/null +++ b/policy-test/tests/outbound_api.rs @@ -0,0 +1,1120 @@ +use futures::{FutureExt, StreamExt}; +use k8s_gateway_api::{self as gateway}; +use linkerd_policy_controller_k8s_api::{self as k8s, policy}; +use linkerd_policy_test::{ + assert_resource_meta, await_route_accepted, create, create_cluster_scoped, + delete_cluster_scoped, grpc, + outbound_api::{ + 
assert_backend_matches_reference, assert_route_is_default, assert_singleton, + retry_watch_outbound_policy, + }, + test_route::{TestParent, TestRoute}, + update, with_temp_ns, +}; +use maplit::{btreemap, convert_args}; + +#[tokio::test(flavor = "current_thread")] +async fn parent_does_not_exist() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + with_temp_ns(|client, ns| async move { + let port = 4191; + // Some IP address in the cluster networks which we assume is not + // used. + let ip = "10.8.255.255"; + + let mut policy_api = grpc::OutboundPolicyClient::port_forwarded(&client).await; + let rsp: Result, tonic::Status> = + policy_api.watch_ip(&ns, ip, port).await; + + assert!(rsp.is_err()); + assert_eq!(rsp.err().unwrap().code(), tonic::Code::NotFound); + }) + .await + } + + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn parent_with_no_routes() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()) + ); + with_temp_ns(|client, ns| async move { + let port = 4191; + // Create a parent with no routes. + // let parent = P::create_parent(&client.clone(), &ns).await; + let parent = create(&client, P::make_parent(&ns)).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. 
+ gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + }) + .await; + } + + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn route_with_no_rules() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()) + ); + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()), + ); + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. + gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + let route = create( + &client, + R::make_route(ns.clone(), vec![parent.obj_ref()], vec![]), + ) + .await; + await_route_accepted(&client, &route).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a route with no rules. 
+ R::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + let rules = &R::rules_first_available(outbound_route); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + assert!(rules.is_empty()); + }); + }) + .await; + } + + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn routes_without_backends() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()), + ); + with_temp_ns(|client, ns| async move { + // Create a parent + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. + gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + // Create a route with one rule with no backends. + let route = create( + &client, + R::make_route(ns.clone(), vec![parent.obj_ref()], vec![vec![]]), + ) + .await; + await_route_accepted(&client, &route).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a route with the logical backend. 
+ R::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + let rules = &R::rules_first_available(outbound_route); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + let backends = assert_singleton(rules); + let backend = R::backend(*assert_singleton(backends)); + assert_backend_matches_reference(backend, &parent.obj_ref(), port); + }); + }) + .await; + } + + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn routes_with_backend() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()) + ); + with_temp_ns(|client, ns| async move { + // Create a parent + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. 
+ gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + let route = create( + &client, + R::make_route( + ns, + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ), + ) + .await; + await_route_accepted(&client, &route).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a route with a backend with no filters. + R::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + let rules = &R::rules_random_available(outbound_route); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + let backends = assert_singleton(rules); + + let filters = R::backend_filters(*assert_singleton(backends)); + assert!(filters.is_empty()); + + let outbound_backend = R::backend(*assert_singleton(backends)); + assert_backend_matches_reference( + outbound_backend, + &backend.obj_ref(), + backend_port, + ); + }); + }) + .await; + } + + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn service_with_routes_with_cross_namespace_backend() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()) + ); + with_temp_ns(|client, ns| async move { + // Create a parent + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + 
assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. + gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + let backend_ns_name = format!("{}-backend", ns); + let backend_ns = create_cluster_scoped( + &client, + k8s::Namespace { + metadata: k8s::ObjectMeta { + name: Some(backend_ns_name.clone()), + labels: Some(convert_args!(btreemap!( + "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), + ))), + ..Default::default() + }, + ..Default::default() + }, + ) + .await; + + // Create a cross namespace backend + let backend_port = 8888; + let backend = match P::make_backend(&backend_ns_name) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + let route = create( + &client, + R::make_route( + ns, + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ), + ) + .await; + await_route_accepted(&client, &route).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a route with a backend with no filters. 
+ R::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + let rules = &R::rules_random_available(outbound_route); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + let backends = assert_singleton(rules); + + let filters = R::backend_filters(*assert_singleton(backends)); + assert!(filters.is_empty()); + + let outbound_backend = R::backend(*assert_singleton(backends)); + assert_backend_matches_reference( + outbound_backend, + &backend.obj_ref(), + backend_port, + ); + }); + + delete_cluster_scoped(&client, backend_ns).await + }) + .await + } + + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn routes_with_invalid_backend() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()) + ); + with_temp_ns(|client, ns| async move { + // Create a parent + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. 
+ gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + let backend_port = 8888; + let mut backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + backend.meta_mut().name = Some("invalid".to_string()); + let route = create( + &client, + R::make_route( + ns, + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ), + ) + .await; + await_route_accepted(&client, &route).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a route with a backend with a failure filter. + R::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + let rules = &R::rules_random_available(outbound_route); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + let backends = assert_singleton(rules); + + let filters = R::backend_filters(*assert_singleton(backends)); + let filter = assert_singleton(&filters); + assert!(R::is_failure_filter(filter)); + + let outbound_backend = R::backend(*assert_singleton(backends)); + assert_backend_matches_reference( + outbound_backend, + &backend.obj_ref(), + backend_port, + ); + }); + }) + .await + } + + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn multiple_routes() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()) + ); + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = 
%R::kind(&R::DynamicType::default()), + ); + // Create a parent + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. + gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + // Routes should be returned in sorted order by creation timestamp then + // name. To ensure that this test isn't timing dependant, routes should + // be created in alphabetical order. + let mut route_a = R::make_route( + ns.clone(), + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + route_a.meta_mut().name = Some("a-route".to_string()); + let route_a = create(&client, route_a).await; + await_route_accepted(&client, &route_a).await; + + // First route update. + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + let mut route_b = R::make_route( + ns.clone(), + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + route_b.meta_mut().name = Some("b-route".to_string()); + let route_b = create(&client, route_b).await; + await_route_accepted(&client, &route_b).await; + + // Second route update. 
+ let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + R::routes(&config, |routes| { + assert!(route_a.meta_eq(R::extract_meta(&routes[0]))); + assert!(route_b.meta_eq(R::extract_meta(&routes[1]))); + }); + }) + .await + } + + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn opaque_service() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + // Create a parent + let port = 4191; + let mut parent = P::make_parent(&ns); + parent.meta_mut().annotations = Some(btreemap! { + "config.linkerd.io/opaque-ports".to_string() => port.to_string(), + }); + let parent = create(&client, parent).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // Proxy protocol should be opaque. 
+ match config.protocol.unwrap().kind.unwrap() { + grpc::outbound::proxy_protocol::Kind::Opaque(_) => {} + _ => panic!("proxy protocol must be Opaque"), + }; + }) + .await; + } + + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn route_with_no_port() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()) + ); + with_temp_ns(|client, ns| async move { + // Create a parent + let parent = create(&client, P::make_parent(&ns)).await; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let port_a = 4191; + let port_b = 9999; + + let mut rx_a = retry_watch_outbound_policy(&client, &ns, parent.ip(), port_a).await; + let config_a = rx_a + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config_a); + + let mut rx_b = retry_watch_outbound_policy(&client, &ns, parent.ip(), port_b).await; + let config_b = rx_b + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config_b); + + // There should be a default route. + gateway::HttpRoute::routes(&config_a, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port_a); + }); + gateway::HttpRoute::routes(&config_b, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port_b); + }); + + // Create a route with no port in the parent_ref. 
+ let mut parent_ref = parent.obj_ref(); + parent_ref.port = None; + let route = create( + &client, + R::make_route( + ns.clone(), + vec![parent_ref], + vec![vec![backend.backend_ref(backend_port)]], + ), + ) + .await; + await_route_accepted(&client, &route).await; + + let config_a = rx_a + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config_a); + assert_resource_meta(&config_a.metadata, parent.obj_ref(), port_a); + + let config_b = rx_b + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config_b); + assert_resource_meta(&config_b.metadata, parent.obj_ref(), port_b); + + // The route should apply to both ports. + R::routes(&config_a, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + }); + R::routes(&config_b, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + }); + }) + .await; + } + + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn producer_route() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()) + ); + with_temp_ns(|client, ns| async move { + // Create a parent + let parent = create(&client, P::make_parent(&ns)).await; + let port = 4191; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let mut producer_rx = + retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let producer_config = producer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?producer_config); + 
assert_resource_meta(&producer_config.metadata, parent.obj_ref(), port); + + let mut consumer_rx = + retry_watch_outbound_policy(&client, "consumer_ns", parent.ip(), port).await; + let consumer_config = consumer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?consumer_config); + assert_resource_meta(&consumer_config.metadata, parent.obj_ref(), port); + + // There should be a default route. + gateway::HttpRoute::routes(&producer_config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + gateway::HttpRoute::routes(&consumer_config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + // A route created in the same namespace as its parent service is called + // a producer route. It should be returned in outbound policy requests + // for that service from ALL namespaces. + let route = create( + &client, + R::make_route( + ns.clone(), + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ), + ) + .await; + await_route_accepted(&client, &route).await; + + let producer_config = producer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?producer_config); + assert_resource_meta(&producer_config.metadata, parent.obj_ref(), port); + + let consumer_config = consumer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?consumer_config); + assert_resource_meta(&consumer_config.metadata, parent.obj_ref(), port); + + // The route should be returned in queries from the producer namespace. 
+ R::routes(&producer_config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + }); + // The route should be returned in queries from a consumer namespace. + R::routes(&consumer_config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + }); + }) + .await; + } + + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn pre_existing_producer_route() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()) + ); + // We test the scenario where outbound policy watches are initiated after + // a produce route already exists. + with_temp_ns(|client, ns| async move { + // Create a parent + let parent = create(&client, P::make_parent(&ns)).await; + let port = 4191; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + // A route created in the same namespace as its parent service is called + // a producer route. It should be returned in outbound policy requests + // for that service from ALL namespaces. 
+ let route = create( + &client, + R::make_route( + ns.clone(), + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ), + ) + .await; + await_route_accepted(&client, &route).await; + + let mut producer_rx = + retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let producer_config = producer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?producer_config); + assert_resource_meta(&producer_config.metadata, parent.obj_ref(), port); + + let mut consumer_rx = + retry_watch_outbound_policy(&client, "consumer_ns", parent.ip(), port).await; + let consumer_config = consumer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?consumer_config); + assert_resource_meta(&consumer_config.metadata, parent.obj_ref(), port); + + // The route should be returned in queries from the producer namespace. + R::routes(&producer_config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + }); + // The route should be returned in queries from a consumer namespace. 
+ R::routes(&consumer_config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + }); + }) + .await; + } + + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn consumer_route() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()), + ); + with_temp_ns(|client, ns| async move { + // Create a parent + let parent = create(&client, P::make_parent(&ns)).await; + let port = 4191; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let consumer_ns_name = format!("{}-consumer", ns); + let consumer_ns = create_cluster_scoped( + &client, + k8s::Namespace { + metadata: k8s::ObjectMeta { + name: Some(consumer_ns_name.clone()), + labels: Some(convert_args!(btreemap!( + "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), + ))), + ..Default::default() + }, + ..Default::default() + }, + ) + .await; + + let mut producer_rx = + retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let producer_config = producer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?producer_config); + assert_resource_meta(&producer_config.metadata, parent.obj_ref(), port); + + let mut consumer_rx = + retry_watch_outbound_policy(&client, &consumer_ns_name, parent.ip(), port).await; + let consumer_config = consumer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?consumer_config); + assert_resource_meta(&consumer_config.metadata, parent.obj_ref(), port); + + let mut other_rx = + retry_watch_outbound_policy(&client, "other_ns", parent.ip(), port).await; + 
let other_config = other_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?other_config); + assert_resource_meta(&other_config.metadata, parent.obj_ref(), port); + + // There should be a default route. + gateway::HttpRoute::routes(&producer_config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + gateway::HttpRoute::routes(&consumer_config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + gateway::HttpRoute::routes(&other_config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + // A route created in a different namespace as its parent service is + // called a consumer route. It should be returned in outbound policy + // requests for that service ONLY when the request comes from the + // consumer namespace. + let route = create( + &client, + R::make_route( + consumer_ns_name.clone(), + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ), + ) + .await; + await_route_accepted(&client, &route).await; + + // The route should NOT be returned in queries from the producer namespace. + // There should be a default route. + assert!(producer_rx.next().now_or_never().is_none()); + + // The route should be returned in queries from the same consumer + // namespace. 
+            let consumer_config = consumer_rx
+                .next()
+                .await
+                .expect("watch must not fail")
+                .expect("watch must return an initial config");
+            tracing::trace!(?consumer_config);
+            assert_resource_meta(&consumer_config.metadata, parent.obj_ref(), port);
+
+            R::routes(&consumer_config, |routes| {
+                let outbound_route = routes.first().expect("route must exist");
+                assert!(route.meta_eq(R::extract_meta(outbound_route)));
+            });
+
+            // The route should NOT be returned in queries from a different consumer
+            // namespace.
+            assert!(other_rx.next().now_or_never().is_none());
+
+            delete_cluster_scoped(&client, consumer_ns).await;
+        })
+        .await;
+    }
+
+    test::().await;
+    test::().await;
+    test::().await;
+    test::().await;
+    test::().await;
+}
+
+#[tokio::test(flavor = "current_thread")]
+async fn route_reattachment() {
+    async fn test() {
+        tracing::debug!(
+            parent = %P::kind(&P::DynamicType::default()),
+            route = %R::kind(&R::DynamicType::default()),
+        );
+        with_temp_ns(|client, ns| async move {
+            // Create a parent
+            let port = 4191;
+            let parent = create(&client, P::make_parent(&ns)).await;
+
+            // Create a backend
+            let backend_port = 8888;
+            let backend = match P::make_backend(&ns) {
+                Some(b) => create(&client, b).await,
+                None => parent.clone(),
+            };
+
+            let mut route = create(
+                &client,
+                R::make_route(
+                    ns.clone(),
+                    vec![parent.obj_ref()],
+                    vec![vec![backend.backend_ref(backend_port)]],
+                ),
+            )
+            .await;
+            await_route_accepted(&client, &route).await;
+
+            let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await;
+            let config = rx
+                .next()
+                .await
+                .expect("watch must not fail")
+                .expect("watch must return an initial config");
+            tracing::trace!(?config);
+
+            assert_resource_meta(&config.metadata, parent.obj_ref(), port);
+
+            // The route should be attached.
+            R::routes(&config, |routes| {
+                let outbound_route = routes.first().expect("route must exist");
+                assert!(route.meta_eq(R::extract_meta(outbound_route)));
+            });
+
+            // Detach route.
+ route.parents_mut().first_mut().unwrap().name = "other".to_string(); + update(&client, route.clone()).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // The route should be unattached and the default route should be present. + gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + // Reattach route. + route.parents_mut().first_mut().unwrap().name = parent.meta().name.clone().unwrap(); + update(&client, route.clone()).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // The route should be attached again. + R::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + }); + }) + .await; + } + + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; +} diff --git a/policy-test/tests/outbound_api_failure_accrual.rs b/policy-test/tests/outbound_api_failure_accrual.rs new file mode 100644 index 0000000000000..26ea11c33bb0e --- /dev/null +++ b/policy-test/tests/outbound_api_failure_accrual.rs @@ -0,0 +1,244 @@ +use std::time::Duration; + +use futures::StreamExt; +use linkerd_policy_controller_k8s_api::{self as k8s, policy}; +use linkerd_policy_test::{ + assert_default_accrual_backoff, assert_resource_meta, create, grpc, + outbound_api::{ + detect_failure_accrual, failure_accrual_consecutive, retry_watch_outbound_policy, + }, + test_route::TestParent, + with_temp_ns, +}; +use maplit::btreemap; + 
+#[tokio::test(flavor = "current_thread")] +async fn consecutive_failure_accrual() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + with_temp_ns(|client, ns| async move { + // Create a parent + let port = 4191; + let mut parent = P::make_parent(&ns); + parent.meta_mut().annotations = Some(btreemap! { + "balancer.linkerd.io/failure-accrual".to_string() => "consecutive".to_string(), + "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string() => "8".to_string(), + "balancer.linkerd.io/failure-accrual-consecutive-min-penalty".to_string() => "10s".to_string(), + "balancer.linkerd.io/failure-accrual-consecutive-max-penalty".to_string() => "10m".to_string(), + "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string() => "1.0".to_string(), + }); + let parent = create(&client, parent).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + detect_failure_accrual(&config, |accrual| { + let consecutive = failure_accrual_consecutive(accrual); + assert_eq!(8, consecutive.max_failures); + assert_eq!( + &grpc::outbound::ExponentialBackoff { + min_backoff: Some(Duration::from_secs(10).try_into().unwrap()), + max_backoff: Some(Duration::from_secs(600).try_into().unwrap()), + jitter_ratio: 1.0_f32, + }, + consecutive + .backoff + .as_ref() + .expect("backoff must be configured") + ); + }); + }) + .await; + } + + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn consecutive_failure_accrual_defaults_no_config() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + with_temp_ns(|client, ns| async move { + // Create a service configured to do consecutive failure accrual, 
but + // with no additional configuration + let port = 4191; + let mut parent = P::make_parent(&ns); + parent.meta_mut().annotations = Some(btreemap! { + "balancer.linkerd.io/failure-accrual".to_string() => "consecutive".to_string(), + }); + let parent = create(&client, parent).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // Expect default max_failures and default backoff + detect_failure_accrual(&config, |accrual| { + let consecutive = failure_accrual_consecutive(accrual); + assert_eq!(7, consecutive.max_failures); + assert_default_accrual_backoff!(consecutive + .backoff + .as_ref() + .expect("backoff must be configured")); + }); + }) + .await + } + + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn consecutive_failure_accrual_defaults_max_fails() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + with_temp_ns(|client, ns| async move { + // Create a service configured to do consecutive failure accrual with + // max number of failures and with default backoff + let port = 4191; + let mut parent = P::make_parent(&ns); + parent.meta_mut().annotations = Some(btreemap! 
 {
+                "balancer.linkerd.io/failure-accrual".to_string() => "consecutive".to_string(),
+                "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string() => "8".to_string(),
+            });
+            let parent = create(&client, parent).await;
+
+            let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await;
+            let config = rx
+                .next()
+                .await
+                .expect("watch must not fail")
+                .expect("watch must return an initial config");
+            tracing::trace!(?config);
+
+            assert_resource_meta(&config.metadata, parent.obj_ref(), port);
+
+            // Expect default backoff and overridden max_failures
+            detect_failure_accrual(&config, |accrual| {
+                let consecutive = failure_accrual_consecutive(accrual);
+                assert_eq!(8, consecutive.max_failures);
+                assert_default_accrual_backoff!(consecutive
+                    .backoff
+                    .as_ref()
+                    .expect("backoff must be configured"));
+            });
+        })
+        .await;
+    }
+
+    test::().await;
+    test::().await;
+}
+
+#[tokio::test(flavor = "current_thread")]
+async fn consecutive_failure_accrual_defaults_jitter() {
+    async fn test() {
+        tracing::debug!(
+            parent = %P::kind(&P::DynamicType::default()),
+        );
+        with_temp_ns(|client, ns| async move {
+            // Create a service configured to do consecutive failure accrual with
+            // only the jitter ratio configured in the backoff
+            let port = 4191;
+            let mut parent = P::make_parent(&ns);
+            parent.meta_mut().annotations = Some(btreemap!
{ + "balancer.linkerd.io/failure-accrual".to_string() => "consecutive".to_string(), + "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string() => "1.0".to_string(), + }); + let parent = create(&client, parent).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // Expect defaults for everything except for the jitter ratio + detect_failure_accrual(&config, |accrual| { + let consecutive = failure_accrual_consecutive(accrual); + assert_eq!(7, consecutive.max_failures); + assert_eq!( + &grpc::outbound::ExponentialBackoff { + min_backoff: Some(Duration::from_secs(1).try_into().unwrap()), + max_backoff: Some(Duration::from_secs(60).try_into().unwrap()), + jitter_ratio: 1.0_f32, + }, + consecutive + .backoff + .as_ref() + .expect("backoff must be configured") + ); + }); + }) + .await; + } + + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn default_failure_accrual() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + with_temp_ns(|client, ns| async move { + // Create Service with consecutive failure accrual config for + // max_failures but no mode + let port = 4191; + let mut parent = P::make_parent(&ns); + parent.meta_mut().annotations = Some(btreemap! 
{ + "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string() => "8".to_string(), + }); + let parent = create(&client, parent).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // Expect failure accrual config to be default (no failure accrual) + detect_failure_accrual(&config, |accrual| { + assert!( + accrual.is_none(), + "consecutive failure accrual should not be configured for service" + ); + }); + }) + .await; + } + + test::().await; + test::().await; +} diff --git a/policy-test/tests/outbound_api_gateway.rs b/policy-test/tests/outbound_api_gateway.rs deleted file mode 100644 index 1e96c40f92b9c..0000000000000 --- a/policy-test/tests/outbound_api_gateway.rs +++ /dev/null @@ -1,2025 +0,0 @@ -use futures::prelude::*; -use kube::ResourceExt; -use linkerd_policy_controller_k8s_api as k8s; -use linkerd_policy_test::{ - assert_default_accrual_backoff, assert_resource_meta, assert_status_accepted, - await_egress_net_status, await_gateway_route_status, create, create_annotated_egress_network, - create_annotated_service, create_cluster_scoped, create_egress_network, - create_opaque_egress_network, create_opaque_service, create_service, delete_cluster_scoped, - grpc, mk_egress_net, mk_service, outbound_api::*, update, with_temp_ns, Resource, -}; -use maplit::{btreemap, convert_args}; -use std::{collections::BTreeMap, time::Duration}; - -// These tests are copies of the tests in outbound_api_gateway.rs but using the -// policy.linkerd.io HttpRoute kubernetes types instead of the Gateway API ones. -// These two files should be kept in sync to ensure that Linkerd can read and -// function correctly with both types of resources. 
- -#[tokio::test(flavor = "current_thread")] -async fn service_does_not_exist() { - with_temp_ns(|client, ns| async move { - // Build a service but don't apply it to the cluster. - let mut svc = mk_service(&ns, "my-svc", 4191); - // Give it a bogus cluster ip. - svc.spec.as_mut().unwrap().cluster_ip = Some("192.168.0.2".to_string()); - - let mut policy_api = grpc::OutboundPolicyClient::port_forwarded(&client).await; - let rsp = policy_api.watch(&ns, &svc, 4191).await; - - assert!(rsp.is_err()); - assert_eq!(rsp.err().unwrap().code(), tonic::Code::NotFound); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_no_http_routes() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - parent_with_no_http_routes(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_no_http_routes() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_no_http_routes(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_http_route_without_rules() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - parent_with_http_route_without_rules(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_http_route_without_rules() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - 
assert_status_accepted(status.conditions); - - parent_with_http_route_without_rules(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_http_routes_without_backends() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - parent_with_http_routes_without_backends(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_http_routes_without_backends() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_http_routes_without_backends(Resource::EgressNetwork(egress), &client, &ns) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_http_routes_with_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - let backend_svc = create_service(&client, &ns, "backend", 8888).await; - parent_with_http_routes_with_backend( - Resource::Service(svc), - Resource::Service(backend_svc), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_http_routes_with_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_http_routes_with_backend( - Resource::EgressNetwork(egress.clone()), - Resource::EgressNetwork(egress), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn 
service_with_http_routes_with_cross_namespace_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &svc, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - let backend_ns_name = format!("{}-backend", ns); - let backend_ns = create_cluster_scoped( - &client, - k8s::Namespace { - metadata: k8s::ObjectMeta { - name: Some(backend_ns_name.clone()), - labels: Some(convert_args!(btreemap!( - "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), - ))), - ..Default::default() - }, - ..Default::default() - }, - ) - .await; - let backend_name = "backend"; - let backend_svc = - Resource::Service(create_service(&client, &backend_ns_name, backend_name, 8888).await); - let backends = [backend_svc.clone()]; - let route = mk_http_route(&ns, "foo-route", &svc, Some(4191)).with_backends( - Some(&backends), - Some(backend_ns_name), - None, - ); - let _route = create(&client, route.build()).await; - await_gateway_route_status(&client, &ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &svc, 4191); - - // There should be a route with a backend with no filters. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - let backends = route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_svc, 8888); - let filters = &backend.backend.as_ref().unwrap().filters; - assert_eq!(filters.len(), 0); - }); - - delete_cluster_scoped(&client, backend_ns).await - }) - .await; -} - -// TODO: Test fails until handling of invalid backends is implemented. -#[tokio::test(flavor = "current_thread")] -async fn service_with_http_routes_with_invalid_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - let backend = mk_service(&ns, "invalid", 4191); - - parent_with_http_routes_with_invalid_backend( - Resource::Service(svc), - Resource::Service(backend), - &client, - &ns, - ) - .await; - }) - .await; -} - -// TODO: Test fails until handling of invalid backends is implemented. -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_http_routes_with_invalid_backend() { - with_temp_ns(|client, ns| async move { - // Create an egress network - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - let backend = mk_egress_net(&ns, "invalid"); - - parent_with_http_routes_with_invalid_backend( - Resource::EgressNetwork(egress), - Resource::EgressNetwork(backend), - &client, - &ns, - ) - .await; - }) - .await; -} - -// TODO: Investigate why the policy controller is only returning one route in this -// case instead of two. 
-#[tokio::test(flavor = "current_thread")] -async fn service_with_multiple_http_routes() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - parent_with_multiple_http_routes(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -// TODO: Investigate why the policy controller is only returning one route in this -// case instead of two. -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_multiple_http_routes() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_multiple_http_routes(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_consecutive_failure_accrual() { - with_temp_ns(|client, ns| async move { - let svc = create_annotated_service( - &client, - &ns, - "consecutive-accrual-svc", - 80, - BTreeMap::from([ - ( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), - "8".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-min-penalty".to_string(), - "10s".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-max-penalty".to_string(), - "10m".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), - "1.0".to_string(), - ), - ]), - ) - .await; - parent_with_consecutive_failure_accrual(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_consecutive_failure_accrual() { - with_temp_ns(|client, ns| async move { - let egress = create_annotated_egress_network( - &client, - &ns, - 
"consecutive-accrual-egress", - BTreeMap::from([ - ( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), - "8".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-min-penalty".to_string(), - "10s".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-max-penalty".to_string(), - "10m".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), - "1.0".to_string(), - ), - ]), - ) - .await; - let status = await_egress_net_status(&client, &ns, "consecutive-accrual-egress").await; - assert_status_accepted(status.conditions); - - parent_with_consecutive_failure_accrual(Resource::EgressNetwork(egress), &client, &ns) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_consecutive_failure_accrual_defaults_no_config() { - with_temp_ns(|client, ns| async move { - // Create a service configured to do consecutive failure accrual, but - // with no additional configuration - let svc_no_config = create_annotated_service( - &client, - &ns, - "default-accrual-svc", - 80, - BTreeMap::from([( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - )]), - ) - .await; - - parent_with_consecutive_failure_accrual_defaults_no_config( - Resource::Service(svc_no_config), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_consecutive_failure_accrual_defaults_max_fails() { - with_temp_ns(|client, ns| async move { - // Create a service configured to do consecutive failure accrual with - // max number of failures and with default backoff - let svc_max_fails = create_annotated_service( - &client, - &ns, - "no-backoff-svc", - 80, - BTreeMap::from([ - ( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - ), - ( - 
"balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), - "8".to_string(), - ), - ]), - ) - .await; - - parent_with_consecutive_failure_accrual_defaults_max_fails( - Resource::Service(svc_max_fails), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_consecutive_failure_accrual_defaults_jitter() { - with_temp_ns(|client, ns| async move { - // Create a service configured to do consecutive failure accrual with - // only the jitter ratio configured in the backoff - let svc_jitter = create_annotated_service( - &client, - &ns, - "only-jitter-svc", - 80, - BTreeMap::from([ - ( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), - "1.0".to_string(), - ), - ]), - ) - .await; - - parent_with_consecutive_failure_accrual_defaults_max_jitter( - Resource::Service(svc_jitter), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_consecutive_failure_accrual_defaults_no_config() { - with_temp_ns(|client, ns| async move { - // Create a egress network configured to do consecutive failure accrual, but - // with no additional configuration - let egress_no_config = create_annotated_egress_network( - &client, - &ns, - "default-accrual-egress", - BTreeMap::from([( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - )]), - ) - .await; - let status = await_egress_net_status(&client, &ns, "default-accrual-egress").await; - assert_status_accepted(status.conditions); - - parent_with_consecutive_failure_accrual_defaults_no_config( - Resource::EgressNetwork(egress_no_config), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_consecutive_failure_accrual_defaults_max_fails() { - with_temp_ns(|client, ns| async move { - // Create 
a egress network configured to do consecutive failure accrual with - // max number of failures and with default backoff - let egress_max_fails = create_annotated_egress_network( - &client, - &ns, - "no-backoff-egress", - BTreeMap::from([ - ( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), - "8".to_string(), - ), - ]), - ) - .await; - let status = await_egress_net_status(&client, &ns, "no-backoff-egress").await; - assert_status_accepted(status.conditions); - - parent_with_consecutive_failure_accrual_defaults_max_fails( - Resource::EgressNetwork(egress_max_fails), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_consecutive_failure_accrual_defaults_jitter() { - with_temp_ns(|client, ns| async move { - // Create an egress net configured to do consecutive failure accrual with - // only the jitter ratio configured in the backoff - let egress_jitter = create_annotated_egress_network( - &client, - &ns, - "only-jitter-egress", - BTreeMap::from([ - ( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), - "1.0".to_string(), - ), - ]), - ) - .await; - let status = await_egress_net_status(&client, &ns, "only-jitter-egress").await; - assert_status_accepted(status.conditions); - - parent_with_consecutive_failure_accrual_defaults_max_jitter( - Resource::EgressNetwork(egress_jitter), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_default_failure_accrual() { - with_temp_ns(|client, ns| async move { - // Default config for Service, no failure accrual - let svc_default = create_service(&client, &ns, "default-failure-accrual", 80).await; - - // Create Service with consecutive failure accrual config for - // 
max_failures but no mode - let svc_max_fails = create_annotated_service( - &client, - &ns, - "default-max-failure-svc", - 80, - BTreeMap::from([( - "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), - "8".to_string(), - )]), - ) - .await; - - parent_with_default_failure_accrual( - Resource::Service(svc_default), - Resource::Service(svc_max_fails), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_default_failure_accrual() { - with_temp_ns(|client, ns| async move { - // Default config for EgressNetwork, no failure accrual - let egress_default = create_egress_network(&client, &ns, "default-failure-accrual").await; - let status = await_egress_net_status(&client, &ns, "default-failure-accrual").await; - assert_status_accepted(status.conditions); - - // Create EgressNetwork with consecutive failure accrual config for - // max_failures but no mode - let egress_max_fails = create_annotated_egress_network( - &client, - &ns, - "default-max-failure-egress", - BTreeMap::from([( - "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), - "8".to_string(), - )]), - ) - .await; - let status = await_egress_net_status(&client, &ns, "default-max-failure-egress").await; - assert_status_accepted(status.conditions); - - parent_with_default_failure_accrual( - Resource::EgressNetwork(egress_default), - Resource::EgressNetwork(egress_max_fails), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn opaque_service() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_opaque_service(&client, &ns, "my-svc", 4191).await; - opaque_parent(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn opaque_egress_net() { - with_temp_ns(|client, ns| async move { - // Create an egress network - let egress = create_opaque_egress_network(&client, 
&ns, "my-svc", 4191).await; - opaque_parent(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn route_with_filters_service() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - let backend = mk_service(&ns, "backend", 4191); - - route_with_filters( - Resource::Service(svc), - Resource::Service(backend), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn route_with_filters_egress_net() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - route_with_filters( - Resource::EgressNetwork(egress.clone()), - Resource::EgressNetwork(egress), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn backend_with_filters_service() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - let backend_svc = create_service(&client, &ns, "backend", 8888).await; - backend_with_filters( - Resource::Service(svc), - Resource::Service(backend_svc), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn backend_with_filters_egress_net() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - backend_with_filters( - Resource::EgressNetwork(egress.clone()), - Resource::EgressNetwork(egress), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn 
http_route_with_no_port() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let mut rx_4191 = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config_4191 = rx_4191 - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config_4191); - - let mut rx_9999 = retry_watch_outbound_policy(&client, &ns, &svc, 9999).await; - let config_9999 = rx_9999 - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config_9999); - - // There should be a default route. - detect_http_routes(&config_4191, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - detect_http_routes(&config_9999, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 9999); - }); - - let _route = create(&client, mk_http_route(&ns, "foo-route", &svc, None).build()).await; - await_gateway_route_status(&client, &ns, "foo-route").await; - - let config_4191 = rx_4191 - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config_4191); - - // The route should apply to the service. - detect_http_routes(&config_4191, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - - let config_9999 = rx_9999 - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config_9999); - - // The route should apply to other ports too. 
- detect_http_routes(&config_9999, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn producer_route() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?producer_config); - - let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - // There should be a default route. - detect_http_routes(&producer_config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - detect_http_routes(&consumer_config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - // A route created in the same namespace as its parent service is called - // a producer route. It should be returned in outbound policy requests - // for that service from ALL namespaces. 
- let _route = create( - &client, - mk_http_route(&ns, "foo-route", &svc, Some(4191)).build(), - ) - .await; - await_gateway_route_status(&client, &ns, "foo-route").await; - - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?producer_config); - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - // The route should be returned in queries from the producer namespace. - detect_http_routes(&producer_config, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - - // The route should be returned in queries from a consumer namespace. - detect_http_routes(&consumer_config, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn pre_existing_producer_route() { - // We test the scenario where outbound policy watches are initiated after - // a produce route already exists. - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - // A route created in the same namespace as its parent service is called - // a producer route. It should be returned in outbound policy requests - // for that service from ALL namespaces. 
- let _route = create( - &client, - mk_http_route(&ns, "foo-route", &svc, Some(4191)).build(), - ) - .await; - await_gateway_route_status(&client, &ns, "foo-route").await; - - let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?producer_config); - - let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - // The route should be returned in queries from the producer namespace. - detect_http_routes(&producer_config, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - - // The route should be returned in queries from a consumer namespace. - detect_http_routes(&consumer_config, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn consumer_route() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let consumer_ns_name = format!("{}-consumer", ns); - let consumer_ns = create_cluster_scoped( - &client, - k8s::Namespace { - metadata: k8s::ObjectMeta { - name: Some(consumer_ns_name.clone()), - labels: Some(convert_args!(btreemap!( - "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), - ))), - ..Default::default() - }, - ..Default::default() - }, - ) - .await; - - let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - 
tracing::trace!(?producer_config); - - let mut consumer_rx = - retry_watch_outbound_policy(&client, &consumer_ns_name, &svc, 4191).await; - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - let mut other_rx = retry_watch_outbound_policy(&client, "other_ns", &svc, 4191).await; - let other_config = other_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?other_config); - - // There should be a default route. - detect_http_routes(&producer_config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - detect_http_routes(&consumer_config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - detect_http_routes(&other_config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - // A route created in a different namespace as its parent service is - // called a consumer route. It should be returned in outbound policy - // requests for that service ONLY when the request comes from the - // consumer namespace. - let _route = create( - &client, - mk_http_route(&consumer_ns_name, "foo-route", &svc, Some(4191)).build(), - ) - .await; - await_gateway_route_status(&client, &consumer_ns_name, "foo-route").await; - - // The route should NOT be returned in queries from the producer namespace. - // There should be a default route. - assert!(producer_rx.next().now_or_never().is_none()); - - // The route should be returned in queries from the same consumer - // namespace. 
- let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - detect_http_routes(&consumer_config, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - - // The route should NOT be returned in queries from a different consumer - // namespace. - assert!(other_rx.next().now_or_never().is_none()); - - delete_cluster_scoped(&client, consumer_ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn http_route_retries_and_timeouts_service() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - http_route_retries_and_timeouts(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn http_route_retries_and_timeouts_egress_net() { - with_temp_ns(|client, ns| async move { - // Create an egress network - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - http_route_retries_and_timeouts(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_retries_and_timeouts() { - with_temp_ns(|client, ns| async move { - // Create a service - let mut svc = mk_service(&ns, "my-svc", 4191); - svc.annotations_mut() - .insert("retry.linkerd.io/http".to_string(), "5xx".to_string()); - svc.annotations_mut() - .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); - let svc = Resource::Service(create(&client, svc).await); - - retries_and_timeouts(svc, &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_retries_and_timeouts() { - with_temp_ns(|client, ns| async move { - // Create a egress 
net - let mut egress = mk_egress_net(&ns, "my-egress"); - egress - .annotations_mut() - .insert("retry.linkerd.io/http".to_string(), "5xx".to_string()); - egress - .annotations_mut() - .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); - let egress = Resource::EgressNetwork(create(&client, egress).await); - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - retries_and_timeouts(egress, &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_http_route_reattachment() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - http_route_reattachment(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_http_route_reattachment() { - with_temp_ns(|client, ns| async move { - // Create a egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - http_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -/* Helpers */ - -struct HttpRouteBuilder(k8s_gateway_api::HttpRoute); - -fn mk_http_route(ns: &str, name: &str, parent: &Resource, port: Option) -> HttpRouteBuilder { - use k8s_gateway_api as api; - - HttpRouteBuilder(api::HttpRoute { - metadata: kube::api::ObjectMeta { - namespace: Some(ns.to_string()), - name: Some(name.to_string()), - ..Default::default() - }, - spec: api::HttpRouteSpec { - inner: api::CommonRouteSpec { - parent_refs: Some(vec![api::ParentReference { - group: Some(parent.group()), - kind: Some(parent.kind()), - namespace: Some(parent.namespace()), - name: parent.name(), - section_name: None, - port, - }]), - }, - hostnames: None, - rules: Some(vec![api::HttpRouteRule { - matches: 
Some(vec![api::HttpRouteMatch { - path: Some(api::HttpPathMatch::Exact { - value: "/foo".to_string(), - }), - headers: None, - query_params: None, - method: Some("GET".to_string()), - }]), - filters: None, - backend_refs: None, - }]), - }, - status: None, - }) -} - -impl HttpRouteBuilder { - fn with_backends( - self, - backends: Option<&[Resource]>, - backends_ns: Option, - backend_filters: Option>, - ) -> Self { - let mut route = self.0; - let backend_refs = backends.map(|backends| { - backends - .iter() - .map(|backend| k8s_gateway_api::HttpBackendRef { - backend_ref: Some(k8s_gateway_api::BackendRef { - weight: None, - inner: k8s_gateway_api::BackendObjectReference { - name: backend.name(), - port: Some(8888), - group: Some(backend.group()), - kind: Some(backend.kind()), - namespace: backends_ns.clone(), - }, - }), - filters: backend_filters.clone(), - }) - .collect() - }); - route.spec.rules.iter_mut().flatten().for_each(|rule| { - rule.backend_refs = backend_refs.clone(); - }); - Self(route) - } - - fn with_filters(self, filters: Option>) -> Self { - let mut route = self.0; - route - .spec - .rules - .iter_mut() - .flatten() - .for_each(|rule| rule.filters = filters.clone()); - Self(route) - } - - fn with_annotations(self, annotations: BTreeMap) -> Self { - let mut route = self.0; - route.metadata.annotations = Some(annotations); - Self(route) - } - - fn build(self) -> k8s_gateway_api::HttpRoute { - self.0 - } -} - -fn mk_empty_http_route( - ns: &str, - name: &str, - parent: &Resource, - port: u16, -) -> k8s_gateway_api::HttpRoute { - use k8s_gateway_api as api; - api::HttpRoute { - metadata: kube::api::ObjectMeta { - namespace: Some(ns.to_string()), - name: Some(name.to_string()), - ..Default::default() - }, - spec: api::HttpRouteSpec { - inner: api::CommonRouteSpec { - parent_refs: Some(vec![api::ParentReference { - group: Some(parent.group()), - kind: Some(parent.kind()), - namespace: Some(parent.namespace()), - name: parent.name(), - section_name: None, - 
port: Some(port), - }]), - }, - hostnames: None, - rules: Some(vec![]), - }, - status: None, - } -} - -async fn parent_with_no_http_routes(parent: Resource, client: &kube::Client, ns: &str) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); -} - -async fn parent_with_http_route_without_rules(parent: Resource, client: &kube::Client, ns: &str) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - let _route = create(client, mk_empty_http_route(ns, "foo-route", &parent, 4191)).await; - await_gateway_route_status(client, ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a route with no rules. 
- detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - assert_eq!(route.rules.len(), 0); - }); -} - -async fn parent_with_http_routes_without_backends( - parent: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - let _route = create( - client, - mk_http_route(ns, "foo-route", &parent, Some(4191)).build(), - ) - .await; - await_gateway_route_status(client, ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a route with the logical backend. - detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let backends = route_backends_first_available(route); - let backend = assert_singleton(backends); - assert_backend_matches_parent(backend, &parent, 4191); - }); -} - -async fn parent_with_http_routes_with_backend( - parent: Resource, - rule_backend: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a default route. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - let backends = [rule_backend.clone()]; - let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( - Some(&backends), - None, - None, - ); - let _route = create(client, route.build()).await; - await_gateway_route_status(client, ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a route with a backend with no filters. - detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let backends = route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &rule_backend, 8888); - let filters = &backend.backend.as_ref().unwrap().filters; - assert_eq!(filters.len(), 0); - }); -} - -async fn parent_with_http_routes_with_invalid_backend( - parent: Resource, - backend: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a default route. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - let backends = [backend]; - let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( - Some(&backends), - None, - None, - ); - let _route = create(client, route.build()).await; - await_gateway_route_status(client, ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a route with a backend. - detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let backends = route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_backend_has_failure_filter(backend); - }); -} - -async fn parent_with_multiple_http_routes(parent: Resource, client: &kube::Client, ns: &str) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - // Routes should be returned in sorted order by creation timestamp then - // name. To ensure that this test isn't timing dependant, routes should - // be created in alphabetical order. - let _a_route = create( - client, - mk_http_route(ns, "a-route", &parent, Some(4191)).build(), - ) - .await; - await_gateway_route_status(client, ns, "a-route").await; - - // First route update. 
- let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - let _b_route = create( - client, - mk_http_route(ns, "b-route", &parent, Some(4191)).build(), - ) - .await; - await_gateway_route_status(client, ns, "b-route").await; - - // Second route update. - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - detect_http_routes(&config, |routes| { - let num_routes = match parent { - Resource::EgressNetwork(_) => 3, // three routes for egress net 2 configured + 1 default - Resource::Service(_) => 2, // two routes for service - }; - assert_eq!(routes.len(), num_routes); - assert_eq!(route_name(&routes[0]), "a-route"); - assert_eq!(route_name(&routes[1]), "b-route"); - }); -} - -async fn parent_with_consecutive_failure_accrual( - parent: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - detect_failure_accrual(&config, |accrual| { - let consecutive = failure_accrual_consecutive(accrual); - assert_eq!(8, consecutive.max_failures); - assert_eq!( - &grpc::outbound::ExponentialBackoff { - min_backoff: Some(Duration::from_secs(10).try_into().unwrap()), - max_backoff: Some(Duration::from_secs(600).try_into().unwrap()), - jitter_ratio: 1.0_f32, - }, - consecutive - .backoff - .as_ref() - .expect("backoff must be configured") - ); - }); -} - -async fn parent_with_consecutive_failure_accrual_defaults_no_config( - parent: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - 
let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Expect default max_failures and default backoff - detect_failure_accrual(&config, |accrual| { - let consecutive = failure_accrual_consecutive(accrual); - assert_eq!(7, consecutive.max_failures); - assert_default_accrual_backoff!(consecutive - .backoff - .as_ref() - .expect("backoff must be configured")); - }); -} - -async fn parent_with_consecutive_failure_accrual_defaults_max_fails( - parent: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Expect default backoff and overridden max_failures - detect_failure_accrual(&config, |accrual| { - let consecutive = failure_accrual_consecutive(accrual); - assert_eq!(8, consecutive.max_failures); - assert_default_accrual_backoff!(consecutive - .backoff - .as_ref() - .expect("backoff must be configured")); - }); -} - -async fn parent_with_consecutive_failure_accrual_defaults_max_jitter( - parent: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Expect defaults for everything except for the jitter ratio - detect_failure_accrual(&config, |accrual| { - let consecutive = failure_accrual_consecutive(accrual); - assert_eq!(7, consecutive.max_failures); - assert_eq!( - &grpc::outbound::ExponentialBackoff { - min_backoff: Some(Duration::from_secs(1).try_into().unwrap()), - max_backoff: Some(Duration::from_secs(60).try_into().unwrap()), - jitter_ratio: 1.0_f32, - }, - consecutive - .backoff - .as_ref() - .expect("backoff 
must be configured") - ); - }); -} - -async fn parent_with_default_failure_accrual( - parent_default_config: Resource, - parent_max_failures: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent_default_config, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Expect failure accrual config to be default (no failure accrual) - detect_failure_accrual(&config, |accrual| { - assert!( - accrual.is_none(), - "consecutive failure accrual should not be configured for service" - ); - }); - - let mut rx = retry_watch_outbound_policy(client, ns, &parent_max_failures, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Expect failure accrual config to be default (no failure accrual) - detect_failure_accrual(&config, |accrual| { - assert!( - accrual.is_none(), - "consecutive failure accrual should not be configured for service" - ) - }); -} - -async fn opaque_parent(parent: Resource, client: &kube::Client, ns: &str) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Proxy protocol should be opaque. 
- match config.protocol.unwrap().kind.unwrap() { - grpc::outbound::proxy_protocol::Kind::Opaque(_) => {} - _ => panic!("proxy protocol must be Opaque"), - }; -} - -async fn route_with_filters(parent: Resource, backend: Resource, client: &kube::Client, ns: &str) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - let backends = [backend.clone()]; - let route = mk_http_route(ns, "foo-route", &parent, Some(4191)) - .with_backends(Some(&backends), None, None) - .with_filters(Some(vec![ - k8s_gateway_api::HttpRouteFilter::RequestHeaderModifier { - request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { - set: Some(vec![k8s_gateway_api::HttpHeader { - name: "set".to_string(), - value: "set-value".to_string(), - }]), - add: Some(vec![k8s_gateway_api::HttpHeader { - name: "add".to_string(), - value: "add-value".to_string(), - }]), - remove: Some(vec!["remove".to_string()]), - }, - }, - k8s_gateway_api::HttpRouteFilter::RequestRedirect { - request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { - scheme: Some("http".to_string()), - hostname: Some("host".to_string()), - path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { - replace_prefix_match: "/path".to_string(), - }), - port: Some(5555), - status_code: Some(302), - }, - }, - ])); - let _route = create(client, route.build()).await; - await_gateway_route_status(client, ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - // There should be a route with filters. 
- detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let rule = assert_singleton(&route.rules); - let filters = &rule.filters; - assert_eq!( - *filters, - vec![ - grpc::outbound::http_route::Filter { - kind: Some( - grpc::outbound::http_route::filter::Kind::RequestHeaderModifier( - grpc::http_route::RequestHeaderModifier { - add: Some(grpc::http_types::Headers { - headers: vec![grpc::http_types::headers::Header { - name: "add".to_string(), - value: "add-value".into(), - }] - }), - set: Some(grpc::http_types::Headers { - headers: vec![grpc::http_types::headers::Header { - name: "set".to_string(), - value: "set-value".into(), - }] - }), - remove: vec!["remove".to_string()], - } - ) - ) - }, - grpc::outbound::http_route::Filter { - kind: Some(grpc::outbound::http_route::filter::Kind::Redirect( - grpc::http_route::RequestRedirect { - scheme: Some(grpc::http_types::Scheme { - r#type: Some(grpc::http_types::scheme::Type::Registered( - grpc::http_types::scheme::Registered::Http.into(), - )) - }), - host: "host".to_string(), - path: Some(linkerd2_proxy_api::http_route::PathModifier { - replace: Some( - linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( - "/path".to_string() - ) - ) - }), - port: 5555, - status: 302, - } - )) - } - ] - ); - }); -} - -async fn backend_with_filters( - parent: Resource, - backend_for_parent: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // There should be a default route. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - let backends = [backend_for_parent.clone()]; - let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( - Some(&backends), - None, - Some(vec![ - k8s_gateway_api::HttpRouteFilter::RequestHeaderModifier { - request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { - set: Some(vec![k8s_gateway_api::HttpHeader { - name: "set".to_string(), - value: "set-value".to_string(), - }]), - add: Some(vec![k8s_gateway_api::HttpHeader { - name: "add".to_string(), - value: "add-value".to_string(), - }]), - remove: Some(vec!["remove".to_string()]), - }, - }, - k8s_gateway_api::HttpRouteFilter::RequestRedirect { - request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { - scheme: Some("http".to_string()), - hostname: Some("host".to_string()), - path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { - replace_prefix_match: "/path".to_string(), - }), - port: Some(5555), - status_code: Some(302), - }, - }, - ]), - ); - let _route = create(client, route.build()).await; - await_gateway_route_status(client, ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - // There should be a route without rule filters. 
- detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let rule = assert_singleton(&route.rules); - assert_eq!(rule.filters.len(), 0); - let backends = route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_for_parent, 8888); - let filters = &backend.backend.as_ref().unwrap().filters; - assert_eq!( - *filters, - vec![ - grpc::outbound::http_route::Filter { - kind: Some( - grpc::outbound::http_route::filter::Kind::RequestHeaderModifier( - grpc::http_route::RequestHeaderModifier { - add: Some(grpc::http_types::Headers { - headers: vec![grpc::http_types::headers::Header { - name: "add".to_string(), - value: "add-value".into(), - }] - }), - set: Some(grpc::http_types::Headers { - headers: vec![grpc::http_types::headers::Header { - name: "set".to_string(), - value: "set-value".into(), - }] - }), - remove: vec!["remove".to_string()], - } - ) - ) - }, - grpc::outbound::http_route::Filter { - kind: Some(grpc::outbound::http_route::filter::Kind::Redirect( - grpc::http_route::RequestRedirect { - scheme: Some(grpc::http_types::Scheme { - r#type: Some(grpc::http_types::scheme::Type::Registered( - grpc::http_types::scheme::Registered::Http.into(), - )) - }), - host: "host".to_string(), - path: Some(linkerd2_proxy_api::http_route::PathModifier { - replace: Some( - linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( - "/path".to_string() - ) - ) - }), - port: 5555, - status: 302, - } - )) - } - ] - ); - }); -} - -async fn http_route_retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { - let _route = create( - client, - mk_http_route(ns, "foo-route", &parent, Some(4191)) - .with_annotations( - vec![ - ("retry.linkerd.io/http".to_string(), "5xx".to_string()), - ("timeout.linkerd.io/response".to_string(), "10s".to_string()), - ] - .into_iter() - .collect(), - ) - .build(), - ) - .await; - - 
await_gateway_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let rule = assert_singleton(&route.rules); - let conditions = rule - .retry - .as_ref() - .expect("retry config expected") - .conditions - .as_ref() - .expect("retry conditions expected"); - let status_range = assert_singleton(&conditions.status_ranges); - assert_eq!(status_range.start, 500); - assert_eq!(status_range.end, 599); - let timeout = rule - .timeouts - .as_ref() - .expect("timeouts expected") - .response - .as_ref() - .expect("response timeout expected"); - assert_eq!(timeout.seconds, 10); - }); -} - -async fn retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { - let _route = create( - client, - mk_http_route(ns, "foo-route", &parent, Some(4191)) - .with_annotations( - vec![ - // Route annotations override the timeout config specified - // on the service. - ("timeout.linkerd.io/request".to_string(), "5s".to_string()), - ] - .into_iter() - .collect(), - ) - .build(), - ) - .await; - await_gateway_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let rule = assert_singleton(&route.rules); - let conditions = rule - .retry - .as_ref() - .expect("retry config expected") - .conditions - .as_ref() - .expect("retry conditions expected"); - let status_range = assert_singleton(&conditions.status_ranges); - // Retry config inherited from the service. 
- assert_eq!(status_range.start, 500); - assert_eq!(status_range.end, 599); - let timeouts = rule.timeouts.as_ref().expect("timeouts expected"); - // Service timeout config overridden by route timeout config. - assert_eq!(timeouts.response, None); - let request_timeout = timeouts.request.as_ref().expect("request timeout expected"); - assert_eq!(request_timeout.seconds, 5); - }); -} - -async fn http_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { - let mut route = create(client, mk_empty_http_route(ns, "foo-route", &parent, 4191)).await; - await_gateway_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be attached. - detect_http_routes(&config, |routes| { - let route: &grpc::outbound::HttpRoute = assert_route_attached(routes, &parent); - assert_route_name_eq(route, "foo-route"); - }); - - route - .spec - .inner - .parent_refs - .as_mut() - .unwrap() - .first_mut() - .unwrap() - .name = "other".to_string(); - update(client, route.clone()).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be unattached and the default route should be present. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - route - .spec - .inner - .parent_refs - .as_mut() - .unwrap() - .first_mut() - .unwrap() - .name = parent.name(); - update(client, route).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be attached again. - detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - assert_route_name_eq(route, "foo-route"); - }); -} diff --git a/policy-test/tests/outbound_api_grpc.rs b/policy-test/tests/outbound_api_grpc.rs index e2493694fa53a..5232603431885 100644 --- a/policy-test/tests/outbound_api_grpc.rs +++ b/policy-test/tests/outbound_api_grpc.rs @@ -1,314 +1,358 @@ -use futures::prelude::*; -use kube::ResourceExt; +use futures::StreamExt; +use kube::Resource; +use linkerd2_proxy_api::{self as api, outbound}; +use linkerd_policy_controller_k8s_api::{self as k8s, gateway, policy}; use linkerd_policy_test::{ - assert_resource_meta, assert_status_accepted, await_egress_net_status, await_grpc_route_status, - create, create_egress_network, create_service, mk_egress_net, mk_service, outbound_api::*, - update, with_temp_ns, Resource, + assert_resource_meta, await_route_accepted, create, + outbound_api::{assert_route_is_default, assert_singleton, retry_watch_outbound_policy}, + test_route::{TestParent, TestRoute}, + with_temp_ns, }; -use std::collections::BTreeMap; +use maplit::btreemap; #[tokio::test(flavor = "current_thread")] -async fn service_grpc_route_retries_and_timeouts() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - grpc_route_retries_and_timeouts(svc, &client, &ns).await; - }) - .await; -} - 
-#[tokio::test(flavor = "current_thread")] -async fn egress_net_grpc_route_retries_and_timeouts() { - with_temp_ns(|client, ns| async move { - // Create a egress net - let egress = - Resource::EgressNetwork(create_egress_network(&client, &ns, "my-egress").await); - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - grpc_route_retries_and_timeouts(egress, &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_retries_and_timeouts() { - with_temp_ns(|client, ns| async move { - // Create a service - let mut svc = mk_service(&ns, "my-svc", 4191); - svc.annotations_mut() - .insert("retry.linkerd.io/grpc".to_string(), "internal".to_string()); - svc.annotations_mut() - .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); - let svc = Resource::Service(create(&client, svc).await); +async fn grpc_route_with_filters_service() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + with_temp_ns(|client, ns| async move { + // Create a parent + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. 
+ gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + let mut route = gateway::GrpcRoute::make_route( + ns, + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + for rule in route.spec.rules.iter_mut().flatten() { + rule.filters = Some(vec![gateway::GrpcRouteFilter::RequestHeaderModifier { + request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { + set: Some(vec![k8s_gateway_api::HttpHeader { + name: "set".to_string(), + value: "set-value".to_string(), + }]), + add: Some(vec![k8s_gateway_api::HttpHeader { + name: "add".to_string(), + value: "add-value".to_string(), + }]), + remove: Some(vec!["remove".to_string()]), + }, + }]); + } + let route = create(&client, route).await; + await_route_accepted(&client, &route).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a route with filters. 
+ gateway::GrpcRoute::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(gateway::GrpcRoute::extract_meta(outbound_route))); + let rule = assert_singleton(&outbound_route.rules); + let filters = &rule.filters; + assert_eq!( + *filters, + vec![outbound::grpc_route::Filter { + kind: Some(outbound::grpc_route::filter::Kind::RequestHeaderModifier( + api::http_route::RequestHeaderModifier { + add: Some(api::http_types::Headers { + headers: vec![api::http_types::headers::Header { + name: "add".to_string(), + value: "add-value".into(), + }] + }), + set: Some(api::http_types::Headers { + headers: vec![api::http_types::headers::Header { + name: "set".to_string(), + value: "set-value".into(), + }] + }), + remove: vec!["remove".to_string()], + } + )) + }] + ); + }); + }) + .await; + } - parent_retries_and_timeouts(svc, &client, &ns).await; - }) - .await; + test::().await; + test::().await; } #[tokio::test(flavor = "current_thread")] -async fn egress_net_retries_and_timeouts() { - with_temp_ns(|client, ns| async move { - // Create a egress net - let mut egress = mk_egress_net(&ns, "my-egress"); - egress - .annotations_mut() - .insert("retry.linkerd.io/grpc".to_string(), "internal".to_string()); - egress - .annotations_mut() - .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); - let egress = Resource::EgressNetwork(create(&client, egress).await); - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_retries_and_timeouts(egress, &client, &ns).await; - }) - .await; -} +async fn policy_grpc_route_with_backend_filters() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + with_temp_ns(|client, ns| async move { + // Create a parent + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + // Create a backend + let backend_port = 8888; + let backend = match 
P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. + gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + let mut route = gateway::GrpcRoute::make_route( + ns, + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + for rule in route.spec.rules.iter_mut().flatten() { + for backend in rule.backend_refs.iter_mut().flatten() { + backend.filters = Some(vec![gateway::GrpcRouteFilter::RequestHeaderModifier { + request_header_modifier: gateway::HttpRequestHeaderFilter { + set: Some(vec![k8s_gateway_api::HttpHeader { + name: "set".to_string(), + value: "set-value".to_string(), + }]), + add: Some(vec![k8s_gateway_api::HttpHeader { + name: "add".to_string(), + value: "add-value".to_string(), + }]), + remove: Some(vec!["remove".to_string()]), + }, + }]); + } + } + let route = create(&client, route).await; + await_route_accepted(&client, &route).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a route with backend filters. 
+ gateway::GrpcRoute::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(gateway::GrpcRoute::extract_meta(outbound_route))); + let rules = gateway::GrpcRoute::rules_random_available(outbound_route); + let rule = assert_singleton(&rules); + let backend = assert_singleton(rule); + assert_eq!( + backend.filters, + vec![outbound::grpc_route::Filter { + kind: Some(outbound::grpc_route::filter::Kind::RequestHeaderModifier( + api::http_route::RequestHeaderModifier { + add: Some(api::http_types::Headers { + headers: vec![api::http_types::headers::Header { + name: "add".to_string(), + value: "add-value".into(), + }] + }), + set: Some(api::http_types::Headers { + headers: vec![api::http_types::headers::Header { + name: "set".to_string(), + value: "set-value".into(), + }] + }), + remove: vec!["remove".to_string()], + } + )) + }] + ); + }); + }) + .await; + } -#[tokio::test(flavor = "current_thread")] -async fn service_grpc_route_reattachment() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - grpc_route_reattachment(Resource::Service(svc), &client, &ns).await; - }) - .await; + test::().await; + test::().await; } #[tokio::test(flavor = "current_thread")] -async fn egress_net_grpc_route_reattachment() { - with_temp_ns(|client, ns| async move { - // Create a egress network - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - grpc_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -/* Helpers */ - -struct GrpcRouteBuilder(k8s_gateway_api::GrpcRoute); - -fn mk_grpc_route(ns: &str, name: &str, parent: &Resource, port: Option) -> GrpcRouteBuilder { - GrpcRouteBuilder(k8s_gateway_api::GrpcRoute { - metadata: kube::api::ObjectMeta { - namespace: 
Some(ns.to_string()), - name: Some(name.to_string()), - ..Default::default() - }, - spec: k8s_gateway_api::GrpcRouteSpec { - inner: k8s_gateway_api::CommonRouteSpec { - parent_refs: Some(vec![k8s_gateway_api::ParentReference { - group: Some(parent.group()), - kind: Some(parent.kind()), - namespace: Some(parent.namespace()), - name: parent.name(), - section_name: None, - port, - }]), - }, - hostnames: None, - rules: Some(vec![k8s_gateway_api::GrpcRouteRule { - matches: Some(vec![k8s_gateway_api::GrpcRouteMatch { - method: Some(k8s_gateway_api::GrpcMethodMatch::Exact { - method: Some("foo".to_string()), - service: Some("my-gprc-service".to_string()), - }), - headers: None, - }]), - filters: None, - backend_refs: None, - }]), - }, - status: None, - }) -} - -impl GrpcRouteBuilder { - fn with_annotations(self, annotations: BTreeMap) -> Self { - let mut route = self.0; - route.metadata.annotations = Some(annotations); - Self(route) +async fn grpc_route_retries_and_timeouts() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + with_temp_ns(|client, ns| async move { + // Create a parent + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let mut route = gateway::GrpcRoute::make_route( + ns.clone(), + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + route.meta_mut().annotations = Some(btreemap! 
{ + "retry.linkerd.io/grpc".to_string() => "internal".to_string(), + "timeout.linkerd.io/response".to_string() => "10s".to_string(), + }); + let route = create(&client, route).await; + await_route_accepted(&client, &route).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + gateway::GrpcRoute::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(gateway::GrpcRoute::extract_meta(outbound_route))); + let rule = assert_singleton(&outbound_route.rules); + let conditions = rule + .retry + .as_ref() + .expect("retry config expected") + .conditions + .as_ref() + .expect("retry conditions expected"); + assert!(conditions.internal); + let timeout = rule + .timeouts + .as_ref() + .expect("timeouts expected") + .response + .as_ref() + .expect("response timeout expected"); + assert_eq!(timeout.seconds, 10); + }); + }) + .await; } - fn build(self) -> k8s_gateway_api::GrpcRoute { - self.0 - } + test::().await; + test::().await; } -async fn grpc_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { - let mut route = create( - client, - mk_grpc_route(ns, "foo-route", &parent, Some(4191)).build(), - ) - .await; - await_grpc_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - { - // The route should be attached. 
- let routes = grpc_routes(&config); - let route = assert_route_attached(routes, &parent); - assert_name_eq(route.metadata.as_ref().unwrap(), "foo-route"); - } - - route - .spec - .inner - .parent_refs - .as_mut() - .unwrap() - .first_mut() - .unwrap() - .name = "other".to_string(); - update(client, route.clone()).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The grpc route should be unattached and the default (http) route - // should be present. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - route - .spec - .inner - .parent_refs - .as_mut() - .unwrap() - .first_mut() - .unwrap() - .name = parent.name(); - update(client, route).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be attached again. - { - // The route should be attached. - let routes = grpc_routes(&config); - let route = assert_route_attached(routes, &parent); - assert_name_eq(route.metadata.as_ref().unwrap(), "foo-route"); +#[tokio::test(flavor = "current_thread")] +async fn parent_retries_and_timeouts() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + with_temp_ns(|client, ns| async move { + // Create a parent + let mut parent = P::make_parent(&ns); + parent.meta_mut().annotations = Some(btreemap! 
{ + "retry.linkerd.io/grpc".to_string() => "internal".to_string(), + "timeout.linkerd.io/response".to_string() => "10s".to_string(), + }); + let parent = create(&client, parent).await; + let port = 4191; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let mut route = gateway::GrpcRoute::make_route( + ns.clone(), + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + route.meta_mut().annotations = Some(btreemap! { + // Route annotations override the timeout config specified on the parent. + "timeout.linkerd.io/request".to_string() => "5s".to_string(), + }); + let route = create(&client, route).await; + await_route_accepted(&client, &route).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + gateway::GrpcRoute::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(gateway::GrpcRoute::extract_meta(outbound_route))); + let rule = assert_singleton(&outbound_route.rules); + + // Retry config inherited from the parent. + let conditions = rule + .retry + .as_ref() + .expect("retry config expected") + .conditions + .as_ref() + .expect("retry conditions expected"); + assert!(conditions.internal); + + // Parent timeout config overridden by route timeout config. 
+ let timeouts = rule.timeouts.as_ref().expect("timeouts expected"); + assert_eq!(timeouts.response, None); + let request_timeout = timeouts.request.as_ref().expect("request timeout expected"); + assert_eq!(request_timeout.seconds, 5); + }); + }) + .await; } -} - -async fn grpc_route_retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { - let _route = create( - client, - mk_grpc_route(ns, "foo-route", &parent, Some(4191)) - .with_annotations( - vec![ - ("retry.linkerd.io/grpc".to_string(), "internal".to_string()), - ("timeout.linkerd.io/response".to_string(), "10s".to_string()), - ] - .into_iter() - .collect(), - ) - .build(), - ) - .await; - await_grpc_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - let routes = grpc_routes(&config); - let route = assert_route_attached(routes, &parent); - let rule = assert_singleton(&route.rules); - let conditions = rule - .retry - .as_ref() - .expect("retry config expected") - .conditions - .as_ref() - .expect("retry conditions expected"); - assert!(conditions.internal); - let timeout = rule - .timeouts - .as_ref() - .expect("timeouts expected") - .response - .as_ref() - .expect("response timeout expected"); - assert_eq!(timeout.seconds, 10); -} - -async fn parent_retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { - let _route = create( - client, - mk_grpc_route(ns, "foo-route", &parent, Some(4191)) - .with_annotations( - vec![ - // Route annotations override the timeout config specified - // on the service. 
- ("timeout.linkerd.io/request".to_string(), "5s".to_string()), - ] - .into_iter() - .collect(), - ) - .build(), - ) - .await; - await_grpc_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - let routes = grpc_routes(&config); - let route = assert_route_attached(routes, &parent); - let rule = assert_singleton(&route.rules); - let conditions = rule - .retry - .as_ref() - .expect("retry config expected") - .conditions - .as_ref() - .expect("retry conditions expected"); - // Retry config inherited from the service. - assert!(conditions.internal); - let timeouts = rule.timeouts.as_ref().expect("timeouts expected"); - // Parent timeout config overridden by route timeout config. - assert_eq!(timeouts.response, None); - let request_timeout = timeouts.request.as_ref().expect("request timeout expected"); - assert_eq!(request_timeout.seconds, 5); + test::().await; + test::().await; } diff --git a/policy-test/tests/outbound_api_http.rs b/policy-test/tests/outbound_api_http.rs new file mode 100644 index 0000000000000..264146d246a31 --- /dev/null +++ b/policy-test/tests/outbound_api_http.rs @@ -0,0 +1,723 @@ +use futures::StreamExt; +use linkerd2_proxy_api::{self as api, outbound}; +use linkerd_policy_controller_k8s_api::{self as k8s, gateway, policy}; +use linkerd_policy_test::{ + assert_resource_meta, await_route_accepted, create, + outbound_api::{assert_route_is_default, assert_singleton, retry_watch_outbound_policy}, + test_route::{TestParent, TestRoute}, + with_temp_ns, +}; +use maplit::btreemap; + +#[tokio::test(flavor = "current_thread")] +async fn gateway_http_route_with_filters_service() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + with_temp_ns(|client, ns| async move { + // Create a parent + let 
port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. + gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + let mut route = gateway::HttpRoute::make_route( + ns, + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + for rule in route.spec.rules.iter_mut().flatten() { + rule.filters = Some(vec![ + gateway::HttpRouteFilter::RequestHeaderModifier { + request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { + set: Some(vec![k8s_gateway_api::HttpHeader { + name: "set".to_string(), + value: "set-value".to_string(), + }]), + add: Some(vec![k8s_gateway_api::HttpHeader { + name: "add".to_string(), + value: "add-value".to_string(), + }]), + remove: Some(vec!["remove".to_string()]), + }, + }, + gateway::HttpRouteFilter::RequestRedirect { + request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { + scheme: Some("http".to_string()), + hostname: Some("host".to_string()), + path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { + replace_prefix_match: "/path".to_string(), + }), + port: Some(5555), + status_code: Some(302), + }, + }, + ]); + } + let route = create(&client, route).await; + await_route_accepted(&client, &route).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + 
assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a route with filters. + gateway::HttpRoute::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(gateway::HttpRoute::extract_meta(outbound_route))); + let rule = assert_singleton(&outbound_route.rules); + let filters = &rule.filters; + assert_eq!( + *filters, + vec![ + outbound::http_route::Filter { + kind: Some( + outbound::http_route::filter::Kind::RequestHeaderModifier( + api::http_route::RequestHeaderModifier { + add: Some(api::http_types::Headers { + headers: vec![api::http_types::headers::Header { + name: "add".to_string(), + value: "add-value".into(), + }] + }), + set: Some(api::http_types::Headers { + headers: vec![api::http_types::headers::Header { + name: "set".to_string(), + value: "set-value".into(), + }] + }), + remove: vec!["remove".to_string()], + } + ) + ) + }, + outbound::http_route::Filter { + kind: Some(outbound::http_route::filter::Kind::Redirect( + api::http_route::RequestRedirect { + scheme: Some(api::http_types::Scheme { + r#type: Some(api::http_types::scheme::Type::Registered( + api::http_types::scheme::Registered::Http.into(), + )) + }), + host: "host".to_string(), + path: Some(linkerd2_proxy_api::http_route::PathModifier { + replace: Some( + linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( + "/path".to_string() + ) + ) + }), + port: 5555, + status: 302, + } + )) + } + ] + ); + }); + }) + .await; + } + + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn policy_http_route_with_filters_service() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + with_temp_ns(|client, ns| async move { + // Create a parent + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => 
create(&client, b).await, + None => parent.clone(), + }; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. + gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + let mut route = policy::HttpRoute::make_route( + ns, + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + for rule in route.spec.rules.iter_mut().flatten() { + rule.filters = Some(vec![ + policy::httproute::HttpRouteFilter::RequestHeaderModifier { + request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { + set: Some(vec![k8s_gateway_api::HttpHeader { + name: "set".to_string(), + value: "set-value".to_string(), + }]), + add: Some(vec![k8s_gateway_api::HttpHeader { + name: "add".to_string(), + value: "add-value".to_string(), + }]), + remove: Some(vec!["remove".to_string()]), + }, + }, + policy::httproute::HttpRouteFilter::RequestRedirect { + request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { + scheme: Some("http".to_string()), + hostname: Some("host".to_string()), + path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { + replace_prefix_match: "/path".to_string(), + }), + port: Some(5555), + status_code: Some(302), + }, + }, + ]); + } + let route = create(&client, route).await; + await_route_accepted(&client, &route).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a route with filters. 
+ policy::HttpRoute::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(policy::HttpRoute::extract_meta(outbound_route))); + let rule = assert_singleton(&outbound_route.rules); + let filters = &rule.filters; + assert_eq!( + *filters, + vec![ + outbound::http_route::Filter { + kind: Some( + outbound::http_route::filter::Kind::RequestHeaderModifier( + api::http_route::RequestHeaderModifier { + add: Some(api::http_types::Headers { + headers: vec![api::http_types::headers::Header { + name: "add".to_string(), + value: "add-value".into(), + }] + }), + set: Some(api::http_types::Headers { + headers: vec![api::http_types::headers::Header { + name: "set".to_string(), + value: "set-value".into(), + }] + }), + remove: vec!["remove".to_string()], + } + ) + ) + }, + outbound::http_route::Filter { + kind: Some(outbound::http_route::filter::Kind::Redirect( + api::http_route::RequestRedirect { + scheme: Some(api::http_types::Scheme { + r#type: Some(api::http_types::scheme::Type::Registered( + api::http_types::scheme::Registered::Http.into(), + )) + }), + host: "host".to_string(), + path: Some(linkerd2_proxy_api::http_route::PathModifier { + replace: Some( + linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( + "/path".to_string() + ) + ) + }), + port: 5555, + status: 302, + } + )) + } + ] + ); + }); + }) + .await; + } + + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn gateway_http_route_with_backend_filters() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + with_temp_ns(|client, ns| async move { + // Create a parent + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let mut rx = retry_watch_outbound_policy(&client, &ns, 
parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. + gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + let mut route = gateway::HttpRoute::make_route( + ns, + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + for rule in route.spec.rules.iter_mut().flatten() { + for backend in rule.backend_refs.iter_mut().flatten() { + backend.filters = Some(vec![ + gateway::HttpRouteFilter::RequestHeaderModifier { + request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { + set: Some(vec![k8s_gateway_api::HttpHeader { + name: "set".to_string(), + value: "set-value".to_string(), + }]), + add: Some(vec![k8s_gateway_api::HttpHeader { + name: "add".to_string(), + value: "add-value".to_string(), + }]), + remove: Some(vec!["remove".to_string()]), + }, + }, + gateway::HttpRouteFilter::RequestRedirect { + request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { + scheme: Some("http".to_string()), + hostname: Some("host".to_string()), + path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { + replace_prefix_match: "/path".to_string(), + }), + port: Some(5555), + status_code: Some(302), + }, + }, + ]); + } + } + let route = create(&client, route).await; + await_route_accepted(&client, &route).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a route with backend filters. 
+ gateway::HttpRoute::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(gateway::HttpRoute::extract_meta(outbound_route))); + let rules = gateway::HttpRoute::rules_random_available(outbound_route); + let rule = assert_singleton(&rules); + let backend = assert_singleton(rule); + assert_eq!( + backend.filters, + vec![ + outbound::http_route::Filter { + kind: Some( + outbound::http_route::filter::Kind::RequestHeaderModifier( + api::http_route::RequestHeaderModifier { + add: Some(api::http_types::Headers { + headers: vec![api::http_types::headers::Header { + name: "add".to_string(), + value: "add-value".into(), + }] + }), + set: Some(api::http_types::Headers { + headers: vec![api::http_types::headers::Header { + name: "set".to_string(), + value: "set-value".into(), + }] + }), + remove: vec!["remove".to_string()], + } + ) + ) + }, + outbound::http_route::Filter { + kind: Some(outbound::http_route::filter::Kind::Redirect( + api::http_route::RequestRedirect { + scheme: Some(api::http_types::Scheme { + r#type: Some(api::http_types::scheme::Type::Registered( + api::http_types::scheme::Registered::Http.into(), + )) + }), + host: "host".to_string(), + path: Some(linkerd2_proxy_api::http_route::PathModifier { + replace: Some( + linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( + "/path".to_string() + ) + ) + }), + port: 5555, + status: 302, + } + )) + } + ] + ); + }); + }) + .await; + } + + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn policy_http_route_with_backend_filters() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + with_temp_ns(|client, ns| async move { + // Create a parent + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => 
parent.clone(), + }; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. + gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + let mut route = policy::HttpRoute::make_route( + ns, + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + for rule in route.spec.rules.iter_mut().flatten() { + for backend in rule.backend_refs.iter_mut().flatten() { + backend.filters = Some(vec![ + gateway::HttpRouteFilter::RequestHeaderModifier { + request_header_modifier: gateway::HttpRequestHeaderFilter { + set: Some(vec![k8s_gateway_api::HttpHeader { + name: "set".to_string(), + value: "set-value".to_string(), + }]), + add: Some(vec![k8s_gateway_api::HttpHeader { + name: "add".to_string(), + value: "add-value".to_string(), + }]), + remove: Some(vec!["remove".to_string()]), + }, + }, + gateway::HttpRouteFilter::RequestRedirect { + request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { + scheme: Some("http".to_string()), + hostname: Some("host".to_string()), + path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { + replace_prefix_match: "/path".to_string(), + }), + port: Some(5555), + status_code: Some(302), + }, + }, + ]); + } + } + let route = create(&client, route).await; + await_route_accepted(&client, &route).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a route with backend filters. 
+ policy::HttpRoute::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(policy::HttpRoute::extract_meta(outbound_route))); + let rules = policy::HttpRoute::rules_random_available(outbound_route); + let rule = assert_singleton(&rules); + let backend = assert_singleton(rule); + assert_eq!( + backend.filters, + vec![ + outbound::http_route::Filter { + kind: Some( + outbound::http_route::filter::Kind::RequestHeaderModifier( + api::http_route::RequestHeaderModifier { + add: Some(api::http_types::Headers { + headers: vec![api::http_types::headers::Header { + name: "add".to_string(), + value: "add-value".into(), + }] + }), + set: Some(api::http_types::Headers { + headers: vec![api::http_types::headers::Header { + name: "set".to_string(), + value: "set-value".into(), + }] + }), + remove: vec!["remove".to_string()], + } + ) + ) + }, + outbound::http_route::Filter { + kind: Some(outbound::http_route::filter::Kind::Redirect( + api::http_route::RequestRedirect { + scheme: Some(api::http_types::Scheme { + r#type: Some(api::http_types::scheme::Type::Registered( + api::http_types::scheme::Registered::Http.into(), + )) + }), + host: "host".to_string(), + path: Some(linkerd2_proxy_api::http_route::PathModifier { + replace: Some( + linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( + "/path".to_string() + ) + ) + }), + port: 5555, + status: 302, + } + )) + } + ] + ); + }); + }) + .await; + } + + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn http_route_retries_and_timeouts() { + async fn test>() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()), + ); + with_temp_ns(|client, ns| async move { + // Create a parent + let parent = create(&client, P::make_parent(&ns)).await; + let port = 4191; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => 
create(&client, b).await, + None => parent.clone(), + }; + + let mut route = R::make_route( + ns.clone(), + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + route.meta_mut().annotations = Some(btreemap! { + "retry.linkerd.io/http".to_string() => "5xx".to_string(), + "timeout.linkerd.io/response".to_string() => "10s".to_string(), + }); + let route = create(&client, route).await; + await_route_accepted(&client, &route).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + R::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(policy::HttpRoute::extract_meta(outbound_route))); + let rule = assert_singleton(&outbound_route.rules); + let conditions = rule + .retry + .as_ref() + .expect("retry config expected") + .conditions + .as_ref() + .expect("retry conditions expected"); + let status_range = assert_singleton(&conditions.status_ranges); + assert_eq!(status_range.start, 500); + assert_eq!(status_range.end, 599); + let timeout = rule + .timeouts + .as_ref() + .expect("timeouts expected") + .response + .as_ref() + .expect("response timeout expected"); + assert_eq!(timeout.seconds, 10); + }); + }) + .await; + } + + test::().await; + test::().await; + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn parent_retries_and_timeouts() { + async fn test>() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()), + ); + with_temp_ns(|client, ns| async move { + // Create a parent + let mut parent = P::make_parent(&ns); + parent.meta_mut().annotations = Some(btreemap! 
{ + "retry.linkerd.io/http".to_string() => "5xx".to_string(), + "timeout.linkerd.io/response".to_string() => "10s".to_string(), + }); + let parent = create(&client, parent).await; + let port = 4191; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let mut route = R::make_route( + ns.clone(), + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + route.meta_mut().annotations = Some(btreemap! { + // Route annotations override the retry config specified on the parent. + "timeout.linkerd.io/request".to_string() => "5s".to_string(), + }); + let route = create(&client, route).await; + await_route_accepted(&client, &route).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + R::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(policy::HttpRoute::extract_meta(outbound_route))); + let rule = assert_singleton(&outbound_route.rules); + let conditions = rule + .retry + .as_ref() + .expect("retry config expected") + .conditions + .as_ref() + .expect("retry conditions expected"); + let status_range = assert_singleton(&conditions.status_ranges); + // Retry config inherited from the service. + assert_eq!(status_range.start, 500); + assert_eq!(status_range.end, 599); + let timeouts = rule.timeouts.as_ref().expect("timeouts expected"); + // Parent timeout config overridden by route timeout config. 
+ assert_eq!(timeouts.response, None); + let request_timeout = timeouts.request.as_ref().expect("request timeout expected"); + assert_eq!(request_timeout.seconds, 5); + }); + }) + .await; + } + + test::().await; + test::().await; + test::().await; + test::().await; +} diff --git a/policy-test/tests/outbound_api_linkerd.rs b/policy-test/tests/outbound_api_linkerd.rs deleted file mode 100644 index 4e1619c8b69cd..0000000000000 --- a/policy-test/tests/outbound_api_linkerd.rs +++ /dev/null @@ -1,2027 +0,0 @@ -use std::{collections::BTreeMap, time::Duration}; - -use futures::prelude::*; -use kube::ResourceExt; -use linkerd_policy_controller_k8s_api as k8s; -use linkerd_policy_test::{ - assert_default_accrual_backoff, assert_resource_meta, assert_status_accepted, - await_egress_net_status, await_route_status, create, create_annotated_egress_network, - create_annotated_service, create_cluster_scoped, create_egress_network, - create_opaque_egress_network, create_opaque_service, create_service, delete_cluster_scoped, - grpc, mk_egress_net, mk_service, outbound_api::*, update, with_temp_ns, Resource, -}; -use maplit::{btreemap, convert_args}; - -// These tests are copies of the tests in outbound_api_gateway.rs but using the -// policy.linkerd.io HttpRoute kubernetes types instead of the Gateway API ones. -// These two files should be kept in sync to ensure that Linkerd can read and -// function correctly with both types of resources. - -#[tokio::test(flavor = "current_thread")] -async fn service_does_not_exist() { - with_temp_ns(|client, ns| async move { - // Build a service but don't apply it to the cluster. - let mut svc = mk_service(&ns, "my-svc", 4191); - // Give it a bogus cluster ip. 
- svc.spec.as_mut().unwrap().cluster_ip = Some("192.168.0.2".to_string()); - - let mut policy_api = grpc::OutboundPolicyClient::port_forwarded(&client).await; - let rsp = policy_api.watch(&ns, &svc, 4191).await; - - assert!(rsp.is_err()); - assert_eq!(rsp.err().unwrap().code(), tonic::Code::NotFound); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_no_http_routes() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - parent_with_no_http_routes(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_no_http_routes() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_no_http_routes(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_http_route_without_rules() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - parent_with_http_route_without_rules(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_http_route_without_rules() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_http_route_without_rules(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_http_routes_without_backends() { - with_temp_ns(|client, ns| 
async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - parent_with_http_routes_without_backends(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_http_routes_without_backends() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_http_routes_without_backends(Resource::EgressNetwork(egress), &client, &ns) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_http_routes_with_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - let backend_svc = create_service(&client, &ns, "backend", 8888).await; - parent_with_http_routes_with_backend( - Resource::Service(svc), - Resource::Service(backend_svc), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_http_routes_with_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_http_routes_with_backend( - Resource::EgressNetwork(egress.clone()), - Resource::EgressNetwork(egress), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_http_routes_with_cross_namespace_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let 
config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &svc, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - let backend_ns_name = format!("{}-backend", ns); - let backend_ns = create_cluster_scoped( - &client, - k8s::Namespace { - metadata: k8s::ObjectMeta { - name: Some(backend_ns_name.clone()), - labels: Some(convert_args!(btreemap!( - "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), - ))), - ..Default::default() - }, - ..Default::default() - }, - ) - .await; - let backend_name = "backend"; - let backend_svc = - Resource::Service(create_service(&client, &backend_ns_name, backend_name, 8888).await); - let backends = [backend_svc.clone()]; - let route = mk_http_route(&ns, "foo-route", &svc, Some(4191)).with_backends( - Some(&backends), - Some(backend_ns_name), - None, - ); - let _route = create(&client, route.build()).await; - await_route_status(&client, &ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &svc, 4191); - - // There should be a route with a backend with no filters. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - let backends = route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_svc, 8888); - let filters = &backend.backend.as_ref().unwrap().filters; - assert_eq!(filters.len(), 0); - }); - - delete_cluster_scoped(&client, backend_ns).await - }) - .await; -} - -// TODO: Test fails until handling of invalid backends is implemented. 
-#[tokio::test(flavor = "current_thread")] -async fn service_with_http_routes_with_invalid_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - let backend = mk_service(&ns, "invalid", 4191); - - parent_with_http_routes_with_invalid_backend( - Resource::Service(svc), - Resource::Service(backend), - &client, - &ns, - ) - .await; - }) - .await; -} - -// TODO: Test fails until handling of invalid backends is implemented. -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_http_routes_with_invalid_backend() { - with_temp_ns(|client, ns| async move { - // Create an egress network - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - let backend = mk_egress_net(&ns, "invalid"); - - parent_with_http_routes_with_invalid_backend( - Resource::EgressNetwork(egress), - Resource::EgressNetwork(backend), - &client, - &ns, - ) - .await; - }) - .await; -} - -// TODO: Investigate why the policy controller is only returning one route in this -// case instead of two. -#[tokio::test(flavor = "current_thread")] -async fn service_with_multiple_http_routes() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - parent_with_multiple_http_routes(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -// TODO: Investigate why the policy controller is only returning one route in this -// case instead of two. 
-#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_multiple_http_routes() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_multiple_http_routes(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_consecutive_failure_accrual() { - with_temp_ns(|client, ns| async move { - let svc = create_annotated_service( - &client, - &ns, - "consecutive-accrual-svc", - 80, - BTreeMap::from([ - ( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), - "8".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-min-penalty".to_string(), - "10s".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-max-penalty".to_string(), - "10m".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), - "1.0".to_string(), - ), - ]), - ) - .await; - parent_with_consecutive_failure_accrual(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_consecutive_failure_accrual() { - with_temp_ns(|client, ns| async move { - let egress = create_annotated_egress_network( - &client, - &ns, - "consecutive-accrual-egress", - BTreeMap::from([ - ( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), - "8".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-min-penalty".to_string(), - "10s".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-max-penalty".to_string(), 
- "10m".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), - "1.0".to_string(), - ), - ]), - ) - .await; - let status = await_egress_net_status(&client, &ns, "consecutive-accrual-egress").await; - assert_status_accepted(status.conditions); - - parent_with_consecutive_failure_accrual(Resource::EgressNetwork(egress), &client, &ns) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_consecutive_failure_accrual_defaults_no_config() { - with_temp_ns(|client, ns| async move { - // Create a service configured to do consecutive failure accrual, but - // with no additional configuration - let svc_no_config = create_annotated_service( - &client, - &ns, - "default-accrual-svc", - 80, - BTreeMap::from([( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - )]), - ) - .await; - - parent_with_consecutive_failure_accrual_defaults_no_config( - Resource::Service(svc_no_config), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_consecutive_failure_accrual_defaults_max_fails() { - with_temp_ns(|client, ns| async move { - // Create a service configured to do consecutive failure accrual with - // max number of failures and with default backoff - let svc_max_fails = create_annotated_service( - &client, - &ns, - "no-backoff-svc", - 80, - BTreeMap::from([ - ( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), - "8".to_string(), - ), - ]), - ) - .await; - - parent_with_consecutive_failure_accrual_defaults_max_fails( - Resource::Service(svc_max_fails), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_consecutive_failure_accrual_defaults_jitter() { - with_temp_ns(|client, ns| async move { - // Create a service 
configured to do consecutive failure accrual with - // only the jitter ratio configured in the backoff - let svc_jitter = create_annotated_service( - &client, - &ns, - "only-jitter-svc", - 80, - BTreeMap::from([ - ( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), - "1.0".to_string(), - ), - ]), - ) - .await; - - parent_with_consecutive_failure_accrual_defaults_max_jitter( - Resource::Service(svc_jitter), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_consecutive_failure_accrual_defaults_no_config() { - with_temp_ns(|client, ns| async move { - // Create a egress network configured to do consecutive failure accrual, but - // with no additional configuration - let egress_no_config = create_annotated_egress_network( - &client, - &ns, - "default-accrual-egress", - BTreeMap::from([( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - )]), - ) - .await; - let status = await_egress_net_status(&client, &ns, "default-accrual-egress").await; - assert_status_accepted(status.conditions); - - parent_with_consecutive_failure_accrual_defaults_no_config( - Resource::EgressNetwork(egress_no_config), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_consecutive_failure_accrual_defaults_max_fails() { - with_temp_ns(|client, ns| async move { - // Create a egress network configured to do consecutive failure accrual with - // max number of failures and with default backoff - let egress_max_fails = create_annotated_egress_network( - &client, - &ns, - "no-backoff-egress", - BTreeMap::from([ - ( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), - "8".to_string(), - ), - ]), - ) - 
.await; - let status = await_egress_net_status(&client, &ns, "no-backoff-egress").await; - assert_status_accepted(status.conditions); - - parent_with_consecutive_failure_accrual_defaults_max_fails( - Resource::EgressNetwork(egress_max_fails), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_consecutive_failure_accrual_defaults_jitter() { - with_temp_ns(|client, ns| async move { - // Create an egress net configured to do consecutive failure accrual with - // only the jitter ratio configured in the backoff - let egress_jitter = create_annotated_egress_network( - &client, - &ns, - "only-jitter-egress", - BTreeMap::from([ - ( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), - "1.0".to_string(), - ), - ]), - ) - .await; - let status = await_egress_net_status(&client, &ns, "only-jitter-egress").await; - assert_status_accepted(status.conditions); - - parent_with_consecutive_failure_accrual_defaults_max_jitter( - Resource::EgressNetwork(egress_jitter), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_default_failure_accrual() { - with_temp_ns(|client, ns| async move { - // Default config for Service, no failure accrual - let svc_default = create_service(&client, &ns, "default-failure-accrual", 80).await; - - // Create Service with consecutive failure accrual config for - // max_failures but no mode - let svc_max_fails = create_annotated_service( - &client, - &ns, - "default-max-failure-svc", - 80, - BTreeMap::from([( - "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), - "8".to_string(), - )]), - ) - .await; - - parent_with_default_failure_accrual( - Resource::Service(svc_default), - Resource::Service(svc_max_fails), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = 
"current_thread")] -async fn egress_net_with_default_failure_accrual() { - with_temp_ns(|client, ns| async move { - // Default config for EgressNetwork, no failure accrual - let egress_default = create_egress_network(&client, &ns, "default-failure-accrual").await; - let status = await_egress_net_status(&client, &ns, "default-failure-accrual").await; - assert_status_accepted(status.conditions); - - // Create EgressNetwork with consecutive failure accrual config for - // max_failures but no mode - let egress_max_fails = create_annotated_egress_network( - &client, - &ns, - "default-max-failure-egress", - BTreeMap::from([( - "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), - "8".to_string(), - )]), - ) - .await; - let status = await_egress_net_status(&client, &ns, "default-max-failure-egress").await; - assert_status_accepted(status.conditions); - - parent_with_default_failure_accrual( - Resource::EgressNetwork(egress_default), - Resource::EgressNetwork(egress_max_fails), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn opaque_service() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_opaque_service(&client, &ns, "my-svc", 4191).await; - opaque_parent(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn opaque_egress_net() { - with_temp_ns(|client, ns| async move { - // Create an egress network - let egress = create_opaque_egress_network(&client, &ns, "my-svc", 4191).await; - opaque_parent(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn route_with_filters_service() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - let backend = mk_service(&ns, "backend", 4191); - - route_with_filters( - Resource::Service(svc), - 
Resource::Service(backend), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn route_with_filters_egress_net() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - route_with_filters( - Resource::EgressNetwork(egress.clone()), - Resource::EgressNetwork(egress), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn backend_with_filters_service() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - let backend_svc = create_service(&client, &ns, "backend", 8888).await; - backend_with_filters( - Resource::Service(svc), - Resource::Service(backend_svc), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn backend_with_filters_egress_net() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - backend_with_filters( - Resource::EgressNetwork(egress.clone()), - Resource::EgressNetwork(egress), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn http_route_with_no_port() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let mut rx_4191 = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config_4191 = rx_4191 - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config_4191); - - let mut rx_9999 = 
retry_watch_outbound_policy(&client, &ns, &svc, 9999).await; - let config_9999 = rx_9999 - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config_9999); - - // There should be a default route. - detect_http_routes(&config_4191, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - detect_http_routes(&config_9999, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 9999); - }); - - let _route = create(&client, mk_http_route(&ns, "foo-route", &svc, None).build()).await; - await_route_status(&client, &ns, "foo-route").await; - - let config_4191 = rx_4191 - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config_4191); - - // The route should apply to the service. - detect_http_routes(&config_4191, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - - let config_9999 = rx_9999 - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config_9999); - - // The route should apply to other ports too. 
- detect_http_routes(&config_9999, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn producer_route() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?producer_config); - - let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - // There should be a default route. - detect_http_routes(&producer_config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - detect_http_routes(&consumer_config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - // A route created in the same namespace as its parent service is called - // a producer route. It should be returned in outbound policy requests - // for that service from ALL namespaces. 
- let _route = create( - &client, - mk_http_route(&ns, "foo-route", &svc, Some(4191)).build(), - ) - .await; - await_route_status(&client, &ns, "foo-route").await; - - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?producer_config); - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - // The route should be returned in queries from the producer namespace. - detect_http_routes(&producer_config, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - - // The route should be returned in queries from a consumer namespace. - detect_http_routes(&consumer_config, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn pre_existing_producer_route() { - // We test the scenario where outbound policy watches are initiated after - // a produce route already exists. - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - // A route created in the same namespace as its parent service is called - // a producer route. It should be returned in outbound policy requests - // for that service from ALL namespaces. 
- let _route = create( - &client, - mk_http_route(&ns, "foo-route", &svc, Some(4191)).build(), - ) - .await; - await_route_status(&client, &ns, "foo-route").await; - - let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?producer_config); - - let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - // The route should be returned in queries from the producer namespace. - detect_http_routes(&producer_config, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - - // The route should be returned in queries from a consumer namespace. - detect_http_routes(&consumer_config, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn consumer_route() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let consumer_ns_name = format!("{}-consumer", ns); - let consumer_ns = create_cluster_scoped( - &client, - k8s::Namespace { - metadata: k8s::ObjectMeta { - name: Some(consumer_ns_name.clone()), - labels: Some(convert_args!(btreemap!( - "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), - ))), - ..Default::default() - }, - ..Default::default() - }, - ) - .await; - - let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - 
tracing::trace!(?producer_config); - - let mut consumer_rx = - retry_watch_outbound_policy(&client, &consumer_ns_name, &svc, 4191).await; - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - let mut other_rx = retry_watch_outbound_policy(&client, "other_ns", &svc, 4191).await; - let other_config = other_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?other_config); - - // There should be a default route. - detect_http_routes(&producer_config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - detect_http_routes(&consumer_config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - detect_http_routes(&other_config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - // A route created in a different namespace as its parent service is - // called a consumer route. It should be returned in outbound policy - // requests for that service ONLY when the request comes from the - // consumer namespace. - let _route = create( - &client, - mk_http_route(&consumer_ns_name, "foo-route", &svc, Some(4191)).build(), - ) - .await; - await_route_status(&client, &consumer_ns_name, "foo-route").await; - - // The route should NOT be returned in queries from the producer namespace. - // There should be a default route. - assert!(producer_rx.next().now_or_never().is_none()); - - // The route should be returned in queries from the same consumer - // namespace. 
- let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - detect_http_routes(&consumer_config, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - - // The route should NOT be returned in queries from a different consumer - // namespace. - assert!(other_rx.next().now_or_never().is_none()); - - delete_cluster_scoped(&client, consumer_ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn http_route_retries_and_timeouts_service() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - http_route_retries_and_timeouts(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn http_route_retries_and_timeouts_egress_net() { - with_temp_ns(|client, ns| async move { - // Create an egress network - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - http_route_retries_and_timeouts(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_retries_and_timeouts() { - with_temp_ns(|client, ns| async move { - // Create a service - let mut svc = mk_service(&ns, "my-svc", 4191); - svc.annotations_mut() - .insert("retry.linkerd.io/http".to_string(), "5xx".to_string()); - svc.annotations_mut() - .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); - let svc = Resource::Service(create(&client, svc).await); - - retries_and_timeouts(svc, &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_retries_and_timeouts() { - with_temp_ns(|client, ns| async move { - // Create a egress 
net - let mut egress = mk_egress_net(&ns, "my-egress"); - egress - .annotations_mut() - .insert("retry.linkerd.io/http".to_string(), "5xx".to_string()); - egress - .annotations_mut() - .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); - let egress = Resource::EgressNetwork(create(&client, egress).await); - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - retries_and_timeouts(egress, &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_http_route_reattachment() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - http_route_reattachment(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_http_route_reattachment() { - with_temp_ns(|client, ns| async move { - // Create a egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - http_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -/* Helpers */ - -struct HttpRouteBuilder(k8s::policy::HttpRoute); - -fn mk_http_route(ns: &str, name: &str, parent: &Resource, port: Option) -> HttpRouteBuilder { - use k8s::policy::httproute as api; - - HttpRouteBuilder(api::HttpRoute { - metadata: kube::api::ObjectMeta { - namespace: Some(ns.to_string()), - name: Some(name.to_string()), - ..Default::default() - }, - spec: api::HttpRouteSpec { - inner: api::CommonRouteSpec { - parent_refs: Some(vec![api::ParentReference { - group: Some(parent.group()), - kind: Some(parent.kind()), - namespace: Some(parent.namespace()), - name: parent.name(), - section_name: None, - port, - }]), - }, - hostnames: None, - rules: Some(vec![api::HttpRouteRule { - matches: 
Some(vec![api::HttpRouteMatch { - path: Some(api::HttpPathMatch::Exact { - value: "/foo".to_string(), - }), - headers: None, - query_params: None, - method: Some("GET".to_string()), - }]), - filters: None, - backend_refs: None, - timeouts: None, - }]), - }, - status: None, - }) -} - -impl HttpRouteBuilder { - fn with_backends( - self, - backends: Option<&[Resource]>, - backends_ns: Option, - backend_filters: Option>, - ) -> Self { - let mut route = self.0; - let backend_refs = backends.map(|backends| { - backends - .iter() - .map(|backend| k8s::policy::httproute::HttpBackendRef { - backend_ref: Some(k8s_gateway_api::BackendRef { - weight: None, - inner: k8s_gateway_api::BackendObjectReference { - name: backend.name(), - port: Some(8888), - group: Some(backend.group()), - kind: Some(backend.kind()), - namespace: backends_ns.clone(), - }, - }), - filters: backend_filters.clone(), - }) - .collect() - }); - route.spec.rules.iter_mut().flatten().for_each(|rule| { - rule.backend_refs = backend_refs.clone(); - }); - Self(route) - } - - fn with_filters(self, filters: Option>) -> Self { - let mut route = self.0; - route - .spec - .rules - .iter_mut() - .flatten() - .for_each(|rule| rule.filters = filters.clone()); - Self(route) - } - - fn with_annotations(self, annotations: BTreeMap) -> Self { - let mut route = self.0; - route.metadata.annotations = Some(annotations); - Self(route) - } - - fn build(self) -> k8s::policy::HttpRoute { - self.0 - } -} - -fn mk_empty_http_route( - ns: &str, - name: &str, - parent: &Resource, - port: u16, -) -> k8s::policy::HttpRoute { - use k8s::policy::httproute as api; - api::HttpRoute { - metadata: kube::api::ObjectMeta { - namespace: Some(ns.to_string()), - name: Some(name.to_string()), - ..Default::default() - }, - spec: api::HttpRouteSpec { - inner: api::CommonRouteSpec { - parent_refs: Some(vec![api::ParentReference { - group: Some(parent.group()), - kind: Some(parent.kind()), - namespace: Some(parent.namespace()), - name: parent.name(), 
- section_name: None, - port: Some(port), - }]), - }, - hostnames: None, - rules: Some(vec![]), - }, - status: None, - } -} - -async fn parent_with_no_http_routes(parent: Resource, client: &kube::Client, ns: &str) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); -} - -async fn parent_with_http_route_without_rules(parent: Resource, client: &kube::Client, ns: &str) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - let _route = create(client, mk_empty_http_route(ns, "foo-route", &parent, 4191)).await; - await_route_status(client, ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a route with no rules. 
- detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - assert_eq!(route.rules.len(), 0); - }); -} - -async fn parent_with_http_routes_without_backends( - parent: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - let _route = create( - client, - mk_http_route(ns, "foo-route", &parent, Some(4191)).build(), - ) - .await; - await_route_status(client, ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a route with the logical backend. - detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let backends = route_backends_first_available(route); - let backend = assert_singleton(backends); - assert_backend_matches_parent(backend, &parent, 4191); - }); -} - -async fn parent_with_http_routes_with_backend( - parent: Resource, - rule_backend: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a default route. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - let backends = [rule_backend.clone()]; - let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( - Some(&backends), - None, - None, - ); - let _route = create(client, route.build()).await; - await_route_status(client, ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a route with a backend with no filters. - detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let backends = route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &rule_backend, 8888); - let filters = &backend.backend.as_ref().unwrap().filters; - assert_eq!(filters.len(), 0); - }); -} - -async fn parent_with_http_routes_with_invalid_backend( - parent: Resource, - backend: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a default route. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - let backends = [backend]; - let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( - Some(&backends), - None, - None, - ); - let _route = create(client, route.build()).await; - await_route_status(client, ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a route with a backend. - detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let backends = route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_backend_has_failure_filter(backend); - }); -} - -async fn parent_with_multiple_http_routes(parent: Resource, client: &kube::Client, ns: &str) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - // Routes should be returned in sorted order by creation timestamp then - // name. To ensure that this test isn't timing dependant, routes should - // be created in alphabetical order. - let _a_route = create( - client, - mk_http_route(ns, "a-route", &parent, Some(4191)).build(), - ) - .await; - await_route_status(client, ns, "a-route").await; - - // First route update. 
- let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - let _b_route = create( - client, - mk_http_route(ns, "b-route", &parent, Some(4191)).build(), - ) - .await; - await_route_status(client, ns, "b-route").await; - - // Second route update. - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - detect_http_routes(&config, |routes| { - let num_routes = match parent { - Resource::EgressNetwork(_) => 3, // three routes for egress net 2 configured + 1 default - Resource::Service(_) => 2, // two routes for service - }; - assert_eq!(routes.len(), num_routes); - assert_eq!(route_name(&routes[0]), "a-route"); - assert_eq!(route_name(&routes[1]), "b-route"); - }); -} - -async fn parent_with_consecutive_failure_accrual( - parent: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - detect_failure_accrual(&config, |accrual| { - let consecutive = failure_accrual_consecutive(accrual); - assert_eq!(8, consecutive.max_failures); - assert_eq!( - &grpc::outbound::ExponentialBackoff { - min_backoff: Some(Duration::from_secs(10).try_into().unwrap()), - max_backoff: Some(Duration::from_secs(600).try_into().unwrap()), - jitter_ratio: 1.0_f32, - }, - consecutive - .backoff - .as_ref() - .expect("backoff must be configured") - ); - }); -} - -async fn parent_with_consecutive_failure_accrual_defaults_no_config( - parent: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let 
config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Expect default max_failures and default backoff - detect_failure_accrual(&config, |accrual| { - let consecutive = failure_accrual_consecutive(accrual); - assert_eq!(7, consecutive.max_failures); - assert_default_accrual_backoff!(consecutive - .backoff - .as_ref() - .expect("backoff must be configured")); - }); -} - -async fn parent_with_consecutive_failure_accrual_defaults_max_fails( - parent: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Expect default backoff and overridden max_failures - detect_failure_accrual(&config, |accrual| { - let consecutive = failure_accrual_consecutive(accrual); - assert_eq!(8, consecutive.max_failures); - assert_default_accrual_backoff!(consecutive - .backoff - .as_ref() - .expect("backoff must be configured")); - }); -} - -async fn parent_with_consecutive_failure_accrual_defaults_max_jitter( - parent: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Expect defaults for everything except for the jitter ratio - detect_failure_accrual(&config, |accrual| { - let consecutive = failure_accrual_consecutive(accrual); - assert_eq!(7, consecutive.max_failures); - assert_eq!( - &grpc::outbound::ExponentialBackoff { - min_backoff: Some(Duration::from_secs(1).try_into().unwrap()), - max_backoff: Some(Duration::from_secs(60).try_into().unwrap()), - jitter_ratio: 1.0_f32, - }, - consecutive - .backoff - .as_ref() - .expect("backoff must be 
configured") - ); - }); -} - -async fn parent_with_default_failure_accrual( - parent_default_config: Resource, - parent_max_failures: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent_default_config, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Expect failure accrual config to be default (no failure accrual) - detect_failure_accrual(&config, |accrual| { - assert!( - accrual.is_none(), - "consecutive failure accrual should not be configured for service" - ); - }); - - let mut rx = retry_watch_outbound_policy(client, ns, &parent_max_failures, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Expect failure accrual config to be default (no failure accrual) - detect_failure_accrual(&config, |accrual| { - assert!( - accrual.is_none(), - "consecutive failure accrual should not be configured for service" - ) - }); -} - -async fn opaque_parent(parent: Resource, client: &kube::Client, ns: &str) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Proxy protocol should be opaque. 
- match config.protocol.unwrap().kind.unwrap() { - grpc::outbound::proxy_protocol::Kind::Opaque(_) => {} - _ => panic!("proxy protocol must be Opaque"), - }; -} - -async fn route_with_filters(parent: Resource, backend: Resource, client: &kube::Client, ns: &str) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - let backends = [backend.clone()]; - let route = mk_http_route(ns, "foo-route", &parent, Some(4191)) - .with_backends(Some(&backends), None, None) - .with_filters(Some(vec![ - k8s::policy::httproute::HttpRouteFilter::RequestHeaderModifier { - request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { - set: Some(vec![k8s_gateway_api::HttpHeader { - name: "set".to_string(), - value: "set-value".to_string(), - }]), - add: Some(vec![k8s_gateway_api::HttpHeader { - name: "add".to_string(), - value: "add-value".to_string(), - }]), - remove: Some(vec!["remove".to_string()]), - }, - }, - k8s::policy::httproute::HttpRouteFilter::RequestRedirect { - request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { - scheme: Some("http".to_string()), - hostname: Some("host".to_string()), - path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { - replace_prefix_match: "/path".to_string(), - }), - port: Some(5555), - status_code: Some(302), - }, - }, - ])); - let _route = create(client, route.build()).await; - await_route_status(client, ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - // There should be a route with filters. 
- detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let rule = assert_singleton(&route.rules); - let filters = &rule.filters; - assert_eq!( - *filters, - vec![ - grpc::outbound::http_route::Filter { - kind: Some( - grpc::outbound::http_route::filter::Kind::RequestHeaderModifier( - grpc::http_route::RequestHeaderModifier { - add: Some(grpc::http_types::Headers { - headers: vec![grpc::http_types::headers::Header { - name: "add".to_string(), - value: "add-value".into(), - }] - }), - set: Some(grpc::http_types::Headers { - headers: vec![grpc::http_types::headers::Header { - name: "set".to_string(), - value: "set-value".into(), - }] - }), - remove: vec!["remove".to_string()], - } - ) - ) - }, - grpc::outbound::http_route::Filter { - kind: Some(grpc::outbound::http_route::filter::Kind::Redirect( - grpc::http_route::RequestRedirect { - scheme: Some(grpc::http_types::Scheme { - r#type: Some(grpc::http_types::scheme::Type::Registered( - grpc::http_types::scheme::Registered::Http.into(), - )) - }), - host: "host".to_string(), - path: Some(linkerd2_proxy_api::http_route::PathModifier { - replace: Some( - linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( - "/path".to_string() - ) - ) - }), - port: 5555, - status: 302, - } - )) - } - ] - ); - }); -} - -async fn backend_with_filters( - parent: Resource, - backend_for_parent: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // There should be a default route. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - let backends = [backend_for_parent.clone()]; - let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( - Some(&backends), - None, - Some(vec![ - k8s_gateway_api::HttpRouteFilter::RequestHeaderModifier { - request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { - set: Some(vec![k8s_gateway_api::HttpHeader { - name: "set".to_string(), - value: "set-value".to_string(), - }]), - add: Some(vec![k8s_gateway_api::HttpHeader { - name: "add".to_string(), - value: "add-value".to_string(), - }]), - remove: Some(vec!["remove".to_string()]), - }, - }, - k8s_gateway_api::HttpRouteFilter::RequestRedirect { - request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { - scheme: Some("http".to_string()), - hostname: Some("host".to_string()), - path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { - replace_prefix_match: "/path".to_string(), - }), - port: Some(5555), - status_code: Some(302), - }, - }, - ]), - ); - let _route = create(client, route.build()).await; - await_route_status(client, ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - // There should be a route without rule filters. 
- detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let rule = assert_singleton(&route.rules); - assert_eq!(rule.filters.len(), 0); - let backends = route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_for_parent, 8888); - let filters = &backend.backend.as_ref().unwrap().filters; - assert_eq!( - *filters, - vec![ - grpc::outbound::http_route::Filter { - kind: Some( - grpc::outbound::http_route::filter::Kind::RequestHeaderModifier( - grpc::http_route::RequestHeaderModifier { - add: Some(grpc::http_types::Headers { - headers: vec![grpc::http_types::headers::Header { - name: "add".to_string(), - value: "add-value".into(), - }] - }), - set: Some(grpc::http_types::Headers { - headers: vec![grpc::http_types::headers::Header { - name: "set".to_string(), - value: "set-value".into(), - }] - }), - remove: vec!["remove".to_string()], - } - ) - ) - }, - grpc::outbound::http_route::Filter { - kind: Some(grpc::outbound::http_route::filter::Kind::Redirect( - grpc::http_route::RequestRedirect { - scheme: Some(grpc::http_types::Scheme { - r#type: Some(grpc::http_types::scheme::Type::Registered( - grpc::http_types::scheme::Registered::Http.into(), - )) - }), - host: "host".to_string(), - path: Some(linkerd2_proxy_api::http_route::PathModifier { - replace: Some( - linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( - "/path".to_string() - ) - ) - }), - port: 5555, - status: 302, - } - )) - } - ] - ); - }); -} - -async fn http_route_retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { - let _route = create( - client, - mk_http_route(ns, "foo-route", &parent, Some(4191)) - .with_annotations( - vec![ - ("retry.linkerd.io/http".to_string(), "5xx".to_string()), - ("timeout.linkerd.io/response".to_string(), "10s".to_string()), - ] - .into_iter() - .collect(), - ) - .build(), - ) - .await; - - 
await_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let rule = assert_singleton(&route.rules); - let conditions = rule - .retry - .as_ref() - .expect("retry config expected") - .conditions - .as_ref() - .expect("retry conditions expected"); - let status_range = assert_singleton(&conditions.status_ranges); - assert_eq!(status_range.start, 500); - assert_eq!(status_range.end, 599); - let timeout = rule - .timeouts - .as_ref() - .expect("timeouts expected") - .response - .as_ref() - .expect("response timeout expected"); - assert_eq!(timeout.seconds, 10); - }); -} - -async fn retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { - let _route = create( - client, - mk_http_route(ns, "foo-route", &parent, Some(4191)) - .with_annotations( - vec![ - // Route annotations override the timeout config specified - // on the service. - ("timeout.linkerd.io/request".to_string(), "5s".to_string()), - ] - .into_iter() - .collect(), - ) - .build(), - ) - .await; - await_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let rule = assert_singleton(&route.rules); - let conditions = rule - .retry - .as_ref() - .expect("retry config expected") - .conditions - .as_ref() - .expect("retry conditions expected"); - let status_range = assert_singleton(&conditions.status_ranges); - // Retry config inherited from the service. 
- assert_eq!(status_range.start, 500); - assert_eq!(status_range.end, 599); - let timeouts = rule.timeouts.as_ref().expect("timeouts expected"); - // Service timeout config overridden by route timeout config. - assert_eq!(timeouts.response, None); - let request_timeout = timeouts.request.as_ref().expect("request timeout expected"); - assert_eq!(request_timeout.seconds, 5); - }); -} - -async fn http_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { - let mut route = create(client, mk_empty_http_route(ns, "foo-route", &parent, 4191)).await; - await_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be attached. - detect_http_routes(&config, |routes| { - let route: &grpc::outbound::HttpRoute = assert_route_attached(routes, &parent); - assert_route_name_eq(route, "foo-route"); - }); - - route - .spec - .inner - .parent_refs - .as_mut() - .unwrap() - .first_mut() - .unwrap() - .name = "other".to_string(); - update(client, route.clone()).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be unattached and the default route should be present. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - route - .spec - .inner - .parent_refs - .as_mut() - .unwrap() - .first_mut() - .unwrap() - .name = parent.name(); - update(client, route).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be attached again. - detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - assert_route_name_eq(route, "foo-route"); - }); -} diff --git a/policy-test/tests/outbound_api_tcp.rs b/policy-test/tests/outbound_api_tcp.rs index 7749a6fa92fe0..53be8c9153c72 100644 --- a/policy-test/tests/outbound_api_tcp.rs +++ b/policy-test/tests/outbound_api_tcp.rs @@ -1,644 +1,97 @@ -use futures::prelude::*; -use linkerd_policy_controller_k8s_api as k8s; +use futures::StreamExt; +use linkerd_policy_controller_k8s_api::{self as k8s, gateway, policy}; use linkerd_policy_test::{ - assert_resource_meta, assert_status_accepted, await_egress_net_status, await_tcp_route_status, - create, create_cluster_scoped, create_egress_network, create_service, delete_cluster_scoped, - mk_egress_net, mk_service, outbound_api::*, update, with_temp_ns, Resource, + assert_resource_meta, await_route_accepted, create, + outbound_api::{assert_route_is_default, assert_singleton, retry_watch_outbound_policy}, + test_route::{TestParent, TestRoute}, + with_temp_ns, }; -use maplit::{btreemap, convert_args}; #[tokio::test(flavor = "current_thread")] -async fn service_with_tcp_routes_with_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - let backend_svc = create_service(&client, &ns, "backend", 8888).await; - parent_with_tcp_routes_with_backend( - Resource::Service(svc), - 
Resource::Service(backend_svc), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_tcp_routes_with_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_tcp_routes_with_backend( - Resource::EgressNetwork(egress.clone()), - Resource::EgressNetwork(egress), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_tcp_routes_with_cross_namespace_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let backend_ns_name = format!("{}-backend", ns); - let backend_ns = create_cluster_scoped( - &client, - k8s::Namespace { - metadata: k8s::ObjectMeta { - name: Some(backend_ns_name.clone()), - labels: Some(convert_args!(btreemap!( - "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), - ))), - ..Default::default() - }, - ..Default::default() - }, - ) - .await; - let backend_name = "backend"; - let backend_svc = - Resource::Service(create_service(&client, &backend_ns_name, backend_name, 8888).await); - let backends = [backend_svc.clone()]; - let route = mk_tcp_route(&ns, "foo-route", &svc, Some(4191)).with_backends(&backends); - let _route = create(&client, route.build()).await; - await_tcp_route_status(&client, &ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &svc, 4191); - - let routes = tcp_routes(&config); - let route = assert_singleton(routes); - let 
backends = tcp_route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_tcp_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_svc, 8888); - - delete_cluster_scoped(&client, backend_ns).await - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_tcp_routes_with_invalid_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - let backend = mk_service(&ns, "invalid", 4191); - - parent_with_tcp_routes_with_invalid_backend( - Resource::Service(svc), - Resource::Service(backend), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_tcp_routes_with_invalid_backend() { - with_temp_ns(|client, ns| async move { - // Create an egress network - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - let backend = mk_egress_net(&ns, "invalid"); - - parent_with_tcp_routes_with_invalid_backend( - Resource::EgressNetwork(egress), - Resource::EgressNetwork(backend), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_multiple_tcp_routes() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - parent_with_multiple_tcp_routes(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_multiple_tcp_routes() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - 
parent_with_multiple_tcp_routes(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn tcp_route_with_no_port() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let _route = create( - &client, - mk_tcp_route(&ns, "foo-route", &svc, None) - .with_backends(&[svc.clone()]) - .build(), - ) - .await; - await_tcp_route_status(&client, &ns, "foo-route").await; - - let mut rx_4191 = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let mut rx_9999 = retry_watch_outbound_policy(&client, &ns, &svc, 9999).await; - - let config_4191 = rx_4191 - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config_4191); - - let routes = tcp_routes(&config_4191); - let route = assert_singleton(routes); - assert_tcp_route_name_eq(route, "foo-route"); - - let config_9999 = rx_9999 - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config_9999); - - let routes = tcp_routes(&config_9999); - let route = assert_singleton(routes); - assert_tcp_route_name_eq(route, "foo-route"); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn producer_route() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - // A route created in the same namespace as its parent service is called - // a producer route. It should be returned in outbound policy requests - // for that service from ALL namespaces. 
- let _route = create( - &client, - mk_tcp_route(&ns, "foo-route", &svc, Some(4191)) - .with_backends(&[svc.clone()]) - .build(), - ) - .await; - await_tcp_route_status(&client, &ns, "foo-route").await; - - let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; - let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?producer_config); - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - let routes = tcp_routes(&producer_config); - let route = assert_singleton(routes); - assert_tcp_route_name_eq(route, "foo-route"); - - let routes = tcp_routes(&consumer_config); - let route = assert_singleton(routes); - assert_tcp_route_name_eq(route, "foo-route"); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn pre_existing_producer_route() { - // We test the scenario where outbound policy watches are initiated after - // a produce route already exists. - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - // A route created in the same namespace as its parent service is called - // a producer route. It should be returned in outbound policy requests - // for that service from ALL namespaces. 
- let _route = create( - &client, - mk_tcp_route(&ns, "foo-route", &svc, Some(4191)) - .with_backends(&[svc.clone()]) - .build(), - ) - .await; - await_tcp_route_status(&client, &ns, "foo-route").await; - - let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?producer_config); - - let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - // The route should be returned in queries from the producer namespace. - let routes = tcp_routes(&producer_config); - let route = assert_singleton(routes); - assert_tcp_route_name_eq(route, "foo-route"); - - // The route should be returned in queries from a consumer namespace. 
- let routes = tcp_routes(&consumer_config); - let route = assert_singleton(routes); - assert_tcp_route_name_eq(route, "foo-route"); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn consumer_route() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let consumer_ns_name = format!("{}-consumer", ns); - let consumer_ns = create_cluster_scoped( - &client, - k8s::Namespace { - metadata: k8s::ObjectMeta { - name: Some(consumer_ns_name.clone()), - labels: Some(convert_args!(btreemap!( - "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), - ))), - ..Default::default() - }, - ..Default::default() - }, - ) - .await; - - let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?producer_config); - - let mut consumer_rx = - retry_watch_outbound_policy(&client, &consumer_ns_name, &svc, 4191).await; - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - let mut other_rx = retry_watch_outbound_policy(&client, "other_ns", &svc, 4191).await; - let other_config = other_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?other_config); - - // A route created in a different namespace as its parent service is - // called a consumer route. It should be returned in outbound policy - // requests for that service ONLY when the request comes from the - // consumer namespace. 
- let _route = create( - &client, - mk_tcp_route(&consumer_ns_name, "foo-route", &svc, Some(4191)) - .with_backends(&[svc]) - .build(), - ) - .await; - await_tcp_route_status(&client, &consumer_ns_name, "foo-route").await; - - // The route should NOT be returned in queries from the producer namespace. - // There should be a default route. - assert!(producer_rx.next().now_or_never().is_none()); - - // The route should be returned in queries from the same consumer - // namespace. - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - let routes = tcp_routes(&consumer_config); - let route = assert_singleton(routes); - assert_tcp_route_name_eq(route, "foo-route"); - - // The route should NOT be returned in queries from a different consumer - // namespace. - assert!(other_rx.next().now_or_never().is_none()); - - delete_cluster_scoped(&client, consumer_ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_tcp_route_reattachment() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - tcp_route_reattachment(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_tcp_route_reattachment() { - with_temp_ns(|client, ns| async move { - // Create a egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - tcp_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -/* Helpers */ - -struct TcpRouteBuilder(k8s_gateway_api::TcpRoute); - -fn mk_tcp_route(ns: &str, name: &str, parent: &Resource, port: Option) -> TcpRouteBuilder { - use k8s_gateway_api as api; - - TcpRouteBuilder(api::TcpRoute { - 
metadata: kube::api::ObjectMeta { - namespace: Some(ns.to_string()), - name: Some(name.to_string()), - ..Default::default() - }, - spec: api::TcpRouteSpec { - inner: api::CommonRouteSpec { - parent_refs: Some(vec![api::ParentReference { - group: Some(parent.group()), - kind: Some(parent.kind()), - namespace: Some(parent.namespace()), - name: parent.name(), - section_name: None, - port, - }]), - }, - rules: vec![api::TcpRouteRule { - backend_refs: Vec::default(), - }], - }, - status: None, - }) -} - -impl TcpRouteBuilder { - fn with_backends(self, backends: &[Resource]) -> Self { - let mut route = self.0; - let backend_refs: Vec<_> = backends - .iter() - .map(|backend| k8s_gateway_api::BackendRef { - weight: None, - inner: k8s_gateway_api::BackendObjectReference { - name: backend.name(), - port: Some(8888), - group: Some(backend.group()), - kind: Some(backend.kind()), - namespace: Some(backend.namespace()), - }, - }) - .collect(); - route.spec.rules.iter_mut().for_each(|rule| { - rule.backend_refs = backend_refs.clone(); - }); - Self(route) - } - - fn build(self) -> k8s_gateway_api::TcpRoute { - self.0 - } -} - -async fn parent_with_tcp_routes_with_backend( - parent: Resource, - rule_backend: Resource, - client: &kube::Client, - ns: &str, -) { - let backends = [rule_backend.clone()]; - let route = mk_tcp_route(ns, "foo-route", &parent, Some(4191)).with_backends(&backends); - let _route = create(client, route.build()).await; - await_tcp_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() +async fn multiple_tcp_routes() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()), + ); + with_temp_ns(|client, ns| async move { + // Create a parent + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + + // Create a backend + let backend_port = 8888; + let backend = match 
P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. + gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + // Routes should be returned in sorted order by creation timestamp then + // name. To ensure that this test isn't timing dependant, routes should + // be created in alphabetical order. + let mut route_a = R::make_route( + ns.clone(), + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + route_a.meta_mut().name = Some("a-route".to_string()); + let route_a = create(&client, route_a).await; + await_route_accepted(&client, &route_a).await; + + // First route update. + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + let mut route_b = R::make_route( + ns.clone(), + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + route_b.meta_mut().name = Some("b-route".to_string()); + let route_b = create(&client, route_b).await; + await_route_accepted(&client, &route_b).await; + + // Second route update. + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + R::routes(&config, |routes| { + // Only the first TCPRoute should be returned in the config. 
+ assert!(route_a.meta_eq(R::extract_meta(&routes[0]))); + assert_eq!(routes.len(), 1); + }); + }) .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - let routes = tcp_routes(&config); - let route = assert_singleton(routes); - let backends = tcp_route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_tcp_backend_matches_parent(backend.backend.as_ref().unwrap(), &rule_backend, 8888); -} - -async fn parent_with_tcp_routes_with_invalid_backend( - parent: Resource, - backend: Resource, - client: &kube::Client, - ns: &str, -) { - let backends = [backend]; - let route = mk_tcp_route(ns, "foo-route", &parent, Some(4191)).with_backends(&backends); - let _route = create(client, route.build()).await; - await_tcp_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - let routes = tcp_routes(&config); - let route = assert_singleton(routes); - let backends = tcp_route_backends_random_available(route); - assert_singleton(backends); -} - -async fn parent_with_multiple_tcp_routes(parent: Resource, client: &kube::Client, ns: &str) { - // Routes should be returned in sorted order by creation timestamp then - // name. To ensure that this test isn't timing dependant, routes should - // be created in alphabetical order. - let _a_route = create( - client, - mk_tcp_route(ns, "a-route", &parent, Some(4191)) - .with_backends(&[parent.clone()]) - .build(), - ) - .await; - await_tcp_route_status(client, ns, "a-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - - // First route update. 
- let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - let _b_route = create( - client, - mk_tcp_route(ns, "b-route", &parent, Some(4191)) - .with_backends(&[parent.clone()]) - .build(), - ) - .await; - await_tcp_route_status(client, ns, "b-route").await; - - // Second route update. - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - let routes = tcp_routes(&config); - assert_eq!(routes.len(), 1); - assert_eq!(tcp_route_name(&routes[0]), "a-route"); -} - -async fn tcp_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { - let mut route = create( - client, - mk_tcp_route(ns, "foo-route", &parent, Some(4191)) - .with_backends(&[parent.clone()]) - .build(), - ) - .await; - await_tcp_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be attached. - let routes = tcp_routes(&config); - let tcp_route = assert_singleton(routes); - assert_tcp_route_name_eq(tcp_route, "foo-route"); - - route - .spec - .inner - .parent_refs - .as_mut() - .unwrap() - .first_mut() - .unwrap() - .name = "other".to_string(); - update(client, route.clone()).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be unattached and the default route should be present. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - route - .spec - .inner - .parent_refs - .as_mut() - .unwrap() - .first_mut() - .unwrap() - .name = parent.name(); - update(client, route).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); + } - // The route should be attached again. - // The route should be attached. - let routes = tcp_routes(&config); - let tcp_route = assert_singleton(routes); - assert_tcp_route_name_eq(tcp_route, "foo-route"); + test::().await; + test::().await; } diff --git a/policy-test/tests/outbound_api_tls.rs b/policy-test/tests/outbound_api_tls.rs deleted file mode 100644 index cf7569dd15040..0000000000000 --- a/policy-test/tests/outbound_api_tls.rs +++ /dev/null @@ -1,650 +0,0 @@ -use futures::prelude::*; -use linkerd_policy_controller_k8s_api as k8s; -use linkerd_policy_test::{ - assert_resource_meta, assert_status_accepted, await_egress_net_status, await_tls_route_status, - create, create_cluster_scoped, create_egress_network, create_service, delete_cluster_scoped, - grpc, mk_egress_net, mk_service, outbound_api::*, update, with_temp_ns, Resource, -}; -use maplit::{btreemap, convert_args}; - -#[tokio::test(flavor = "current_thread")] -async fn service_with_tls_routes_with_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - let backend_svc = create_service(&client, &ns, "backend", 8888).await; - parent_with_tls_routes_with_backend( - Resource::Service(svc), - Resource::Service(backend_svc), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_tls_routes_with_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - 
let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_tls_routes_with_backend( - Resource::EgressNetwork(egress.clone()), - Resource::EgressNetwork(egress), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_tls_routes_with_cross_namespace_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let backend_ns_name = format!("{}-backend", ns); - let backend_ns = create_cluster_scoped( - &client, - k8s::Namespace { - metadata: k8s::ObjectMeta { - name: Some(backend_ns_name.clone()), - labels: Some(convert_args!(btreemap!( - "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), - ))), - ..Default::default() - }, - ..Default::default() - }, - ) - .await; - let backend_name = "backend"; - let backend_svc = - Resource::Service(create_service(&client, &backend_ns_name, backend_name, 8888).await); - let backends = [backend_svc.clone()]; - let route = mk_tls_route(&ns, "foo-route", &svc, Some(4191)).with_backends(&backends); - let _route = create(&client, route.build()).await; - await_tls_route_status(&client, &ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &svc, 4191); - - let routes = tls_routes(&config); - let route = assert_singleton(routes); - let backends = tls_route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_tls_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_svc, 8888); - - delete_cluster_scoped(&client, backend_ns).await - }) - 
.await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_tls_routes_with_invalid_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - let backend = mk_service(&ns, "invalid", 4191); - - parent_with_tls_routes_with_invalid_backend( - Resource::Service(svc), - Resource::Service(backend), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_tls_routes_with_invalid_backend() { - with_temp_ns(|client, ns| async move { - // Create an egress network - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - let backend = mk_egress_net(&ns, "invalid"); - - parent_with_tls_routes_with_invalid_backend( - Resource::EgressNetwork(egress), - Resource::EgressNetwork(backend), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_multiple_tls_routes() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - parent_with_multiple_tls_routes(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_multiple_http_routes() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_multiple_tls_routes(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn tls_route_with_no_port() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = 
Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let _route = create( - &client, - mk_tls_route(&ns, "foo-route", &svc, None) - .with_backends(&[svc.clone()]) - .build(), - ) - .await; - await_tls_route_status(&client, &ns, "foo-route").await; - - let mut rx_4191 = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let mut rx_9999 = retry_watch_outbound_policy(&client, &ns, &svc, 9999).await; - - let config_4191 = rx_4191 - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config_4191); - - let routes = tls_routes(&config_4191); - let route = assert_singleton(routes); - assert_tls_route_name_eq(route, "foo-route"); - - let config_9999 = rx_9999 - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config_9999); - - let routes = tls_routes(&config_9999); - let route = assert_singleton(routes); - assert_tls_route_name_eq(route, "foo-route"); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn producer_route() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - // A route created in the same namespace as its parent service is called - // a producer route. It should be returned in outbound policy requests - // for that service from ALL namespaces. 
- let _route = create( - &client, - mk_tls_route(&ns, "foo-route", &svc, Some(4191)) - .with_backends(&[svc.clone()]) - .build(), - ) - .await; - await_tls_route_status(&client, &ns, "foo-route").await; - - let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; - let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?producer_config); - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - let routes = tls_routes(&producer_config); - let route = assert_singleton(routes); - assert_tls_route_name_eq(route, "foo-route"); - - let routes = tls_routes(&consumer_config); - let route = assert_singleton(routes); - assert_tls_route_name_eq(route, "foo-route"); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn pre_existing_producer_route() { - // We test the scenario where outbound policy watches are initiated after - // a produce route already exists. - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - // A route created in the same namespace as its parent service is called - // a producer route. It should be returned in outbound policy requests - // for that service from ALL namespaces. 
- let _route = create( - &client, - mk_tls_route(&ns, "foo-route", &svc, Some(4191)) - .with_backends(&[svc.clone()]) - .build(), - ) - .await; - await_tls_route_status(&client, &ns, "foo-route").await; - - let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?producer_config); - - let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - // The route should be returned in queries from the producer namespace. - let routes = tls_routes(&producer_config); - let route = assert_singleton(routes); - assert_tls_route_name_eq(route, "foo-route"); - - // The route should be returned in queries from a consumer namespace. 
- let routes = tls_routes(&consumer_config); - let route = assert_singleton(routes); - assert_tls_route_name_eq(route, "foo-route"); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn consumer_route() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let consumer_ns_name = format!("{}-consumer", ns); - let consumer_ns = create_cluster_scoped( - &client, - k8s::Namespace { - metadata: k8s::ObjectMeta { - name: Some(consumer_ns_name.clone()), - labels: Some(convert_args!(btreemap!( - "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), - ))), - ..Default::default() - }, - ..Default::default() - }, - ) - .await; - - let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?producer_config); - - let mut consumer_rx = - retry_watch_outbound_policy(&client, &consumer_ns_name, &svc, 4191).await; - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - let mut other_rx = retry_watch_outbound_policy(&client, "other_ns", &svc, 4191).await; - let other_config = other_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?other_config); - - // A route created in a different namespace as its parent service is - // called a consumer route. It should be returned in outbound policy - // requests for that service ONLY when the request comes from the - // consumer namespace. 
- let _route = create( - &client, - mk_tls_route(&consumer_ns_name, "foo-route", &svc, Some(4191)) - .with_backends(&[svc]) - .build(), - ) - .await; - await_tls_route_status(&client, &consumer_ns_name, "foo-route").await; - - // The route should NOT be returned in queries from the producer namespace. - // There should be a default route. - assert!(producer_rx.next().now_or_never().is_none()); - - // The route should be returned in queries from the same consumer - // namespace. - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - let routes = tls_routes(&consumer_config); - let route = assert_singleton(routes); - assert_tls_route_name_eq(route, "foo-route"); - - // The route should NOT be returned in queries from a different consumer - // namespace. - assert!(other_rx.next().now_or_never().is_none()); - - delete_cluster_scoped(&client, consumer_ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_tls_route_reattachment() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - tls_route_reattachment(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_tls_route_reattachment() { - with_temp_ns(|client, ns| async move { - // Create a egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - tls_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -/* Helpers */ - -struct TlsRouteBuilder(k8s_gateway_api::TlsRoute); - -fn mk_tls_route(ns: &str, name: &str, parent: &Resource, port: Option) -> TlsRouteBuilder { - use k8s_gateway_api as api; - - TlsRouteBuilder(api::TlsRoute { - 
metadata: kube::api::ObjectMeta { - namespace: Some(ns.to_string()), - name: Some(name.to_string()), - ..Default::default() - }, - spec: api::TlsRouteSpec { - inner: api::CommonRouteSpec { - parent_refs: Some(vec![api::ParentReference { - group: Some(parent.group()), - kind: Some(parent.kind()), - namespace: Some(parent.namespace()), - name: parent.name(), - section_name: None, - port, - }]), - }, - hostnames: None, - rules: vec![api::TlsRouteRule { - backend_refs: Vec::default(), - }], - }, - status: None, - }) -} - -impl TlsRouteBuilder { - fn with_backends(self, backends: &[Resource]) -> Self { - let mut route = self.0; - let backend_refs: Vec<_> = backends - .iter() - .map(|backend| k8s_gateway_api::BackendRef { - weight: None, - inner: k8s_gateway_api::BackendObjectReference { - name: backend.name(), - port: Some(8888), - group: Some(backend.group()), - kind: Some(backend.kind()), - namespace: Some(backend.namespace()), - }, - }) - .collect(); - route.spec.rules.iter_mut().for_each(|rule| { - rule.backend_refs = backend_refs.clone(); - }); - Self(route) - } - - fn build(self) -> k8s_gateway_api::TlsRoute { - self.0 - } -} - -async fn parent_with_tls_routes_with_backend( - parent: Resource, - rule_backend: Resource, - client: &kube::Client, - ns: &str, -) { - let backends = [rule_backend.clone()]; - let route = mk_tls_route(ns, "foo-route", &parent, Some(4191)).with_backends(&backends); - let _route = create(client, route.build()).await; - await_tls_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - let routes = tls_routes(&config); - let route = assert_route_attached(routes, &parent); - let backends = tls_route_backends_random_available(route); - let backend = 
assert_singleton(backends); - assert_tls_backend_matches_parent(backend.backend.as_ref().unwrap(), &rule_backend, 8888); -} - -async fn parent_with_tls_routes_with_invalid_backend( - parent: Resource, - backend: Resource, - client: &kube::Client, - ns: &str, -) { - let backends = [backend]; - let route = mk_tls_route(ns, "foo-route", &parent, Some(4191)).with_backends(&backends); - let _route = create(client, route.build()).await; - await_tls_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - let routes = tls_routes(&config); - let route = assert_route_attached(routes, &parent); - let backends = tls_route_backends_random_available(route); - assert_singleton(backends); -} - -async fn parent_with_multiple_tls_routes(parent: Resource, client: &kube::Client, ns: &str) { - // Routes should be returned in sorted order by creation timestamp then - // name. To ensure that this test isn't timing dependant, routes should - // be created in alphabetical order. - let _a_route = create( - client, - mk_tls_route(ns, "a-route", &parent, Some(4191)) - .with_backends(&[parent.clone()]) - .build(), - ) - .await; - await_tls_route_status(client, ns, "a-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - - // First route update. - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - let _b_route = create( - client, - mk_tls_route(ns, "b-route", &parent, Some(4191)) - .with_backends(&[parent.clone()]) - .build(), - ) - .await; - await_tls_route_status(client, ns, "b-route").await; - - // Second route update. 
- let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - let routes = tls_routes(&config); - let num_routes = match parent { - Resource::EgressNetwork(_) => 3, // three routes for egress net 2 configured + 1 default - Resource::Service(_) => 2, // two routes for service - }; - assert_eq!(routes.len(), num_routes); - assert_eq!(tls_route_name(&routes[0]), "a-route"); - assert_eq!(tls_route_name(&routes[1]), "b-route"); -} - -async fn tls_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { - let mut route = create( - client, - mk_tls_route(ns, "foo-route", &parent, Some(4191)) - .with_backends(&[parent.clone()]) - .build(), - ) - .await; - await_tls_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be attached. - let routes = tls_routes(&config); - let tls_route: &grpc::outbound::TlsRoute = assert_route_attached(routes, &parent); - assert_tls_route_name_eq(tls_route, "foo-route"); - - route - .spec - .inner - .parent_refs - .as_mut() - .unwrap() - .first_mut() - .unwrap() - .name = "other".to_string(); - update(client, route.clone()).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be unattached and the default route should be present. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - route - .spec - .inner - .parent_refs - .as_mut() - .unwrap() - .first_mut() - .unwrap() - .name = parent.name(); - update(client, route).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be attached again. - // The route should be attached. - let routes = tls_routes(&config); - let tls_route: &grpc::outbound::TlsRoute = assert_route_attached(routes, &parent); - assert_tls_route_name_eq(tls_route, "foo-route"); -} diff --git a/policy-test/tests/outbound_http_route_status.rs b/policy-test/tests/outbound_http_route_status.rs index 430760a0ae825..916320b979fd1 100644 --- a/policy-test/tests/outbound_http_route_status.rs +++ b/policy-test/tests/outbound_http_route_status.rs @@ -1,250 +1,250 @@ -use k8s::Condition; -use k8s_gateway_api::{ParentReference, RouteParentStatus, RouteStatus}; -use k8s_openapi::chrono::Utc; -use kube::ResourceExt; -use linkerd_policy_controller_core::POLICY_CONTROLLER_NAME; -use linkerd_policy_controller_k8s_api as k8s; -use linkerd_policy_test::{ - await_condition, await_route_status, create, find_route_condition, mk_route, with_temp_ns, -}; - -#[tokio::test(flavor = "current_thread")] -async fn accepted_parent() { - with_temp_ns(|client, ns| async move { - // Create a parent Service - let svc_name = "test-service"; - let svc = k8s::Service { - metadata: k8s::ObjectMeta { - namespace: Some(ns.clone()), - name: Some(svc_name.to_string()), - ..Default::default() - }, - spec: Some(k8s::ServiceSpec { - type_: Some("ClusterIP".to_string()), - ports: Some(vec![k8s::ServicePort { - port: 80, - ..Default::default() - }]), - ..Default::default() - }), - ..k8s::Service::default() - }; - let svc = create(&client, svc).await; - 
let svc_ref = vec![k8s::policy::httproute::ParentReference { - group: Some("core".to_string()), - kind: Some("Service".to_string()), - namespace: svc.namespace(), - name: svc.name_unchecked(), - section_name: None, - port: Some(80), - }]; - - // Create a route that references the Service resource. - let _route = create(&client, mk_route(&ns, "test-route", Some(svc_ref))).await; - // Wait until route is updated with a status - let statuses = await_route_status(&client, &ns, "test-route").await.parents; - - let route_status = statuses - .clone() - .into_iter() - .find(|route_status| route_status.parent_ref.name == svc_name) - .expect("must have at least one parent status"); - - // Check status references to parent we have created - assert_eq!(route_status.parent_ref.group.as_deref(), Some("core")); - assert_eq!(route_status.parent_ref.kind.as_deref(), Some("Service")); - - // Check status is accepted with a status of 'True' - let cond = find_route_condition(&statuses, svc_name) - .expect("must have at least one 'Accepted' condition for accepted servuce"); - assert_eq!(cond.status, "True"); - assert_eq!(cond.reason, "Accepted") - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn no_cluster_ip() { - with_temp_ns(|client, ns| async move { - // Create a parent Service - let svc = k8s::Service { - metadata: k8s::ObjectMeta { - namespace: Some(ns.clone()), - name: Some("test-service".to_string()), - ..Default::default() - }, - spec: Some(k8s::ServiceSpec { - cluster_ip: Some("None".to_string()), - type_: Some("ClusterIP".to_string()), - ports: Some(vec![k8s::ServicePort { - port: 80, - ..Default::default() - }]), - ..Default::default() - }), - ..k8s::Service::default() - }; - let svc = create(&client, svc).await; - let svc_ref = vec![k8s::policy::httproute::ParentReference { - group: Some("core".to_string()), - kind: Some("Service".to_string()), - namespace: svc.namespace(), - name: svc.name_unchecked(), - section_name: None, - port: Some(80), - }]; - 
- // Create a route that references the Service resource. - let _route = create(&client, mk_route(&ns, "test-route", Some(svc_ref))).await; - // Wait until route is updated with a status - let status = await_route_status(&client, &ns, "test-route").await; - let cond = find_route_condition(&status.parents, "test-service") - .expect("must have at least one 'Accepted' condition set for parent"); - // Parent with no ClusterIP should not match. - assert_eq!(cond.status, "False"); - assert_eq!(cond.reason, "NoMatchingParent"); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn external_name() { - with_temp_ns(|client, ns| async move { - // Create a parent Service - let svc = k8s::Service { - metadata: k8s::ObjectMeta { - namespace: Some(ns.clone()), - name: Some("test-service".to_string()), - ..Default::default() - }, - spec: Some(k8s::ServiceSpec { - type_: Some("ExternalName".to_string()), - external_name: Some("linkerd.io".to_string()), - ports: Some(vec![k8s::ServicePort { - port: 80, - ..Default::default() - }]), - ..Default::default() - }), - ..k8s::Service::default() - }; - let svc = create(&client, svc).await; - let svc_ref = vec![k8s::policy::httproute::ParentReference { - group: Some("core".to_string()), - kind: Some("Service".to_string()), - namespace: svc.namespace(), - name: svc.name_unchecked(), - section_name: None, - port: Some(80), - }]; - - // Create a route that references the Service resource. - let _route = create(&client, mk_route(&ns, "test-route", Some(svc_ref))).await; - // Wait until route is updated with a status - let status = await_route_status(&client, &ns, "test-route").await; - let cond = find_route_condition(&status.parents, "test-service") - .expect("must have at least one 'Accepted' condition set for parent"); - // Parent with ExternalName should not match. 
- assert_eq!(cond.status, "False"); - assert_eq!(cond.reason, "NoMatchingParent"); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn multiple_statuses() { - with_temp_ns(|client, ns| async move { - // Create a parent Service - let svc_name = "test-service"; - let svc = k8s::Service { - metadata: k8s::ObjectMeta { - namespace: Some(ns.clone()), - name: Some(svc_name.to_string()), - ..Default::default() - }, - spec: Some(k8s::ServiceSpec { - type_: Some("ClusterIP".to_string()), - ports: Some(vec![k8s::ServicePort { - port: 80, - ..Default::default() - }]), - ..Default::default() - }), - ..k8s::Service::default() - }; - let svc = create(&client, svc).await; - let svc_ref = vec![k8s::policy::httproute::ParentReference { - group: Some("core".to_string()), - kind: Some("Service".to_string()), - namespace: svc.namespace(), - name: svc.name_unchecked(), - section_name: None, - port: Some(80), - }]; - - // Create a route that references the Service resource. - let _route = create(&client, mk_route(&ns, "test-route", Some(svc_ref))).await; - - // Patch a status onto the HttpRoute. 
- let value = serde_json::json!({ - "apiVersion": "policy.linkerd.io", - "kind": "HTTPRoute", - "name": "test-route", - "status": k8s::policy::httproute::HttpRouteStatus { - inner: RouteStatus { - parents: vec![RouteParentStatus { - conditions: vec![Condition { - last_transition_time: k8s::Time(Utc::now()), - message: "".to_string(), - observed_generation: None, - reason: "Accepted".to_string(), - status: "True".to_string(), - type_: "Accepted".to_string(), - }], - controller_name: "someone/else".to_string(), - parent_ref: ParentReference { - group: Some("gateway.networking.k8s.io".to_string()), - name: "foo".to_string(), - kind: Some("Gateway".to_string()), - namespace: Some("bar".to_string()), - port: None, - section_name: None, - }, - }], - }, - }, - }); - let patch = k8s::Patch::Merge(value); - let patch_params = k8s::PatchParams::apply("someone/else"); - let api = k8s::Api::::namespaced(client.clone(), &ns); - api.patch_status("test-route", &patch_params, &patch) - .await - .expect("failed to patch status"); - - await_condition( - &client, - &ns, - "test-route", - |obj: Option<&k8s::policy::HttpRoute>| -> bool { - obj.and_then(|route| route.status.as_ref()) - .map(|status| { - let statuses = &status.inner.parents; - - let other_status_found = statuses - .iter() - .any(|route_status| route_status.controller_name == "someone/else"); - - let linkerd_status_found = statuses.iter().any(|route_status| { - route_status.controller_name == POLICY_CONTROLLER_NAME - }); - - other_status_found && linkerd_status_found - }) - .unwrap_or(false) - }, - ) - .await - .expect("must have both statuses"); - }) - .await; -} +// use k8s::Condition; +// use k8s_gateway_api::{ParentReference, RouteParentStatus, RouteStatus}; +// use k8s_openapi::chrono::Utc; +// use kube::ResourceExt; +// use linkerd_policy_controller_core::POLICY_CONTROLLER_NAME; +// use linkerd_policy_controller_k8s_api as k8s; +// use linkerd_policy_test::{ +// await_condition, await_route_status, create, 
find_route_condition, mk_route, with_temp_ns, +// }; + +// #[tokio::test(flavor = "current_thread")] +// async fn accepted_parent() { +// with_temp_ns(|client, ns| async move { +// // Create a parent Service +// let svc_name = "test-service"; +// let svc = k8s::Service { +// metadata: k8s::ObjectMeta { +// namespace: Some(ns.clone()), +// name: Some(svc_name.to_string()), +// ..Default::default() +// }, +// spec: Some(k8s::ServiceSpec { +// type_: Some("ClusterIP".to_string()), +// ports: Some(vec![k8s::ServicePort { +// port: 80, +// ..Default::default() +// }]), +// ..Default::default() +// }), +// ..k8s::Service::default() +// }; +// let svc = create(&client, svc).await; +// let svc_ref = vec![k8s::policy::httproute::ParentReference { +// group: Some("core".to_string()), +// kind: Some("Service".to_string()), +// namespace: svc.namespace(), +// name: svc.name_unchecked(), +// section_name: None, +// port: Some(80), +// }]; + +// // Create a route that references the Service resource. 
+// let _route = create(&client, mk_route(&ns, "test-route", Some(svc_ref))).await; +// // Wait until route is updated with a status +// let statuses = await_route_status(&client, &ns, "test-route").await.parents; + +// let route_status = statuses +// .clone() +// .into_iter() +// .find(|route_status| route_status.parent_ref.name == svc_name) +// .expect("must have at least one parent status"); + +// // Check status references to parent we have created +// assert_eq!(route_status.parent_ref.group.as_deref(), Some("core")); +// assert_eq!(route_status.parent_ref.kind.as_deref(), Some("Service")); + +// // Check status is accepted with a status of 'True' +// let cond = find_route_condition(&statuses, svc_name) +// .expect("must have at least one 'Accepted' condition for accepted servuce"); +// assert_eq!(cond.status, "True"); +// assert_eq!(cond.reason, "Accepted") +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn no_cluster_ip() { +// with_temp_ns(|client, ns| async move { +// // Create a parent Service +// let svc = k8s::Service { +// metadata: k8s::ObjectMeta { +// namespace: Some(ns.clone()), +// name: Some("test-service".to_string()), +// ..Default::default() +// }, +// spec: Some(k8s::ServiceSpec { +// cluster_ip: Some("None".to_string()), +// type_: Some("ClusterIP".to_string()), +// ports: Some(vec![k8s::ServicePort { +// port: 80, +// ..Default::default() +// }]), +// ..Default::default() +// }), +// ..k8s::Service::default() +// }; +// let svc = create(&client, svc).await; +// let svc_ref = vec![k8s::policy::httproute::ParentReference { +// group: Some("core".to_string()), +// kind: Some("Service".to_string()), +// namespace: svc.namespace(), +// name: svc.name_unchecked(), +// section_name: None, +// port: Some(80), +// }]; + +// // Create a route that references the Service resource. 
+// let _route = create(&client, mk_route(&ns, "test-route", Some(svc_ref))).await; +// // Wait until route is updated with a status +// let status = await_route_status(&client, &ns, "test-route").await; +// let cond = find_route_condition(&status.parents, "test-service") +// .expect("must have at least one 'Accepted' condition set for parent"); +// // Parent with no ClusterIP should not match. +// assert_eq!(cond.status, "False"); +// assert_eq!(cond.reason, "NoMatchingParent"); +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn external_name() { +// with_temp_ns(|client, ns| async move { +// // Create a parent Service +// let svc = k8s::Service { +// metadata: k8s::ObjectMeta { +// namespace: Some(ns.clone()), +// name: Some("test-service".to_string()), +// ..Default::default() +// }, +// spec: Some(k8s::ServiceSpec { +// type_: Some("ExternalName".to_string()), +// external_name: Some("linkerd.io".to_string()), +// ports: Some(vec![k8s::ServicePort { +// port: 80, +// ..Default::default() +// }]), +// ..Default::default() +// }), +// ..k8s::Service::default() +// }; +// let svc = create(&client, svc).await; +// let svc_ref = vec![k8s::policy::httproute::ParentReference { +// group: Some("core".to_string()), +// kind: Some("Service".to_string()), +// namespace: svc.namespace(), +// name: svc.name_unchecked(), +// section_name: None, +// port: Some(80), +// }]; + +// // Create a route that references the Service resource. +// let _route = create(&client, mk_route(&ns, "test-route", Some(svc_ref))).await; +// // Wait until route is updated with a status +// let status = await_route_status(&client, &ns, "test-route").await; +// let cond = find_route_condition(&status.parents, "test-service") +// .expect("must have at least one 'Accepted' condition set for parent"); +// // Parent with ExternalName should not match. 
+// assert_eq!(cond.status, "False"); +// assert_eq!(cond.reason, "NoMatchingParent"); +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn multiple_statuses() { +// with_temp_ns(|client, ns| async move { +// // Create a parent Service +// let svc_name = "test-service"; +// let svc = k8s::Service { +// metadata: k8s::ObjectMeta { +// namespace: Some(ns.clone()), +// name: Some(svc_name.to_string()), +// ..Default::default() +// }, +// spec: Some(k8s::ServiceSpec { +// type_: Some("ClusterIP".to_string()), +// ports: Some(vec![k8s::ServicePort { +// port: 80, +// ..Default::default() +// }]), +// ..Default::default() +// }), +// ..k8s::Service::default() +// }; +// let svc = create(&client, svc).await; +// let svc_ref = vec![k8s::policy::httproute::ParentReference { +// group: Some("core".to_string()), +// kind: Some("Service".to_string()), +// namespace: svc.namespace(), +// name: svc.name_unchecked(), +// section_name: None, +// port: Some(80), +// }]; + +// // Create a route that references the Service resource. +// let _route = create(&client, mk_route(&ns, "test-route", Some(svc_ref))).await; + +// // Patch a status onto the HttpRoute. 
+// let value = serde_json::json!({ +// "apiVersion": "policy.linkerd.io", +// "kind": "HTTPRoute", +// "name": "test-route", +// "status": k8s::policy::httproute::HttpRouteStatus { +// inner: RouteStatus { +// parents: vec![RouteParentStatus { +// conditions: vec![Condition { +// last_transition_time: k8s::Time(Utc::now()), +// message: "".to_string(), +// observed_generation: None, +// reason: "Accepted".to_string(), +// status: "True".to_string(), +// type_: "Accepted".to_string(), +// }], +// controller_name: "someone/else".to_string(), +// parent_ref: ParentReference { +// group: Some("gateway.networking.k8s.io".to_string()), +// name: "foo".to_string(), +// kind: Some("Gateway".to_string()), +// namespace: Some("bar".to_string()), +// port: None, +// section_name: None, +// }, +// }], +// }, +// }, +// }); +// let patch = k8s::Patch::Merge(value); +// let patch_params = k8s::PatchParams::apply("someone/else"); +// let api = k8s::Api::::namespaced(client.clone(), &ns); +// api.patch_status("test-route", &patch_params, &patch) +// .await +// .expect("failed to patch status"); + +// await_condition( +// &client, +// &ns, +// "test-route", +// |obj: Option<&k8s::policy::HttpRoute>| -> bool { +// obj.and_then(|route| route.status.as_ref()) +// .map(|status| { +// let statuses = &status.inner.parents; + +// let other_status_found = statuses +// .iter() +// .any(|route_status| route_status.controller_name == "someone/else"); + +// let linkerd_status_found = statuses.iter().any(|route_status| { +// route_status.controller_name == POLICY_CONTROLLER_NAME +// }); + +// other_status_found && linkerd_status_found +// }) +// .unwrap_or(false) +// }, +// ) +// .await +// .expect("must have both statuses"); +// }) +// .await; +// }