From ee11a7855843eff68b99ff3bd5e91645f345999f Mon Sep 17 00:00:00 2001 From: Alex Leong Date: Fri, 13 Dec 2024 18:29:07 +0000 Subject: [PATCH 1/9] WIP Signed-off-by: Alex Leong --- policy-test/src/grpc.rs | 16 - policy-test/src/lib.rs | 43 +- policy-test/src/outbound_api.rs | 95 +-- policy-test/src/test_route.rs | 736 ++++++++++++++++++++++ policy-test/tests/outbound_api.rs | 271 ++++++++ policy-test/tests/outbound_api_gateway.rs | 40 -- policy-test/tests/outbound_api_linkerd.rs | 17 - 7 files changed, 1063 insertions(+), 155 deletions(-) create mode 100644 policy-test/src/test_route.rs create mode 100644 policy-test/tests/outbound_api.rs diff --git a/policy-test/src/grpc.rs b/policy-test/src/grpc.rs index b640c0b26724c..949c5afde2f23 100644 --- a/policy-test/src/grpc.rs +++ b/policy-test/src/grpc.rs @@ -291,22 +291,6 @@ impl OutboundPolicyClient { Ok(rsp.into_inner()) } - pub async fn watch( - &mut self, - ns: &str, - svc: &k8s::Service, - port: u16, - ) -> Result, tonic::Status> { - let address = svc - .spec - .as_ref() - .expect("Service must have a spec") - .cluster_ip - .as_ref() - .expect("Service must have a cluster ip"); - self.watch_ip(ns, address, port).await - } - pub async fn watch_ip( &mut self, ns: &str, diff --git a/policy-test/src/lib.rs b/policy-test/src/lib.rs index da69d55a10d40..f0c9ce233d038 100644 --- a/policy-test/src/lib.rs +++ b/policy-test/src/lib.rs @@ -6,6 +6,7 @@ pub mod bb; pub mod curl; pub mod grpc; pub mod outbound_api; +pub mod test_route; pub mod web; use kube::runtime::wait::Condition; @@ -15,6 +16,7 @@ use linkerd_policy_controller_k8s_api::{ ResourceExt, }; use maplit::{btreemap, convert_args}; +use test_route::TestRoute; use tokio::time; use tracing::Instrument; @@ -208,22 +210,23 @@ pub async fn await_pod_ip(client: &kube::Client, ns: &str, name: &str) -> std::n // Waits until an HttpRoute with the given namespace and name has a status set // on it, then returns the generic route status representation. 
-pub async fn await_route_status( +pub async fn await_route_status( client: &kube::Client, - ns: &str, - name: &str, -) -> k8s::policy::httproute::RouteStatus { - use k8s::policy::httproute as api; - let route_status = await_condition(client, ns, name, |obj: Option<&api::HttpRoute>| -> bool { - obj.and_then(|route| route.status.as_ref()).is_some() - }) + route: &R, +) -> Vec { + await_condition( + client, + &route.namespace().unwrap(), + &route.name_unchecked(), + |obj: Option<&R>| -> bool { obj.and_then(|route| route.conditions()).is_some() }, + ) .await .expect("must fetch route") - .status + .conditions() .expect("route must contain a status representation") - .inner; - tracing::trace!(?route_status, name, ns, "got route status"); - route_status + .into_iter() + .map(|c| c.clone()) + .collect() } // Waits until an HttpRoute with the given namespace and name has a status set @@ -591,17 +594,21 @@ pub fn mk_egress_net(ns: &str, name: &str) -> k8s::policy::EgressNetwork { } #[track_caller] -pub fn assert_resource_meta(meta: &Option, resource: &Resource, port: u16) { +pub fn assert_resource_meta( + meta: &Option, + parent_ref: ParentReference, + port: u16, +) { println!("meta: {:?}", meta); - tracing::debug!(?meta, ?resource, port, "Asserting service metadata"); + tracing::debug!(?meta, ?parent_ref, port, "Asserting parent metadata"); assert_eq!( meta, &Some(grpc::meta::Metadata { kind: Some(grpc::meta::metadata::Kind::Resource(grpc::meta::Resource { - group: resource.group(), - kind: resource.kind(), - name: resource.name(), - namespace: resource.namespace(), + group: parent_ref.group.unwrap(), + kind: parent_ref.kind.unwrap(), + name: parent_ref.name, + namespace: parent_ref.namespace.unwrap(), section: "".to_string(), port: port.into() })), diff --git a/policy-test/src/outbound_api.rs b/policy-test/src/outbound_api.rs index d8ee90e2189a2..93c04f34b99be 100644 --- a/policy-test/src/outbound_api.rs +++ b/policy-test/src/outbound_api.rs @@ -1,4 +1,5 @@ -use 
crate::{assert_resource_meta, grpc, Resource}; +use crate::{grpc, test_route::TestRoute, Resource}; +use k8s_gateway_api::ParentReference; use kube::ResourceExt; use std::time::Duration; use tokio::time; @@ -6,22 +7,17 @@ use tokio::time; pub async fn retry_watch_outbound_policy( client: &kube::Client, ns: &str, - resource: &Resource, + ip: &str, port: u16, ) -> tonic::Streaming { // Port-forward to the control plane and start watching the service's // outbound policy. let mut policy_api = grpc::OutboundPolicyClient::port_forwarded(client).await; loop { - match policy_api.watch_ip(ns, &resource.ip(), port).await { + match policy_api.watch_ip(ns, ip, port).await { Ok(rx) => return rx, Err(error) => { - tracing::error!( - ?error, - ns, - resource = resource.name(), - "failed to watch outbound policy for port 4191" - ); + tracing::error!(?error, ns, ip, port, "failed to watch outbound policy"); time::sleep(Duration::from_secs(1)).await; } } @@ -291,26 +287,23 @@ pub fn assert_backend_has_failure_filter( } #[track_caller] -pub fn assert_route_is_default(route: &grpc::outbound::HttpRoute, parent: &Resource, port: u16) { - let kind = route.metadata.as_ref().unwrap().kind.as_ref().unwrap(); - match kind { +pub fn assert_route_is_default( + route: &R::Route, + parent: &ParentReference, + port: u16, +) { + let rules = &R::rules_first_available(route); + let backends = assert_singleton(rules); + let backend = R::backend(*assert_singleton(backends)); + assert_backend_matches_reference(backend, parent, port); + + let route_meta = R::extract_meta(route); + match route_meta.kind.as_ref().unwrap() { grpc::meta::metadata::Kind::Default(_) => {} grpc::meta::metadata::Kind::Resource(r) => { panic!("route expected to be default but got resource {r:?}") } } - - let backends = route_backends_first_available(route); - let backend = assert_singleton(backends); - assert_backend_matches_parent(backend, parent, port); - - let rule = assert_singleton(&route.rules); - let route_match = 
assert_singleton(&rule.matches); - let path_match = route_match.path.as_ref().unwrap().kind.as_ref().unwrap(); - assert_eq!( - *path_match, - grpc::http_route::path_match::Kind::Prefix("/".to_string()) - ); } #[track_caller] @@ -330,49 +323,23 @@ pub fn assert_tls_route_is_default(route: &grpc::outbound::TlsRoute, parent: &Re } #[track_caller] -pub fn assert_backend_matches_parent( - backend: &grpc::outbound::http_route::RouteBackend, - parent: &Resource, +pub fn assert_backend_matches_reference( + backend: &grpc::outbound::Backend, + obj_ref: &ParentReference, port: u16, ) { - let backend = backend.backend.as_ref().unwrap(); - - match parent { - Resource::Service(svc) => { - let dst = match backend.kind.as_ref().unwrap() { - grpc::outbound::backend::Kind::Balancer(balance) => { - let kind = balance.discovery.as_ref().unwrap().kind.as_ref().unwrap(); - match kind { - grpc::outbound::backend::endpoint_discovery::Kind::Dst(dst) => &dst.path, - } - } - grpc::outbound::backend::Kind::Forward(_) => { - panic!("service default route backend must be Balancer") - } - }; - assert_eq!( - *dst, - format!( - "{}.{}.svc.{}:{}", - svc.name_unchecked(), - svc.namespace().unwrap(), - "cluster.local", - port - ) - ); + match backend.metadata.as_ref().unwrap().kind.as_ref().unwrap() { + grpc::meta::metadata::Kind::Resource(resource) => { + assert_eq!(resource.name, obj_ref.name); + assert_eq!(Some(&resource.namespace), obj_ref.namespace.as_ref()); + assert_eq!(Some(&resource.group), obj_ref.group.as_ref()); + assert_eq!(Some(&resource.kind), obj_ref.kind.as_ref()); + assert_eq!(resource.port, u32::from(port)); } - - Resource::EgressNetwork(_) => { - match backend.kind.as_ref().unwrap() { - grpc::outbound::backend::Kind::Forward(_) => {} - grpc::outbound::backend::Kind::Balancer(_) => { - panic!("egress net default route backend must be Forward") - } - }; + grpc::meta::metadata::Kind::Default(_) => { + panic!("backend expected to be resource but got default") } } - - 
assert_resource_meta(&backend.metadata, parent, port) } #[track_caller] @@ -418,7 +385,7 @@ pub fn assert_tls_backend_matches_parent( } } - assert_resource_meta(&backend.metadata, parent, port) + //assert_resource_meta(&backend.metadata, parent, port) } #[track_caller] @@ -464,7 +431,7 @@ pub fn assert_tcp_backend_matches_parent( } } - assert_resource_meta(&backend.metadata, parent, port) + //assert_resource_meta(&backend.metadata, parent, port) } #[track_caller] diff --git a/policy-test/src/test_route.rs b/policy-test/src/test_route.rs new file mode 100644 index 0000000000000..acae070142fd3 --- /dev/null +++ b/policy-test/src/test_route.rs @@ -0,0 +1,736 @@ +use k8s_gateway_api::{self as gateway, BackendRef, ParentReference}; +use k8s_openapi::Resource; +use linkerd2_proxy_api::{meta, meta::Metadata, outbound}; +use linkerd_policy_controller_k8s_api::{ + self as k8s, policy, Condition, Resource as _, ResourceExt, +}; + +use crate::outbound_api::{detect_http_routes, grpc_routes, tcp_routes, tls_routes}; + +pub trait TestRoute: + kube::Resource + + serde::Serialize + + serde::de::DeserializeOwned + + Clone + + std::fmt::Debug + + Send + + Sync + + 'static +{ + type Route; + type Backend; + type Filter; + + fn make_route( + ns: impl ToString, + parents: Vec, + rules: Vec>, + ) -> Self; + fn routes(config: &outbound::OutboundPolicy, f: F) + where + F: Fn(&[Self::Route]); + fn extract_meta(route: &Self::Route) -> &Metadata; + fn backend_filters(backend: &Self::Backend) -> Vec<&Self::Filter>; + fn rules_first_available(route: &Self::Route) -> Vec>; + fn rules_random_available(route: &Self::Route) -> Vec>; + fn backend(backend: &Self::Backend) -> &outbound::Backend; + fn conditions(&self) -> Option>; + + fn meta_eq(&self, meta: &Metadata) -> bool { + let meta = match &meta.kind { + Some(meta::metadata::Kind::Resource(r)) => r, + _ => return false, + }; + let dt = Default::default(); + self.meta().name.as_ref() == Some(&meta.name) + && self.meta().namespace.as_ref() == 
Some(&meta.namespace) + && Self::kind(&dt) == meta.kind + && Self::group(&dt) == meta.group + } +} + +pub trait TestParent: + kube::Resource + + serde::Serialize + + serde::de::DeserializeOwned + + Clone + + std::fmt::Debug + + Send + + Sync +{ + fn make_parent(ns: impl ToString) -> Self; + fn make_backend(ns: impl ToString) -> Self; + fn conditions(&self) -> Vec<&Condition>; + fn obj_ref(&self) -> ParentReference; + fn ip(&self) -> &str; +} + +impl TestRoute for gateway::HttpRoute { + type Route = outbound::HttpRoute; + type Backend = outbound::http_route::RouteBackend; + type Filter = outbound::http_route::Filter; + + fn make_route( + ns: impl ToString, + parents: Vec, + rules: Vec>, + ) -> Self { + let rules = rules + .into_iter() + .map(|backends| { + let backends = backends + .into_iter() + .map(|backend| gateway::HttpBackendRef { + backend_ref: Some(backend), + filters: None, + }) + .collect(); + gateway::HttpRouteRule { + matches: Some(vec![]), + filters: None, + backend_refs: Some(backends), + } + }) + .collect(); + gateway::HttpRoute { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some("foo-route".to_string()), + ..Default::default() + }, + spec: gateway::HttpRouteSpec { + inner: gateway::CommonRouteSpec { + parent_refs: Some(parents), + }, + hostnames: None, + rules: Some(rules), + }, + status: None, + } + } + + fn routes(config: &outbound::OutboundPolicy, f: F) + where + F: Fn(&[outbound::HttpRoute]), + { + detect_http_routes(config, f); + } + + fn extract_meta(route: &outbound::HttpRoute) -> &Metadata { + route.metadata.as_ref().unwrap() + } + + fn backend_filters( + backend: &outbound::http_route::RouteBackend, + ) -> Vec<&outbound::http_route::Filter> { + backend.filters.iter().collect() + } + + fn rules_first_available( + route: &outbound::HttpRoute, + ) -> Vec> { + route + .rules + .iter() + .map( + |rule| match rule.backends.as_ref().unwrap().kind.as_ref().unwrap() { + 
outbound::http_route::distribution::Kind::FirstAvailable(first_available) => { + first_available.backends.iter().collect() + } + _ => panic!("unexpected distribution kind"), + }, + ) + .collect() + } + + fn rules_random_available( + route: &outbound::HttpRoute, + ) -> Vec> { + route + .rules + .iter() + .map( + |rule| match rule.backends.as_ref().unwrap().kind.as_ref().unwrap() { + outbound::http_route::distribution::Kind::RandomAvailable(random_available) => { + random_available + .backends + .iter() + .map(|backend| backend.backend.as_ref().unwrap()) + .collect() + } + _ => panic!("unexpected distribution kind"), + }, + ) + .collect() + } + + fn backend(backend: &outbound::http_route::RouteBackend) -> &outbound::Backend { + backend.backend.as_ref().unwrap() + } + + fn conditions(&self) -> Option> { + self.status.as_ref().map(|status| { + status + .inner + .parents + .iter() + .map(|parent_status| &parent_status.conditions) + .flatten() + .collect() + }) + } +} + +impl TestRoute for policy::HttpRoute { + type Route = outbound::HttpRoute; + type Backend = outbound::http_route::RouteBackend; + type Filter = outbound::http_route::Filter; + + fn make_route( + ns: impl ToString, + parents: Vec, + rules: Vec>, + ) -> Self { + let rules = rules + .into_iter() + .map(|backends| { + let backends = backends + .into_iter() + .map(|backend| gateway::HttpBackendRef { + backend_ref: Some(backend), + filters: None, + }) + .collect(); + policy::httproute::HttpRouteRule { + matches: Some(vec![]), + filters: None, + timeouts: None, + backend_refs: Some(backends), + } + }) + .collect(); + policy::HttpRoute { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some("foo-route".to_string()), + ..Default::default() + }, + spec: policy::HttpRouteSpec { + inner: gateway::CommonRouteSpec { + parent_refs: Some(parents), + }, + hostnames: None, + rules: Some(rules), + }, + status: None, + } + } + + fn routes(config: &outbound::OutboundPolicy, f: F) + where + F: 
Fn(&[outbound::HttpRoute]), + { + detect_http_routes(config, f); + } + + fn extract_meta(route: &outbound::HttpRoute) -> &Metadata { + route.metadata.as_ref().unwrap() + } + + fn backend_filters( + backend: &outbound::http_route::RouteBackend, + ) -> Vec<&outbound::http_route::Filter> { + backend.filters.iter().collect() + } + + fn rules_first_available( + route: &outbound::HttpRoute, + ) -> Vec> { + route + .rules + .iter() + .map( + |rule| match rule.backends.as_ref().unwrap().kind.as_ref().unwrap() { + outbound::http_route::distribution::Kind::FirstAvailable(first_available) => { + first_available.backends.iter().collect() + } + _ => panic!("unexpected distribution kind"), + }, + ) + .collect() + } + + fn rules_random_available( + route: &outbound::HttpRoute, + ) -> Vec> { + route + .rules + .iter() + .map( + |rule| match rule.backends.as_ref().unwrap().kind.as_ref().unwrap() { + outbound::http_route::distribution::Kind::RandomAvailable(random_available) => { + random_available + .backends + .iter() + .map(|backend| backend.backend.as_ref().unwrap()) + .collect() + } + _ => panic!("unexpected distribution kind"), + }, + ) + .collect() + } + + fn backend(backend: &outbound::http_route::RouteBackend) -> &outbound::Backend { + backend.backend.as_ref().unwrap() + } + + fn conditions(&self) -> Option> { + self.status.as_ref().map(|status| { + status + .inner + .parents + .iter() + .map(|parent_status| &parent_status.conditions) + .flatten() + .collect() + }) + } +} + +impl TestRoute for gateway::GrpcRoute { + type Route = outbound::GrpcRoute; + type Backend = outbound::grpc_route::RouteBackend; + type Filter = outbound::grpc_route::Filter; + + fn make_route( + ns: impl ToString, + parents: Vec, + rules: Vec>, + ) -> Self { + let rules = rules + .into_iter() + .map(|backends| { + let backends = backends + .into_iter() + .map(|backend| gateway::GrpcRouteBackendRef { + filters: None, + inner: backend.inner, + weight: None, + }) + .collect(); + gateway::GrpcRouteRule { + 
matches: Some(vec![]), + filters: None, + backend_refs: Some(backends), + } + }) + .collect(); + gateway::GrpcRoute { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some("foo-route".to_string()), + ..Default::default() + }, + spec: gateway::GrpcRouteSpec { + inner: gateway::CommonRouteSpec { + parent_refs: Some(parents), + }, + hostnames: None, + rules: Some(rules), + }, + status: None, + } + } + + fn routes(config: &outbound::OutboundPolicy, f: F) + where + F: Fn(&[outbound::GrpcRoute]), + { + f(grpc_routes(config)); + } + + fn extract_meta(route: &outbound::GrpcRoute) -> &Metadata { + route.metadata.as_ref().unwrap() + } + + fn backend_filters( + backend: &outbound::grpc_route::RouteBackend, + ) -> Vec<&outbound::grpc_route::Filter> { + backend.filters.iter().collect() + } + + fn rules_first_available( + route: &outbound::GrpcRoute, + ) -> Vec> { + route + .rules + .iter() + .map( + |rule| match rule.backends.as_ref().unwrap().kind.as_ref().unwrap() { + outbound::grpc_route::distribution::Kind::FirstAvailable(first_available) => { + first_available.backends.iter().collect() + } + _ => panic!("unexpected distribution kind"), + }, + ) + .collect() + } + + fn rules_random_available( + route: &outbound::GrpcRoute, + ) -> Vec> { + route + .rules + .iter() + .map( + |rule| match rule.backends.as_ref().unwrap().kind.as_ref().unwrap() { + outbound::grpc_route::distribution::Kind::RandomAvailable(random_available) => { + random_available + .backends + .iter() + .map(|backend| backend.backend.as_ref().unwrap()) + .collect() + } + _ => panic!("unexpected distribution kind"), + }, + ) + .collect() + } + + fn backend(backend: &outbound::grpc_route::RouteBackend) -> &outbound::Backend { + backend.backend.as_ref().unwrap() + } + + fn conditions(&self) -> Option> { + self.status.as_ref().map(|status| { + status + .inner + .parents + .iter() + .map(|parent_status| &parent_status.conditions) + .flatten() + .collect() + }) + } +} + +impl TestRoute for 
gateway::TlsRoute { + type Route = outbound::TlsRoute; + type Backend = outbound::tls_route::RouteBackend; + type Filter = outbound::tls_route::Filter; + + fn make_route( + ns: impl ToString, + parents: Vec, + rules: Vec>, + ) -> Self { + let rules = rules + .into_iter() + .map(|backends| gateway::TlsRouteRule { + backend_refs: backends, + }) + .collect(); + gateway::TlsRoute { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some("foo-route".to_string()), + ..Default::default() + }, + spec: gateway::TlsRouteSpec { + inner: gateway::CommonRouteSpec { + parent_refs: Some(parents), + }, + hostnames: None, + rules, + }, + status: None, + } + } + + fn routes(config: &outbound::OutboundPolicy, f: F) + where + F: Fn(&[outbound::TlsRoute]), + { + f(tls_routes(config)); + } + + fn extract_meta(route: &outbound::TlsRoute) -> &Metadata { + route.metadata.as_ref().unwrap() + } + + fn backend_filters( + backend: &outbound::tls_route::RouteBackend, + ) -> Vec<&outbound::tls_route::Filter> { + backend.filters.iter().collect() + } + + fn rules_first_available( + route: &outbound::TlsRoute, + ) -> Vec> { + route + .rules + .iter() + .map( + |rule| match rule.backends.as_ref().unwrap().kind.as_ref().unwrap() { + outbound::tls_route::distribution::Kind::FirstAvailable(first_available) => { + first_available.backends.iter().collect() + } + _ => panic!("unexpected distribution kind"), + }, + ) + .collect() + } + + fn rules_random_available( + route: &outbound::TlsRoute, + ) -> Vec> { + route + .rules + .iter() + .map( + |rule| match rule.backends.as_ref().unwrap().kind.as_ref().unwrap() { + outbound::tls_route::distribution::Kind::RandomAvailable(random_available) => { + random_available + .backends + .iter() + .map(|backend| backend.backend.as_ref().unwrap()) + .collect() + } + _ => panic!("unexpected distribution kind"), + }, + ) + .collect() + } + + fn backend(backend: &outbound::tls_route::RouteBackend) -> &outbound::Backend { + 
backend.backend.as_ref().unwrap() + } + + fn conditions(&self) -> Option> { + self.status.as_ref().map(|status| { + status + .inner + .parents + .iter() + .map(|parent_status| &parent_status.conditions) + .flatten() + .collect() + }) + } +} + +impl TestRoute for gateway::TcpRoute { + type Route = outbound::OpaqueRoute; + type Backend = outbound::opaque_route::RouteBackend; + type Filter = outbound::opaque_route::Filter; + + fn make_route( + ns: impl ToString, + parents: Vec, + rules: Vec>, + ) -> Self { + let rules = rules + .into_iter() + .map(|backends| gateway::TcpRouteRule { + backend_refs: backends, + }) + .collect(); + gateway::TcpRoute { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some("foo-route".to_string()), + ..Default::default() + }, + spec: gateway::TcpRouteSpec { + inner: gateway::CommonRouteSpec { + parent_refs: Some(parents), + }, + rules, + }, + status: None, + } + } + + fn routes(config: &outbound::OutboundPolicy, f: F) + where + F: Fn(&[outbound::OpaqueRoute]), + { + f(tcp_routes(config)); + } + + fn extract_meta(route: &outbound::OpaqueRoute) -> &Metadata { + route.metadata.as_ref().unwrap() + } + + fn backend_filters( + backend: &outbound::opaque_route::RouteBackend, + ) -> Vec<&outbound::opaque_route::Filter> { + backend.filters.iter().collect() + } + + fn rules_first_available( + route: &outbound::OpaqueRoute, + ) -> Vec> { + route + .rules + .iter() + .map( + |rule| match rule.backends.as_ref().unwrap().kind.as_ref().unwrap() { + outbound::opaque_route::distribution::Kind::FirstAvailable(first_available) => { + first_available.backends.iter().collect() + } + _ => panic!("unexpected distribution kind"), + }, + ) + .collect() + } + + fn rules_random_available( + route: &outbound::OpaqueRoute, + ) -> Vec> { + route + .rules + .iter() + .map( + |rule| match rule.backends.as_ref().unwrap().kind.as_ref().unwrap() { + outbound::opaque_route::distribution::Kind::RandomAvailable( + random_available, + ) => 
random_available + .backends + .iter() + .map(|backend| backend.backend.as_ref().unwrap()) + .collect(), + _ => panic!("unexpected distribution kind"), + }, + ) + .collect() + } + + fn backend(backend: &outbound::opaque_route::RouteBackend) -> &outbound::Backend { + backend.backend.as_ref().unwrap() + } + + fn conditions(&self) -> Option> { + self.status.as_ref().map(|status| { + status + .inner + .parents + .iter() + .map(|parent_status| &parent_status.conditions) + .flatten() + .collect() + }) + } +} + +impl TestParent for k8s::Service { + fn make_parent(ns: impl ToString) -> Self { + k8s::Service { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some("my-svc".to_string()), + ..Default::default() + }, + spec: Some(k8s::ServiceSpec { + ports: Some(vec![k8s::ServicePort { + port: 4191, + ..Default::default() + }]), + cluster_ip: Some("192.168.0.2".to_string()), + ..Default::default() + }), + ..k8s::Service::default() + } + } + + fn make_backend(ns: impl ToString) -> Self { + k8s::Service { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some("backend".to_string()), + ..Default::default() + }, + spec: Some(k8s::ServiceSpec { + ports: Some(vec![k8s::ServicePort { + port: 4191, + ..Default::default() + }]), + cluster_ip: Some("192.168.0.3".to_string()), + ..Default::default() + }), + ..k8s::Service::default() + } + } + + fn conditions(&self) -> Vec<&Condition> { + self.status + .as_ref() + .unwrap() + .conditions + .as_ref() + .unwrap() + .iter() + .collect() + } + + fn obj_ref(&self) -> ParentReference { + ParentReference { + kind: Some(k8s::Service::KIND.to_string()), + name: self.name_unchecked(), + namespace: self.namespace(), + group: Some(k8s::Service::GROUP.to_string()), + section_name: None, + port: Some(4191), + } + } + + fn ip(&self) -> &str { + self.spec.as_ref().unwrap().cluster_ip.as_ref().unwrap() + } +} + +impl TestParent for policy::EgressNetwork { + fn make_parent(ns: impl ToString) -> Self { + 
policy::EgressNetwork { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some("my-egress".to_string()), + ..Default::default() + }, + spec: policy::EgressNetworkSpec { + networks: None, + traffic_policy: policy::egress_network::TrafficPolicy::Allow, + }, + status: None, + } + } + + fn make_backend(ns: impl ToString) -> Self { + Self::make_parent(ns) + } + + fn conditions(&self) -> Vec<&Condition> { + self.status.as_ref().unwrap().conditions.iter().collect() + } + + fn obj_ref(&self) -> ParentReference { + ParentReference { + kind: Some(policy::EgressNetwork::kind(&()).to_string()), + name: self.name_unchecked(), + namespace: self.namespace(), + group: Some(k8s::Service::GROUP.to_string()), + section_name: None, + port: Some(4191), + } + } + + fn ip(&self) -> &str { + // For EgressNetwork, we can just return a non-private + // IP address as our default cluster setup dictates that + // all non-private networks are considered egress. Since + // we do not modify this setting in tests for the time being, + // returning 1.1.1.1 is fine. + "1.1.1.1" + } +} diff --git a/policy-test/tests/outbound_api.rs b/policy-test/tests/outbound_api.rs new file mode 100644 index 0000000000000..24551b838c735 --- /dev/null +++ b/policy-test/tests/outbound_api.rs @@ -0,0 +1,271 @@ +use futures::StreamExt; +use k8s_gateway_api::{self as gateway}; +use linkerd_policy_controller_k8s_api::{self as k8s, policy, ResourceExt}; +use linkerd_policy_test::{ + assert_resource_meta, assert_status_accepted, await_route_status, create, grpc, + outbound_api::{ + assert_backend_matches_reference, assert_route_is_default, assert_singleton, + retry_watch_outbound_policy, + }, + test_route::{TestParent, TestRoute}, + with_temp_ns, +}; + +#[tokio::test(flavor = "current_thread")] +async fn parent_does_not_exist() { + async fn test() { + with_temp_ns(|client, ns| async move { + let port = 4191; + // Build a parent but don't apply it to the cluster. 
+ let parent = P::make_parent(&ns); + + let mut policy_api = grpc::OutboundPolicyClient::port_forwarded(&client).await; + let rsp: Result, tonic::Status> = + policy_api.watch_ip(&ns, parent.ip(), port).await; + + assert!(rsp.is_err()); + assert_eq!(rsp.err().unwrap().code(), tonic::Code::NotFound); + }) + .await + } + + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn parent_with_no_routes() { + async fn test() { + with_temp_ns(|client, ns| async move { + let port = 4191; + // Create a parent with no routes. + let parent = P::make_parent(&ns); + create(&client, parent.clone()).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. + R::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + }) + .await; + } + + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn http_route_with_no_rules() { + async fn test() { + with_temp_ns(|client, ns| async move { + let port = 4191; + let parent = P::make_parent(&ns); + create(&client, parent.clone()).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. 
+ R::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + let route = R::make_route(ns.clone(), vec![parent.obj_ref()], vec![]); + let _ = create(&client, route.clone()).await; + let status = await_route_status(&client, &route).await; + assert_status_accepted(status); + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a route with no rules. + R::routes(&config, |routes| { + let outbound_route = assert_singleton(routes); + let rules = &R::rules_first_available(outbound_route); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + assert!(rules.is_empty()); + }); + }) + .await; + } + + test::().await; + test::().await; + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn http_routes_without_backends() { + async fn test() { + with_temp_ns(|client, ns| async move { + // Create a parent + let port = 4191; + let parent = P::make_parent(&ns); + create(&client, parent.clone()).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. + R::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + // Create a route with one rule with no backends. 
+ let route = R::make_route(ns.clone(), vec![parent.obj_ref()], vec![vec![]]); + let _ = create(&client, route.clone()).await; + let status = await_route_status(&client, &route).await; + assert_status_accepted(status); + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a route with the logical backend. + R::routes(&config, |routes| { + let outbound_route = assert_singleton(routes); + let rules = &R::rules_first_available(outbound_route); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + let backends = assert_singleton(rules); + let backend = R::backend(*assert_singleton(backends)); + assert_backend_matches_reference(backend, &parent.obj_ref(), port); + }); + }) + .await; + } + + test::().await; + test::().await; + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn routes_with_backend() { + async fn test() { + with_temp_ns(|client, ns| async move { + // Create a parent + let port = 4191; + let parent = P::make_parent(&ns); + create(&client, parent.clone()).await; + + // Create a backend + let backend_port = 8888; + let backend = P::make_backend(&ns); + create(&client, backend.clone()).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. 
+ R::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + let dt = Default::default(); + let route = R::make_route( + ns, + vec![parent.obj_ref()], + vec![vec![gateway::BackendRef { + weight: None, + inner: gateway::BackendObjectReference { + group: Some(P::group(&dt).to_string()), + kind: Some(P::kind(&dt).to_string()), + name: backend.name_unchecked(), + namespace: backend.namespace(), + port: Some(backend_port), + }, + }]], + ); + create(&client, route.clone()).await; + let status = await_route_status(&client, &route).await; + assert_status_accepted(status); + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a route with a backend with no filters. + R::routes(&config, |routes| { + let outbound_route = assert_singleton(routes); + let rules = &R::rules_random_available(outbound_route); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + let backends = assert_singleton(rules); + + let filters = R::backend_filters(*assert_singleton(backends)); + assert!(filters.is_empty()); + + let outbound_backend = R::backend(*assert_singleton(backends)); + assert_backend_matches_reference( + outbound_backend, + &backend.obj_ref(), + backend_port, + ); + }); + }) + .await; + } + + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; +} diff --git a/policy-test/tests/outbound_api_gateway.rs b/policy-test/tests/outbound_api_gateway.rs index 1e96c40f92b9c..4a07158a99b80 100644 --- a/policy-test/tests/outbound_api_gateway.rs +++ b/policy-test/tests/outbound_api_gateway.rs @@ -16,46 +16,6 @@ use std::{collections::BTreeMap, time::Duration}; // These two files should be kept in sync to 
ensure that Linkerd can read and // function correctly with both types of resources. -#[tokio::test(flavor = "current_thread")] -async fn service_does_not_exist() { - with_temp_ns(|client, ns| async move { - // Build a service but don't apply it to the cluster. - let mut svc = mk_service(&ns, "my-svc", 4191); - // Give it a bogus cluster ip. - svc.spec.as_mut().unwrap().cluster_ip = Some("192.168.0.2".to_string()); - - let mut policy_api = grpc::OutboundPolicyClient::port_forwarded(&client).await; - let rsp = policy_api.watch(&ns, &svc, 4191).await; - - assert!(rsp.is_err()); - assert_eq!(rsp.err().unwrap().code(), tonic::Code::NotFound); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_no_http_routes() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - parent_with_no_http_routes(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_no_http_routes() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_no_http_routes(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - #[tokio::test(flavor = "current_thread")] async fn service_with_http_route_without_rules() { with_temp_ns(|client, ns| async move { diff --git a/policy-test/tests/outbound_api_linkerd.rs b/policy-test/tests/outbound_api_linkerd.rs index 4e1619c8b69cd..83f5ff7bd4535 100644 --- a/policy-test/tests/outbound_api_linkerd.rs +++ b/policy-test/tests/outbound_api_linkerd.rs @@ -17,23 +17,6 @@ use maplit::{btreemap, convert_args}; // These two files should be kept in sync to ensure that Linkerd can read and // function correctly with both types of resources. 
-#[tokio::test(flavor = "current_thread")] -async fn service_does_not_exist() { - with_temp_ns(|client, ns| async move { - // Build a service but don't apply it to the cluster. - let mut svc = mk_service(&ns, "my-svc", 4191); - // Give it a bogus cluster ip. - svc.spec.as_mut().unwrap().cluster_ip = Some("192.168.0.2".to_string()); - - let mut policy_api = grpc::OutboundPolicyClient::port_forwarded(&client).await; - let rsp = policy_api.watch(&ns, &svc, 4191).await; - - assert!(rsp.is_err()); - assert_eq!(rsp.err().unwrap().code(), tonic::Code::NotFound); - }) - .await; -} - #[tokio::test(flavor = "current_thread")] async fn service_with_no_http_routes() { with_temp_ns(|client, ns| async move { From 0f3a9fd539b92b1567d28b2d2643a7d2f3bfe273 Mon Sep 17 00:00:00 2001 From: Alex Leong Date: Tue, 17 Dec 2024 05:32:09 +0000 Subject: [PATCH 2/9] WIP; I need help with Send futures Signed-off-by: Alex Leong --- policy-test/src/lib.rs | 6 +- policy-test/src/outbound_api.rs | 6 +- policy-test/src/test_route.rs | 123 +- .../tests/inbound_http_route_status.rs | 658 ++- policy-test/tests/outbound_api.rs | 50 +- policy-test/tests/outbound_api_gateway.rs | 3969 ++++++++--------- policy-test/tests/outbound_api_grpc.rs | 627 ++- policy-test/tests/outbound_api_linkerd.rs | 3944 ++++++++-------- policy-test/tests/outbound_api_tcp.rs | 1287 +++--- policy-test/tests/outbound_api_tls.rs | 1299 +++--- .../tests/outbound_http_route_status.rs | 500 +-- 11 files changed, 6219 insertions(+), 6250 deletions(-) diff --git a/policy-test/src/lib.rs b/policy-test/src/lib.rs index f0c9ce233d038..73020517c1302 100644 --- a/policy-test/src/lib.rs +++ b/policy-test/src/lib.rs @@ -601,11 +601,15 @@ pub fn assert_resource_meta( ) { println!("meta: {:?}", meta); tracing::debug!(?meta, ?parent_ref, port, "Asserting parent metadata"); + let mut group = parent_ref.group.unwrap(); + if group.is_empty() { + group = "core".to_string(); + } assert_eq!( meta, &Some(grpc::meta::Metadata { kind: 
Some(grpc::meta::metadata::Kind::Resource(grpc::meta::Resource { - group: parent_ref.group.unwrap(), + group, kind: parent_ref.kind.unwrap(), name: parent_ref.name, namespace: parent_ref.namespace.unwrap(), diff --git a/policy-test/src/outbound_api.rs b/policy-test/src/outbound_api.rs index 93c04f34b99be..e32447b93deb4 100644 --- a/policy-test/src/outbound_api.rs +++ b/policy-test/src/outbound_api.rs @@ -328,11 +328,15 @@ pub fn assert_backend_matches_reference( obj_ref: &ParentReference, port: u16, ) { + let mut group = obj_ref.group.as_deref(); + if group == Some("") { + group = Some("core"); + } match backend.metadata.as_ref().unwrap().kind.as_ref().unwrap() { grpc::meta::metadata::Kind::Resource(resource) => { assert_eq!(resource.name, obj_ref.name); assert_eq!(Some(&resource.namespace), obj_ref.namespace.as_ref()); - assert_eq!(Some(&resource.group), obj_ref.group.as_ref()); + assert_eq!(Some(resource.group.as_str()), group); assert_eq!(Some(&resource.kind), obj_ref.kind.as_ref()); assert_eq!(resource.port, u32::from(port)); } diff --git a/policy-test/src/test_route.rs b/policy-test/src/test_route.rs index acae070142fd3..a74f1deb3c782 100644 --- a/policy-test/src/test_route.rs +++ b/policy-test/src/test_route.rs @@ -1,11 +1,17 @@ +use std::future::Future; + use k8s_gateway_api::{self as gateway, BackendRef, ParentReference}; use k8s_openapi::Resource; +use kube::Client; use linkerd2_proxy_api::{meta, meta::Metadata, outbound}; use linkerd_policy_controller_k8s_api::{ self as k8s, policy, Condition, Resource as _, ResourceExt, }; -use crate::outbound_api::{detect_http_routes, grpc_routes, tcp_routes, tls_routes}; +use crate::{ + create, + outbound_api::{detect_http_routes, grpc_routes, tcp_routes, tls_routes}, +}; pub trait TestRoute: kube::Resource @@ -21,11 +27,12 @@ pub trait TestRoute: type Backend; type Filter; - fn make_route( + fn create_route( + client: &Client, ns: impl ToString, parents: Vec, rules: Vec>, - ) -> Self; + ) -> impl Future; fn 
routes(config: &outbound::OutboundPolicy, f: F) where F: Fn(&[Self::Route]); @@ -49,6 +56,7 @@ pub trait TestRoute: } } +#[allow(async_fn_in_trait)] pub trait TestParent: kube::Resource + serde::Serialize @@ -58,8 +66,9 @@ pub trait TestParent: + Send + Sync { + async fn create_parent(client: &Client, ns: impl ToString) -> Self; fn make_parent(ns: impl ToString) -> Self; - fn make_backend(ns: impl ToString) -> Self; + async fn create_backend(client: &Client, ns: impl ToString) -> Self; fn conditions(&self) -> Vec<&Condition>; fn obj_ref(&self) -> ParentReference; fn ip(&self) -> &str; @@ -70,7 +79,8 @@ impl TestRoute for gateway::HttpRoute { type Backend = outbound::http_route::RouteBackend; type Filter = outbound::http_route::Filter; - fn make_route( + async fn create_route( + client: &Client, ns: impl ToString, parents: Vec, rules: Vec>, @@ -92,7 +102,7 @@ impl TestRoute for gateway::HttpRoute { } }) .collect(); - gateway::HttpRoute { + let route = gateway::HttpRoute { metadata: k8s::ObjectMeta { namespace: Some(ns.to_string()), name: Some("foo-route".to_string()), @@ -106,7 +116,8 @@ impl TestRoute for gateway::HttpRoute { rules: Some(rules), }, status: None, - } + }; + create(client, route).await } fn routes(config: &outbound::OutboundPolicy, f: F) @@ -186,7 +197,8 @@ impl TestRoute for policy::HttpRoute { type Backend = outbound::http_route::RouteBackend; type Filter = outbound::http_route::Filter; - fn make_route( + async fn create_route( + client: &Client, ns: impl ToString, parents: Vec, rules: Vec>, @@ -209,7 +221,7 @@ impl TestRoute for policy::HttpRoute { } }) .collect(); - policy::HttpRoute { + let route = policy::HttpRoute { metadata: k8s::ObjectMeta { namespace: Some(ns.to_string()), name: Some("foo-route".to_string()), @@ -223,7 +235,8 @@ impl TestRoute for policy::HttpRoute { rules: Some(rules), }, status: None, - } + }; + create(client, route).await } fn routes(config: &outbound::OutboundPolicy, f: F) @@ -303,7 +316,8 @@ impl TestRoute for 
gateway::GrpcRoute { type Backend = outbound::grpc_route::RouteBackend; type Filter = outbound::grpc_route::Filter; - fn make_route( + async fn create_route( + client: &Client, ns: impl ToString, parents: Vec, rules: Vec>, @@ -326,7 +340,7 @@ impl TestRoute for gateway::GrpcRoute { } }) .collect(); - gateway::GrpcRoute { + let route = gateway::GrpcRoute { metadata: k8s::ObjectMeta { namespace: Some(ns.to_string()), name: Some("foo-route".to_string()), @@ -340,7 +354,8 @@ impl TestRoute for gateway::GrpcRoute { rules: Some(rules), }, status: None, - } + }; + create(client, route).await } fn routes(config: &outbound::OutboundPolicy, f: F) @@ -420,7 +435,8 @@ impl TestRoute for gateway::TlsRoute { type Backend = outbound::tls_route::RouteBackend; type Filter = outbound::tls_route::Filter; - fn make_route( + async fn create_route( + client: &Client, ns: impl ToString, parents: Vec, rules: Vec>, @@ -431,7 +447,7 @@ impl TestRoute for gateway::TlsRoute { backend_refs: backends, }) .collect(); - gateway::TlsRoute { + let route = gateway::TlsRoute { metadata: k8s::ObjectMeta { namespace: Some(ns.to_string()), name: Some("foo-route".to_string()), @@ -445,7 +461,8 @@ impl TestRoute for gateway::TlsRoute { rules, }, status: None, - } + }; + create(client, route).await } fn routes(config: &outbound::OutboundPolicy, f: F) @@ -525,7 +542,8 @@ impl TestRoute for gateway::TcpRoute { type Backend = outbound::opaque_route::RouteBackend; type Filter = outbound::opaque_route::Filter; - fn make_route( + async fn create_route( + client: &Client, ns: impl ToString, parents: Vec, rules: Vec>, @@ -536,7 +554,7 @@ impl TestRoute for gateway::TcpRoute { backend_refs: backends, }) .collect(); - gateway::TcpRoute { + let route = gateway::TcpRoute { metadata: k8s::ObjectMeta { namespace: Some(ns.to_string()), name: Some("foo-route".to_string()), @@ -549,7 +567,8 @@ impl TestRoute for gateway::TcpRoute { rules, }, status: None, - } + }; + create(client, route).await } fn routes(config: 
&outbound::OutboundPolicy, f: F) @@ -625,6 +644,25 @@ impl TestRoute for gateway::TcpRoute { } impl TestParent for k8s::Service { + async fn create_parent(client: &Client, ns: impl ToString) -> Self { + let service = k8s::Service { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some("my-svc".to_string()), + ..Default::default() + }, + spec: Some(k8s::ServiceSpec { + ports: Some(vec![k8s::ServicePort { + port: 4191, + ..Default::default() + }]), + ..Default::default() + }), + ..k8s::Service::default() + }; + create(client, service).await + } + fn make_parent(ns: impl ToString) -> Self { k8s::Service { metadata: k8s::ObjectMeta { @@ -637,15 +675,14 @@ impl TestParent for k8s::Service { port: 4191, ..Default::default() }]), - cluster_ip: Some("192.168.0.2".to_string()), ..Default::default() }), ..k8s::Service::default() } } - fn make_backend(ns: impl ToString) -> Self { - k8s::Service { + async fn create_backend(client: &Client, ns: impl ToString) -> Self { + let service = k8s::Service { metadata: k8s::ObjectMeta { namespace: Some(ns.to_string()), name: Some("backend".to_string()), @@ -656,11 +693,11 @@ impl TestParent for k8s::Service { port: 4191, ..Default::default() }]), - cluster_ip: Some("192.168.0.3".to_string()), ..Default::default() }), ..k8s::Service::default() - } + }; + create(client, service).await } fn conditions(&self) -> Vec<&Condition> { @@ -690,24 +727,32 @@ impl TestParent for k8s::Service { } } +fn make_egress(ns: impl ToString) -> policy::EgressNetwork { + policy::EgressNetwork { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some("my-egress".to_string()), + ..Default::default() + }, + spec: policy::EgressNetworkSpec { + networks: None, + traffic_policy: policy::egress_network::TrafficPolicy::Allow, + }, + status: None, + } +} + impl TestParent for policy::EgressNetwork { + async fn create_parent(client: &Client, ns: impl ToString) -> Self { + create(client, make_egress(ns)).await + } + fn 
make_parent(ns: impl ToString) -> Self { - policy::EgressNetwork { - metadata: k8s::ObjectMeta { - namespace: Some(ns.to_string()), - name: Some("my-egress".to_string()), - ..Default::default() - }, - spec: policy::EgressNetworkSpec { - networks: None, - traffic_policy: policy::egress_network::TrafficPolicy::Allow, - }, - status: None, - } + make_egress(ns) } - fn make_backend(ns: impl ToString) -> Self { - Self::make_parent(ns) + async fn create_backend(_client: &Client, ns: impl ToString) -> Self { + make_egress(ns) } fn conditions(&self) -> Vec<&Condition> { @@ -719,7 +764,7 @@ impl TestParent for policy::EgressNetwork { kind: Some(policy::EgressNetwork::kind(&()).to_string()), name: self.name_unchecked(), namespace: self.namespace(), - group: Some(k8s::Service::GROUP.to_string()), + group: Some(policy::EgressNetwork::group(&()).to_string()), section_name: None, port: Some(4191), } diff --git a/policy-test/tests/inbound_http_route_status.rs b/policy-test/tests/inbound_http_route_status.rs index 51166aeed0c6e..f4d22cae3bd7e 100644 --- a/policy-test/tests/inbound_http_route_status.rs +++ b/policy-test/tests/inbound_http_route_status.rs @@ -1,351 +1,349 @@ -use kube::ResourceExt; -use linkerd_policy_controller_k8s_api as k8s; -use linkerd_policy_test::{ - await_condition, await_route_status, create, find_route_condition, mk_route, update, - with_temp_ns, -}; +// use kube::ResourceExt; +// use linkerd_policy_controller_k8s_api as k8s; +// use linkerd_policy_test::{ +// await_condition, await_route_status, create, find_route_condition, mk_route, update, +// with_temp_ns, +// }; -#[tokio::test(flavor = "current_thread")] -async fn inbound_accepted_parent() { - with_temp_ns(|client, ns| async move { - // Create a test 'Server' - let server_name = "test-accepted-server"; - let server = k8s::policy::Server { - metadata: k8s::ObjectMeta { - namespace: Some(ns.to_string()), - name: Some(server_name.to_string()), - ..Default::default() - }, - spec: k8s::policy::ServerSpec { 
- selector: k8s::policy::server::Selector::Pod(k8s::labels::Selector::from_iter( - Some(("app", server_name)), - )), - port: k8s::policy::server::Port::Name("http".to_string()), - proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), - access_policy: None, - }, - }; - let server = create(&client, server).await; - let srv_ref = vec![k8s::policy::httproute::ParentReference { - group: Some("policy.linkerd.io".to_string()), - kind: Some("Server".to_string()), - namespace: server.namespace(), - name: server.name_unchecked(), - section_name: None, - port: None, - }]; +// #[tokio::test(flavor = "current_thread")] +// async fn inbound_accepted_parent() { +// with_temp_ns(|client, ns| async move { +// // Create a test 'Server' +// let server_name = "test-accepted-server"; +// let server = k8s::policy::Server { +// metadata: k8s::ObjectMeta { +// namespace: Some(ns.to_string()), +// name: Some(server_name.to_string()), +// ..Default::default() +// }, +// spec: k8s::policy::ServerSpec { +// selector: k8s::policy::server::Selector::Pod(k8s::labels::Selector::from_iter( +// Some(("app", server_name)), +// )), +// port: k8s::policy::server::Port::Name("http".to_string()), +// proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), +// access_policy: None, +// }, +// }; +// let server = create(&client, server).await; +// let srv_ref = vec![k8s::policy::httproute::ParentReference { +// group: Some("policy.linkerd.io".to_string()), +// kind: Some("Server".to_string()), +// namespace: server.namespace(), +// name: server.name_unchecked(), +// section_name: None, +// port: None, +// }]; - // Create a route that references the Server resource. - let _route = create(&client, mk_route(&ns, "test-accepted-route", Some(srv_ref))).await; - // Wait until route is updated with a status - let statuses = await_route_status(&client, &ns, "test-accepted-route") - .await - .parents; +// // Create a route that references the Server resource. 
+// let route = create(&client, mk_route(&ns, "test-accepted-route", Some(srv_ref))).await; +// // Wait until route is updated with a status +// let statuses = await_route_status(&client, &route).await.parents; - let route_status = statuses - .clone() - .into_iter() - .find(|route_status| route_status.parent_ref.name == server_name) - .expect("must have at least one parent status"); +// let route_status = statuses +// .clone() +// .into_iter() +// .find(|route_status| route_status.parent_ref.name == server_name) +// .expect("must have at least one parent status"); - // Check status references to parent we have created - assert_eq!( - route_status.parent_ref.group.as_deref(), - Some("policy.linkerd.io") - ); - assert_eq!(route_status.parent_ref.kind.as_deref(), Some("Server")); +// // Check status references to parent we have created +// assert_eq!( +// route_status.parent_ref.group.as_deref(), +// Some("policy.linkerd.io") +// ); +// assert_eq!(route_status.parent_ref.kind.as_deref(), Some("Server")); - // Check status is accepted with a status of 'True' - let cond = find_route_condition(&statuses, server_name) - .expect("must have at least one 'Accepted' condition for accepted server"); - assert_eq!(cond.status, "True"); - assert_eq!(cond.reason, "Accepted") - }) - .await; -} +// // Check status is accepted with a status of 'True' +// let cond = find_route_condition(&statuses, server_name) +// .expect("must have at least one 'Accepted' condition for accepted server"); +// assert_eq!(cond.status, "True"); +// assert_eq!(cond.reason, "Accepted") +// }) +// .await; +// } -#[tokio::test(flavor = "current_thread")] -async fn inbound_multiple_parents() { - with_temp_ns(|client, ns| async move { - // Exercise accepted test with a valid, and an invalid parent reference - let srv_refs = vec![ - k8s::policy::httproute::ParentReference { - group: Some("policy.linkerd.io".to_string()), - kind: Some("Server".to_string()), - namespace: Some(ns.clone()), - name: 
"test-valid-server".to_string(), - section_name: None, - port: None, - }, - k8s::policy::httproute::ParentReference { - group: Some("policy.linkerd.io".to_string()), - kind: Some("Server".to_string()), - namespace: Some(ns.clone()), - name: "test-invalid-server".to_string(), - section_name: None, - port: None, - }, - ]; +// #[tokio::test(flavor = "current_thread")] +// async fn inbound_multiple_parents() { +// with_temp_ns(|client, ns| async move { +// // Exercise accepted test with a valid, and an invalid parent reference +// let srv_refs = vec![ +// k8s::policy::httproute::ParentReference { +// group: Some("policy.linkerd.io".to_string()), +// kind: Some("Server".to_string()), +// namespace: Some(ns.clone()), +// name: "test-valid-server".to_string(), +// section_name: None, +// port: None, +// }, +// k8s::policy::httproute::ParentReference { +// group: Some("policy.linkerd.io".to_string()), +// kind: Some("Server".to_string()), +// namespace: Some(ns.clone()), +// name: "test-invalid-server".to_string(), +// section_name: None, +// port: None, +// }, +// ]; - // Create only one of the parents - let server = k8s::policy::Server { - metadata: k8s::ObjectMeta { - namespace: Some(ns.to_string()), - name: Some("test-valid-server".to_string()), - ..Default::default() - }, - spec: k8s::policy::ServerSpec { - selector: k8s::policy::server::Selector::Pod(k8s::labels::Selector::from_iter( - Some(("app", "test-valid-server")), - )), - port: k8s::policy::server::Port::Name("http".to_string()), - proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), - access_policy: None, - }, - }; - let _server = create(&client, server).await; +// // Create only one of the parents +// let server = k8s::policy::Server { +// metadata: k8s::ObjectMeta { +// namespace: Some(ns.to_string()), +// name: Some("test-valid-server".to_string()), +// ..Default::default() +// }, +// spec: k8s::policy::ServerSpec { +// selector: 
k8s::policy::server::Selector::Pod(k8s::labels::Selector::from_iter( +// Some(("app", "test-valid-server")), +// )), +// port: k8s::policy::server::Port::Name("http".to_string()), +// proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), +// access_policy: None, +// }, +// }; +// let _server = create(&client, server).await; - // Create a route that references both parents. - let _route = create( - &client, - mk_route(&ns, "test-multiple-parents-route", Some(srv_refs)), - ) - .await; - // Wait until route is updated with a status - let parent_status = await_route_status(&client, &ns, "test-multiple-parents-route") - .await - .parents; +// // Create a route that references both parents. +// let _route = create( +// &client, +// mk_route(&ns, "test-multiple-parents-route", Some(srv_refs)), +// ) +// .await; +// // Wait until route is updated with a status +// let parent_status = await_route_status(&client, &ns, "test-multiple-parents-route") +// .await +// .parents; - // Find status for invalid parent and extract the condition - let invalid_cond = find_route_condition(&parent_status, "test-invalid-server") - .expect("must have at least one 'Accepted' condition set for invalid parent"); - // Route shouldn't be accepted - assert_eq!(invalid_cond.status, "False"); - assert_eq!(invalid_cond.reason, "NoMatchingParent"); +// // Find status for invalid parent and extract the condition +// let invalid_cond = find_route_condition(&parent_status, "test-invalid-server") +// .expect("must have at least one 'Accepted' condition set for invalid parent"); +// // Route shouldn't be accepted +// assert_eq!(invalid_cond.status, "False"); +// assert_eq!(invalid_cond.reason, "NoMatchingParent"); - // Find status for valid parent and extract the condition - let valid_cond = find_route_condition(&parent_status, "test-valid-server") - .expect("must have at least one 'Accepted' condition set for valid parent"); - assert_eq!(valid_cond.status, "True"); - 
assert_eq!(valid_cond.reason, "Accepted") - }) - .await -} +// // Find status for valid parent and extract the condition +// let valid_cond = find_route_condition(&parent_status, "test-valid-server") +// .expect("must have at least one 'Accepted' condition set for valid parent"); +// assert_eq!(valid_cond.status, "True"); +// assert_eq!(valid_cond.reason, "Accepted") +// }) +// .await +// } -#[tokio::test(flavor = "current_thread")] -async fn inbound_no_parent_ref_patch() { - with_temp_ns(|client, ns| async move { - // Create a test 'Server' - let server_name = "test-accepted-server"; - let server = k8s::policy::Server { - metadata: k8s::ObjectMeta { - namespace: Some(ns.to_string()), - name: Some(server_name.to_string()), - ..Default::default() - }, - spec: k8s::policy::ServerSpec { - selector: k8s::policy::server::Selector::Pod(k8s::labels::Selector::from_iter( - Some(("app", server_name)), - )), - port: k8s::policy::server::Port::Name("http".to_string()), - proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), - access_policy: None, - }, - }; - let server = create(&client, server).await; - let srv_ref = vec![k8s::policy::httproute::ParentReference { - group: Some("policy.linkerd.io".to_string()), - kind: Some("Server".to_string()), - namespace: server.namespace(), - name: server.name_unchecked(), - section_name: None, - port: None, - }]; - // Create a route with a parent reference. 
- let route = create( - &client, - mk_route(&ns, "test-no-parent-refs-route", Some(srv_ref)), - ) - .await; +// #[tokio::test(flavor = "current_thread")] +// async fn inbound_no_parent_ref_patch() { +// with_temp_ns(|client, ns| async move { +// // Create a test 'Server' +// let server_name = "test-accepted-server"; +// let server = k8s::policy::Server { +// metadata: k8s::ObjectMeta { +// namespace: Some(ns.to_string()), +// name: Some(server_name.to_string()), +// ..Default::default() +// }, +// spec: k8s::policy::ServerSpec { +// selector: k8s::policy::server::Selector::Pod(k8s::labels::Selector::from_iter( +// Some(("app", server_name)), +// )), +// port: k8s::policy::server::Port::Name("http".to_string()), +// proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), +// access_policy: None, +// }, +// }; +// let server = create(&client, server).await; +// let srv_ref = vec![k8s::policy::httproute::ParentReference { +// group: Some("policy.linkerd.io".to_string()), +// kind: Some("Server".to_string()), +// namespace: server.namespace(), +// name: server.name_unchecked(), +// section_name: None, +// port: None, +// }]; +// // Create a route with a parent reference. +// let route = create( +// &client, +// mk_route(&ns, "test-no-parent-refs-route", Some(srv_ref)), +// ) +// .await; - // Status may not be set straight away. To account for that, wrap a - // status condition watcher in a timeout. - let status = await_route_status(&client, &ns, "test-no-parent-refs-route").await; - // If timeout has elapsed, then route did not receive a status patch - assert!( - status.parents.len() == 1, - "HTTPRoute Status should have 1 parent status" - ); +// // Status may not be set straight away. To account for that, wrap a +// // status condition watcher in a timeout. 
+// let status = await_route_status(&client, &ns, "test-no-parent-refs-route").await; +// // If timeout has elapsed, then route did not receive a status patch +// assert!( +// status.parents.len() == 1, +// "HTTPRoute Status should have 1 parent status" +// ); - // Update route to remove parent_refs - let _route = update(&client, mk_route(&ns, "test-no-parent-refs-route", None)).await; +// // Update route to remove parent_refs +// let _route = update(&client, mk_route(&ns, "test-no-parent-refs-route", None)).await; - // Wait for the status to be updated to contain no parent statuses. - await_condition::( - &client, - &ns, - &route.name_unchecked(), - |obj: Option<&k8s::policy::HttpRoute>| -> bool { - obj.and_then(|route| route.status.as_ref()) - .is_some_and(|status| status.inner.parents.is_empty()) - }, - ) - .await - .expect("HTTPRoute Status should have no parent status"); - }) - .await -} +// // Wait for the status to be updated to contain no parent statuses. +// await_condition::( +// &client, +// &ns, +// &route.name_unchecked(), +// |obj: Option<&k8s::policy::HttpRoute>| -> bool { +// obj.and_then(|route| route.status.as_ref()) +// .is_some_and(|status| status.inner.parents.is_empty()) +// }, +// ) +// .await +// .expect("HTTPRoute Status should have no parent status"); +// }) +// .await +// } -#[tokio::test(flavor = "current_thread")] -// Tests that inbound routes (routes attached to a `Server`) are properly -// reconciled when the parentReference changes. Additionally, tests that routes -// whose parentRefs do not exist are patched with an appropriate status. -async fn inbound_accepted_reconcile_no_parent() { - with_temp_ns(|client, ns| async move { - // Given a route with a nonexistent parentReference, we expect to have an - // 'Accepted' condition with 'False' as a status. 
- let server_name = "test-reconcile-inbound-server"; - let srv_ref = vec![k8s::policy::httproute::ParentReference { - group: Some("policy.linkerd.io".to_string()), - kind: Some("Server".to_string()), - namespace: Some(ns.clone()), - name: server_name.to_string(), - section_name: None, - port: None, - }]; - let _route = create( - &client, - mk_route(&ns, "test-reconcile-inbound-route", Some(srv_ref)), - ) - .await; - let route_status = await_route_status(&client, &ns, "test-reconcile-inbound-route").await; - let cond = find_route_condition(&route_status.parents, server_name) - .expect("must have at least one 'Accepted' condition set for parent"); - // Test when parent ref does not exist we get Accepted { False }. - assert_eq!(cond.status, "False"); - assert_eq!(cond.reason, "NoMatchingParent"); +// #[tokio::test(flavor = "current_thread")] +// // Tests that inbound routes (routes attached to a `Server`) are properly +// // reconciled when the parentReference changes. Additionally, tests that routes +// // whose parentRefs do not exist are patched with an appropriate status. +// async fn inbound_accepted_reconcile_no_parent() { +// with_temp_ns(|client, ns| async move { +// // Given a route with a nonexistent parentReference, we expect to have an +// // 'Accepted' condition with 'False' as a status. 
+// let server_name = "test-reconcile-inbound-server"; +// let srv_ref = vec![k8s::policy::httproute::ParentReference { +// group: Some("policy.linkerd.io".to_string()), +// kind: Some("Server".to_string()), +// namespace: Some(ns.clone()), +// name: server_name.to_string(), +// section_name: None, +// port: None, +// }]; +// let _route = create( +// &client, +// mk_route(&ns, "test-reconcile-inbound-route", Some(srv_ref)), +// ) +// .await; +// let route_status = await_route_status(&client, &ns, "test-reconcile-inbound-route").await; +// let cond = find_route_condition(&route_status.parents, server_name) +// .expect("must have at least one 'Accepted' condition set for parent"); +// // Test when parent ref does not exist we get Accepted { False }. +// assert_eq!(cond.status, "False"); +// assert_eq!(cond.reason, "NoMatchingParent"); - // Create the 'Server' that route references and expect it to be picked up - // by the index. Consequently, route will have its status reconciled. - let server = k8s::policy::Server { - metadata: k8s::ObjectMeta { - namespace: Some(ns.to_string()), - name: Some(server_name.to_string()), - ..Default::default() - }, - spec: k8s::policy::ServerSpec { - selector: k8s::policy::server::Selector::Pod(k8s::labels::Selector::from_iter( - Some(("app", server_name)), - )), - port: k8s::policy::server::Port::Name("http".to_string()), - proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), - access_policy: None, - }, - }; - create(&client, server).await; +// // Create the 'Server' that route references and expect it to be picked up +// // by the index. Consequently, route will have its status reconciled. 
+// let server = k8s::policy::Server { +// metadata: k8s::ObjectMeta { +// namespace: Some(ns.to_string()), +// name: Some(server_name.to_string()), +// ..Default::default() +// }, +// spec: k8s::policy::ServerSpec { +// selector: k8s::policy::server::Selector::Pod(k8s::labels::Selector::from_iter( +// Some(("app", server_name)), +// )), +// port: k8s::policy::server::Port::Name("http".to_string()), +// proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), +// access_policy: None, +// }, +// }; +// create(&client, server).await; - // HTTPRoute may not be patched instantly, await the route condition - // status becoming accepted. - let _route_status = await_condition( - &client, - &ns, - "test-reconcile-inbound-route", - |obj: Option<&k8s::policy::httproute::HttpRoute>| -> bool { - tracing::trace!(?obj, "got route status"); - let status = match obj.and_then(|route| route.status.as_ref()) { - Some(status) => status, - None => return false, - }; - let cond = match find_route_condition(&status.inner.parents, server_name) { - Some(cond) => cond, - None => return false, - }; - cond.status == "True" && cond.reason == "Accepted" - }, - ) - .await - .expect("must fetch route") - .status - .expect("route must contain a status representation"); - }) - .await; -} +// // HTTPRoute may not be patched instantly, await the route condition +// // status becoming accepted. 
+// let _route_status = await_condition( +// &client, +// &ns, +// "test-reconcile-inbound-route", +// |obj: Option<&k8s::policy::httproute::HttpRoute>| -> bool { +// tracing::trace!(?obj, "got route status"); +// let status = match obj.and_then(|route| route.status.as_ref()) { +// Some(status) => status, +// None => return false, +// }; +// let cond = match find_route_condition(&status.inner.parents, server_name) { +// Some(cond) => cond, +// None => return false, +// }; +// cond.status == "True" && cond.reason == "Accepted" +// }, +// ) +// .await +// .expect("must fetch route") +// .status +// .expect("route must contain a status representation"); +// }) +// .await; +// } -#[tokio::test(flavor = "current_thread")] -async fn inbound_accepted_reconcile_parent_delete() { - with_temp_ns(|client, ns| async move { - // Attach a route to a Server and expect the route to be patched with an - // Accepted status. - let server_name = "test-reconcile-delete-server"; - let server = k8s::policy::Server { - metadata: k8s::ObjectMeta { - namespace: Some(ns.to_string()), - name: Some(server_name.to_string()), - ..Default::default() - }, - spec: k8s::policy::ServerSpec { - selector: k8s::policy::server::Selector::Pod(k8s::labels::Selector::from_iter( - Some(("app", server_name)), - )), - port: k8s::policy::server::Port::Name("http".to_string()), - proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), - access_policy: None, - }, - }; - create(&client, server).await; +// #[tokio::test(flavor = "current_thread")] +// async fn inbound_accepted_reconcile_parent_delete() { +// with_temp_ns(|client, ns| async move { +// // Attach a route to a Server and expect the route to be patched with an +// // Accepted status. 
+// let server_name = "test-reconcile-delete-server"; +// let server = k8s::policy::Server { +// metadata: k8s::ObjectMeta { +// namespace: Some(ns.to_string()), +// name: Some(server_name.to_string()), +// ..Default::default() +// }, +// spec: k8s::policy::ServerSpec { +// selector: k8s::policy::server::Selector::Pod(k8s::labels::Selector::from_iter( +// Some(("app", server_name)), +// )), +// port: k8s::policy::server::Port::Name("http".to_string()), +// proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), +// access_policy: None, +// }, +// }; +// create(&client, server).await; - // Create parentReference and route - let srv_ref = vec![k8s::policy::httproute::ParentReference { - group: Some("policy.linkerd.io".to_string()), - kind: Some("Server".to_string()), - namespace: Some(ns.clone()), - name: server_name.to_string(), - section_name: None, - port: None, - }]; - let _route = create( - &client, - mk_route(&ns, "test-reconcile-delete-route", Some(srv_ref)), - ) - .await; - let route_status = await_route_status(&client, &ns, "test-reconcile-delete-route").await; - let cond = find_route_condition(&route_status.parents, server_name) - .expect("must have at least one 'Accepted' condition"); - assert_eq!(cond.status, "True"); - assert_eq!(cond.reason, "Accepted"); +// // Create parentReference and route +// let srv_ref = vec![k8s::policy::httproute::ParentReference { +// group: Some("policy.linkerd.io".to_string()), +// kind: Some("Server".to_string()), +// namespace: Some(ns.clone()), +// name: server_name.to_string(), +// section_name: None, +// port: None, +// }]; +// let _route = create( +// &client, +// mk_route(&ns, "test-reconcile-delete-route", Some(srv_ref)), +// ) +// .await; +// let route_status = await_route_status(&client, &ns, "test-reconcile-delete-route").await; +// let cond = find_route_condition(&route_status.parents, server_name) +// .expect("must have at least one 'Accepted' condition"); +// assert_eq!(cond.status, "True"); +// 
assert_eq!(cond.reason, "Accepted"); - // Delete Server - let api: kube::Api = kube::Api::namespaced(client.clone(), &ns); - api.delete( - "test-reconcile-delete-server", - &kube::api::DeleteParams::default(), - ) - .await - .expect("API delete request failed"); +// // Delete Server +// let api: kube::Api = kube::Api::namespaced(client.clone(), &ns); +// api.delete( +// "test-reconcile-delete-server", +// &kube::api::DeleteParams::default(), +// ) +// .await +// .expect("API delete request failed"); - // HTTPRoute may not be patched instantly, await the route condition - // becoming NoMatchingParent. - let _route_status = await_condition( - &client, - &ns, - "test-reconcile-delete-route", - |obj: Option<&k8s::policy::httproute::HttpRoute>| -> bool { - tracing::trace!(?obj, "got route status"); - let status = match obj.and_then(|route| route.status.as_ref()) { - Some(status) => status, - None => return false, - }; - let cond = match find_route_condition(&status.inner.parents, server_name) { - Some(cond) => cond, - None => return false, - }; - cond.status == "False" && cond.reason == "NoMatchingParent" - }, - ) - .await - .expect("must fetch route") - .status - .expect("route must contain a status representation"); - }) - .await; -} +// // HTTPRoute may not be patched instantly, await the route condition +// // becoming NoMatchingParent. 
+// let _route_status = await_condition( +// &client, +// &ns, +// "test-reconcile-delete-route", +// |obj: Option<&k8s::policy::httproute::HttpRoute>| -> bool { +// tracing::trace!(?obj, "got route status"); +// let status = match obj.and_then(|route| route.status.as_ref()) { +// Some(status) => status, +// None => return false, +// }; +// let cond = match find_route_condition(&status.inner.parents, server_name) { +// Some(cond) => cond, +// None => return false, +// }; +// cond.status == "False" && cond.reason == "NoMatchingParent" +// }, +// ) +// .await +// .expect("must fetch route") +// .status +// .expect("route must contain a status representation"); +// }) +// .await; +// } diff --git a/policy-test/tests/outbound_api.rs b/policy-test/tests/outbound_api.rs index 24551b838c735..8a1caa880795f 100644 --- a/policy-test/tests/outbound_api.rs +++ b/policy-test/tests/outbound_api.rs @@ -16,12 +16,13 @@ async fn parent_does_not_exist() { async fn test() { with_temp_ns(|client, ns| async move { let port = 4191; - // Build a parent but don't apply it to the cluster. - let parent = P::make_parent(&ns); + // Some IP address in the cluster networks which we assume is not + // used. + let ip = "10.8.255.255"; let mut policy_api = grpc::OutboundPolicyClient::port_forwarded(&client).await; let rsp: Result, tonic::Status> = - policy_api.watch_ip(&ns, parent.ip(), port).await; + policy_api.watch_ip(&ns, ip, port).await; assert!(rsp.is_err()); assert_eq!(rsp.err().unwrap().code(), tonic::Code::NotFound); @@ -39,8 +40,8 @@ async fn parent_with_no_routes() { with_temp_ns(|client, ns| async move { let port = 4191; // Create a parent with no routes. 
- let parent = P::make_parent(&ns); - create(&client, parent.clone()).await; + // let parent = P::create_parent(&client.clone(), &ns).await; + let parent = create(&client, P::make_parent(&ns)).await; let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; let config = rx @@ -53,9 +54,9 @@ async fn parent_with_no_routes() { assert_resource_meta(&config.metadata, parent.obj_ref(), port); // There should be a default route. - R::routes(&config, |routes| { + gateway::HttpRoute::routes(&config, |routes| { let route = assert_singleton(routes); - assert_route_is_default::(route, &parent.obj_ref(), port); + assert_route_is_default::(route, &parent.obj_ref(), port); }); }) .await; @@ -70,8 +71,7 @@ async fn http_route_with_no_rules() { async fn test() { with_temp_ns(|client, ns| async move { let port = 4191; - let parent = P::make_parent(&ns); - create(&client, parent.clone()).await; + let parent = create(&client, P::make_parent(&ns)).await; let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; let config = rx @@ -84,13 +84,12 @@ async fn http_route_with_no_rules() { assert_resource_meta(&config.metadata, parent.obj_ref(), port); // There should be a default route. 
- R::routes(&config, |routes| { + gateway::HttpRoute::routes(&config, |routes| { let route = assert_singleton(routes); - assert_route_is_default::(route, &parent.obj_ref(), port); + assert_route_is_default::(route, &parent.obj_ref(), port); }); - let route = R::make_route(ns.clone(), vec![parent.obj_ref()], vec![]); - let _ = create(&client, route.clone()).await; + let route = R::create_route(&client, ns.clone(), vec![parent.obj_ref()], vec![]).await; let status = await_route_status(&client, &route).await; assert_status_accepted(status); @@ -126,8 +125,7 @@ async fn http_routes_without_backends() { with_temp_ns(|client, ns| async move { // Create a parent let port = 4191; - let parent = P::make_parent(&ns); - create(&client, parent.clone()).await; + let parent = create(&client, P::make_parent(&ns)).await; let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; let config = rx @@ -140,14 +138,14 @@ async fn http_routes_without_backends() { assert_resource_meta(&config.metadata, parent.obj_ref(), port); // There should be a default route. - R::routes(&config, |routes| { + gateway::HttpRoute::routes(&config, |routes| { let route = assert_singleton(routes); - assert_route_is_default::(route, &parent.obj_ref(), port); + assert_route_is_default::(route, &parent.obj_ref(), port); }); // Create a route with one rule with no backends. - let route = R::make_route(ns.clone(), vec![parent.obj_ref()], vec![vec![]]); - let _ = create(&client, route.clone()).await; + let route = + R::create_route(&client, ns.clone(), vec![parent.obj_ref()], vec![vec![]]).await; let status = await_route_status(&client, &route).await; assert_status_accepted(status); @@ -162,7 +160,7 @@ async fn http_routes_without_backends() { // There should be a route with the logical backend. 
R::routes(&config, |routes| { - let outbound_route = assert_singleton(routes); + let outbound_route = routes.first().expect("route must exist"); let rules = &R::rules_first_available(outbound_route); assert!(route.meta_eq(R::extract_meta(outbound_route))); let backends = assert_singleton(rules); @@ -185,8 +183,7 @@ async fn routes_with_backend() { with_temp_ns(|client, ns| async move { // Create a parent let port = 4191; - let parent = P::make_parent(&ns); - create(&client, parent.clone()).await; + let parent = create(&client, P::make_parent(&ns)).await; // Create a backend let backend_port = 8888; @@ -204,13 +201,14 @@ async fn routes_with_backend() { assert_resource_meta(&config.metadata, parent.obj_ref(), port); // There should be a default route. - R::routes(&config, |routes| { + gateway::HttpRoute::routes(&config, |routes| { let route = assert_singleton(routes); - assert_route_is_default::(route, &parent.obj_ref(), port); + assert_route_is_default::(route, &parent.obj_ref(), port); }); let dt = Default::default(); - let route = R::make_route( + let route = R::create_route( + &client, ns, vec![parent.obj_ref()], vec![vec![gateway::BackendRef { @@ -239,7 +237,7 @@ async fn routes_with_backend() { // There should be a route with a backend with no filters. 
R::routes(&config, |routes| { - let outbound_route = assert_singleton(routes); + let outbound_route = routes.first().expect("route must exist"); let rules = &R::rules_random_available(outbound_route); assert!(route.meta_eq(R::extract_meta(outbound_route))); let backends = assert_singleton(rules); diff --git a/policy-test/tests/outbound_api_gateway.rs b/policy-test/tests/outbound_api_gateway.rs index 4a07158a99b80..1c56d2bd25562 100644 --- a/policy-test/tests/outbound_api_gateway.rs +++ b/policy-test/tests/outbound_api_gateway.rs @@ -1,1985 +1,1984 @@ -use futures::prelude::*; -use kube::ResourceExt; -use linkerd_policy_controller_k8s_api as k8s; -use linkerd_policy_test::{ - assert_default_accrual_backoff, assert_resource_meta, assert_status_accepted, - await_egress_net_status, await_gateway_route_status, create, create_annotated_egress_network, - create_annotated_service, create_cluster_scoped, create_egress_network, - create_opaque_egress_network, create_opaque_service, create_service, delete_cluster_scoped, - grpc, mk_egress_net, mk_service, outbound_api::*, update, with_temp_ns, Resource, -}; -use maplit::{btreemap, convert_args}; -use std::{collections::BTreeMap, time::Duration}; - -// These tests are copies of the tests in outbound_api_gateway.rs but using the -// policy.linkerd.io HttpRoute kubernetes types instead of the Gateway API ones. -// These two files should be kept in sync to ensure that Linkerd can read and -// function correctly with both types of resources. 
- -#[tokio::test(flavor = "current_thread")] -async fn service_with_http_route_without_rules() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - parent_with_http_route_without_rules(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_http_route_without_rules() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_http_route_without_rules(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_http_routes_without_backends() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - parent_with_http_routes_without_backends(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_http_routes_without_backends() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_http_routes_without_backends(Resource::EgressNetwork(egress), &client, &ns) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_http_routes_with_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - let backend_svc = create_service(&client, &ns, "backend", 8888).await; - parent_with_http_routes_with_backend( - Resource::Service(svc), - 
Resource::Service(backend_svc), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_http_routes_with_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_http_routes_with_backend( - Resource::EgressNetwork(egress.clone()), - Resource::EgressNetwork(egress), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_http_routes_with_cross_namespace_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &svc, 4191); - - // There should be a default route. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - let backend_ns_name = format!("{}-backend", ns); - let backend_ns = create_cluster_scoped( - &client, - k8s::Namespace { - metadata: k8s::ObjectMeta { - name: Some(backend_ns_name.clone()), - labels: Some(convert_args!(btreemap!( - "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), - ))), - ..Default::default() - }, - ..Default::default() - }, - ) - .await; - let backend_name = "backend"; - let backend_svc = - Resource::Service(create_service(&client, &backend_ns_name, backend_name, 8888).await); - let backends = [backend_svc.clone()]; - let route = mk_http_route(&ns, "foo-route", &svc, Some(4191)).with_backends( - Some(&backends), - Some(backend_ns_name), - None, - ); - let _route = create(&client, route.build()).await; - await_gateway_route_status(&client, &ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &svc, 4191); - - // There should be a route with a backend with no filters. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - let backends = route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_svc, 8888); - let filters = &backend.backend.as_ref().unwrap().filters; - assert_eq!(filters.len(), 0); - }); - - delete_cluster_scoped(&client, backend_ns).await - }) - .await; -} - -// TODO: Test fails until handling of invalid backends is implemented. 
-#[tokio::test(flavor = "current_thread")] -async fn service_with_http_routes_with_invalid_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - let backend = mk_service(&ns, "invalid", 4191); - - parent_with_http_routes_with_invalid_backend( - Resource::Service(svc), - Resource::Service(backend), - &client, - &ns, - ) - .await; - }) - .await; -} - -// TODO: Test fails until handling of invalid backends is implemented. -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_http_routes_with_invalid_backend() { - with_temp_ns(|client, ns| async move { - // Create an egress network - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - let backend = mk_egress_net(&ns, "invalid"); - - parent_with_http_routes_with_invalid_backend( - Resource::EgressNetwork(egress), - Resource::EgressNetwork(backend), - &client, - &ns, - ) - .await; - }) - .await; -} - -// TODO: Investigate why the policy controller is only returning one route in this -// case instead of two. -#[tokio::test(flavor = "current_thread")] -async fn service_with_multiple_http_routes() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - parent_with_multiple_http_routes(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -// TODO: Investigate why the policy controller is only returning one route in this -// case instead of two. 
-#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_multiple_http_routes() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_multiple_http_routes(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_consecutive_failure_accrual() { - with_temp_ns(|client, ns| async move { - let svc = create_annotated_service( - &client, - &ns, - "consecutive-accrual-svc", - 80, - BTreeMap::from([ - ( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), - "8".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-min-penalty".to_string(), - "10s".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-max-penalty".to_string(), - "10m".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), - "1.0".to_string(), - ), - ]), - ) - .await; - parent_with_consecutive_failure_accrual(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_consecutive_failure_accrual() { - with_temp_ns(|client, ns| async move { - let egress = create_annotated_egress_network( - &client, - &ns, - "consecutive-accrual-egress", - BTreeMap::from([ - ( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), - "8".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-min-penalty".to_string(), - "10s".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-max-penalty".to_string(), 
- "10m".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), - "1.0".to_string(), - ), - ]), - ) - .await; - let status = await_egress_net_status(&client, &ns, "consecutive-accrual-egress").await; - assert_status_accepted(status.conditions); - - parent_with_consecutive_failure_accrual(Resource::EgressNetwork(egress), &client, &ns) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_consecutive_failure_accrual_defaults_no_config() { - with_temp_ns(|client, ns| async move { - // Create a service configured to do consecutive failure accrual, but - // with no additional configuration - let svc_no_config = create_annotated_service( - &client, - &ns, - "default-accrual-svc", - 80, - BTreeMap::from([( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - )]), - ) - .await; - - parent_with_consecutive_failure_accrual_defaults_no_config( - Resource::Service(svc_no_config), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_consecutive_failure_accrual_defaults_max_fails() { - with_temp_ns(|client, ns| async move { - // Create a service configured to do consecutive failure accrual with - // max number of failures and with default backoff - let svc_max_fails = create_annotated_service( - &client, - &ns, - "no-backoff-svc", - 80, - BTreeMap::from([ - ( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), - "8".to_string(), - ), - ]), - ) - .await; - - parent_with_consecutive_failure_accrual_defaults_max_fails( - Resource::Service(svc_max_fails), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_consecutive_failure_accrual_defaults_jitter() { - with_temp_ns(|client, ns| async move { - // Create a service 
configured to do consecutive failure accrual with - // only the jitter ratio configured in the backoff - let svc_jitter = create_annotated_service( - &client, - &ns, - "only-jitter-svc", - 80, - BTreeMap::from([ - ( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), - "1.0".to_string(), - ), - ]), - ) - .await; - - parent_with_consecutive_failure_accrual_defaults_max_jitter( - Resource::Service(svc_jitter), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_consecutive_failure_accrual_defaults_no_config() { - with_temp_ns(|client, ns| async move { - // Create a egress network configured to do consecutive failure accrual, but - // with no additional configuration - let egress_no_config = create_annotated_egress_network( - &client, - &ns, - "default-accrual-egress", - BTreeMap::from([( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - )]), - ) - .await; - let status = await_egress_net_status(&client, &ns, "default-accrual-egress").await; - assert_status_accepted(status.conditions); - - parent_with_consecutive_failure_accrual_defaults_no_config( - Resource::EgressNetwork(egress_no_config), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_consecutive_failure_accrual_defaults_max_fails() { - with_temp_ns(|client, ns| async move { - // Create a egress network configured to do consecutive failure accrual with - // max number of failures and with default backoff - let egress_max_fails = create_annotated_egress_network( - &client, - &ns, - "no-backoff-egress", - BTreeMap::from([ - ( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), - "8".to_string(), - ), - ]), - ) - 
.await; - let status = await_egress_net_status(&client, &ns, "no-backoff-egress").await; - assert_status_accepted(status.conditions); - - parent_with_consecutive_failure_accrual_defaults_max_fails( - Resource::EgressNetwork(egress_max_fails), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_consecutive_failure_accrual_defaults_jitter() { - with_temp_ns(|client, ns| async move { - // Create an egress net configured to do consecutive failure accrual with - // only the jitter ratio configured in the backoff - let egress_jitter = create_annotated_egress_network( - &client, - &ns, - "only-jitter-egress", - BTreeMap::from([ - ( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), - "1.0".to_string(), - ), - ]), - ) - .await; - let status = await_egress_net_status(&client, &ns, "only-jitter-egress").await; - assert_status_accepted(status.conditions); - - parent_with_consecutive_failure_accrual_defaults_max_jitter( - Resource::EgressNetwork(egress_jitter), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_default_failure_accrual() { - with_temp_ns(|client, ns| async move { - // Default config for Service, no failure accrual - let svc_default = create_service(&client, &ns, "default-failure-accrual", 80).await; - - // Create Service with consecutive failure accrual config for - // max_failures but no mode - let svc_max_fails = create_annotated_service( - &client, - &ns, - "default-max-failure-svc", - 80, - BTreeMap::from([( - "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), - "8".to_string(), - )]), - ) - .await; - - parent_with_default_failure_accrual( - Resource::Service(svc_default), - Resource::Service(svc_max_fails), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = 
"current_thread")] -async fn egress_net_with_default_failure_accrual() { - with_temp_ns(|client, ns| async move { - // Default config for EgressNetwork, no failure accrual - let egress_default = create_egress_network(&client, &ns, "default-failure-accrual").await; - let status = await_egress_net_status(&client, &ns, "default-failure-accrual").await; - assert_status_accepted(status.conditions); - - // Create EgressNetwork with consecutive failure accrual config for - // max_failures but no mode - let egress_max_fails = create_annotated_egress_network( - &client, - &ns, - "default-max-failure-egress", - BTreeMap::from([( - "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), - "8".to_string(), - )]), - ) - .await; - let status = await_egress_net_status(&client, &ns, "default-max-failure-egress").await; - assert_status_accepted(status.conditions); - - parent_with_default_failure_accrual( - Resource::EgressNetwork(egress_default), - Resource::EgressNetwork(egress_max_fails), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn opaque_service() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_opaque_service(&client, &ns, "my-svc", 4191).await; - opaque_parent(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn opaque_egress_net() { - with_temp_ns(|client, ns| async move { - // Create an egress network - let egress = create_opaque_egress_network(&client, &ns, "my-svc", 4191).await; - opaque_parent(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn route_with_filters_service() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - let backend = mk_service(&ns, "backend", 4191); - - route_with_filters( - Resource::Service(svc), - 
Resource::Service(backend), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn route_with_filters_egress_net() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - route_with_filters( - Resource::EgressNetwork(egress.clone()), - Resource::EgressNetwork(egress), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn backend_with_filters_service() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - let backend_svc = create_service(&client, &ns, "backend", 8888).await; - backend_with_filters( - Resource::Service(svc), - Resource::Service(backend_svc), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn backend_with_filters_egress_net() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - backend_with_filters( - Resource::EgressNetwork(egress.clone()), - Resource::EgressNetwork(egress), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn http_route_with_no_port() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let mut rx_4191 = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config_4191 = rx_4191 - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config_4191); - - let mut rx_9999 = 
retry_watch_outbound_policy(&client, &ns, &svc, 9999).await; - let config_9999 = rx_9999 - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config_9999); - - // There should be a default route. - detect_http_routes(&config_4191, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - detect_http_routes(&config_9999, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 9999); - }); - - let _route = create(&client, mk_http_route(&ns, "foo-route", &svc, None).build()).await; - await_gateway_route_status(&client, &ns, "foo-route").await; - - let config_4191 = rx_4191 - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config_4191); - - // The route should apply to the service. - detect_http_routes(&config_4191, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - - let config_9999 = rx_9999 - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config_9999); - - // The route should apply to other ports too. 
- detect_http_routes(&config_9999, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn producer_route() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?producer_config); - - let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - // There should be a default route. - detect_http_routes(&producer_config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - detect_http_routes(&consumer_config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - // A route created in the same namespace as its parent service is called - // a producer route. It should be returned in outbound policy requests - // for that service from ALL namespaces. 
- let _route = create( - &client, - mk_http_route(&ns, "foo-route", &svc, Some(4191)).build(), - ) - .await; - await_gateway_route_status(&client, &ns, "foo-route").await; - - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?producer_config); - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - // The route should be returned in queries from the producer namespace. - detect_http_routes(&producer_config, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - - // The route should be returned in queries from a consumer namespace. - detect_http_routes(&consumer_config, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn pre_existing_producer_route() { - // We test the scenario where outbound policy watches are initiated after - // a produce route already exists. - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - // A route created in the same namespace as its parent service is called - // a producer route. It should be returned in outbound policy requests - // for that service from ALL namespaces. 
- let _route = create( - &client, - mk_http_route(&ns, "foo-route", &svc, Some(4191)).build(), - ) - .await; - await_gateway_route_status(&client, &ns, "foo-route").await; - - let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?producer_config); - - let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - // The route should be returned in queries from the producer namespace. - detect_http_routes(&producer_config, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - - // The route should be returned in queries from a consumer namespace. - detect_http_routes(&consumer_config, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn consumer_route() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let consumer_ns_name = format!("{}-consumer", ns); - let consumer_ns = create_cluster_scoped( - &client, - k8s::Namespace { - metadata: k8s::ObjectMeta { - name: Some(consumer_ns_name.clone()), - labels: Some(convert_args!(btreemap!( - "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), - ))), - ..Default::default() - }, - ..Default::default() - }, - ) - .await; - - let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - 
tracing::trace!(?producer_config); - - let mut consumer_rx = - retry_watch_outbound_policy(&client, &consumer_ns_name, &svc, 4191).await; - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - let mut other_rx = retry_watch_outbound_policy(&client, "other_ns", &svc, 4191).await; - let other_config = other_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?other_config); - - // There should be a default route. - detect_http_routes(&producer_config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - detect_http_routes(&consumer_config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - detect_http_routes(&other_config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - // A route created in a different namespace as its parent service is - // called a consumer route. It should be returned in outbound policy - // requests for that service ONLY when the request comes from the - // consumer namespace. - let _route = create( - &client, - mk_http_route(&consumer_ns_name, "foo-route", &svc, Some(4191)).build(), - ) - .await; - await_gateway_route_status(&client, &consumer_ns_name, "foo-route").await; - - // The route should NOT be returned in queries from the producer namespace. - // There should be a default route. - assert!(producer_rx.next().now_or_never().is_none()); - - // The route should be returned in queries from the same consumer - // namespace. 
- let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - detect_http_routes(&consumer_config, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - - // The route should NOT be returned in queries from a different consumer - // namespace. - assert!(other_rx.next().now_or_never().is_none()); - - delete_cluster_scoped(&client, consumer_ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn http_route_retries_and_timeouts_service() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - http_route_retries_and_timeouts(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn http_route_retries_and_timeouts_egress_net() { - with_temp_ns(|client, ns| async move { - // Create an egress network - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - http_route_retries_and_timeouts(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_retries_and_timeouts() { - with_temp_ns(|client, ns| async move { - // Create a service - let mut svc = mk_service(&ns, "my-svc", 4191); - svc.annotations_mut() - .insert("retry.linkerd.io/http".to_string(), "5xx".to_string()); - svc.annotations_mut() - .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); - let svc = Resource::Service(create(&client, svc).await); - - retries_and_timeouts(svc, &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_retries_and_timeouts() { - with_temp_ns(|client, ns| async move { - // Create a egress 
net - let mut egress = mk_egress_net(&ns, "my-egress"); - egress - .annotations_mut() - .insert("retry.linkerd.io/http".to_string(), "5xx".to_string()); - egress - .annotations_mut() - .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); - let egress = Resource::EgressNetwork(create(&client, egress).await); - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - retries_and_timeouts(egress, &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_http_route_reattachment() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - http_route_reattachment(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_http_route_reattachment() { - with_temp_ns(|client, ns| async move { - // Create a egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - http_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -/* Helpers */ - -struct HttpRouteBuilder(k8s_gateway_api::HttpRoute); - -fn mk_http_route(ns: &str, name: &str, parent: &Resource, port: Option) -> HttpRouteBuilder { - use k8s_gateway_api as api; - - HttpRouteBuilder(api::HttpRoute { - metadata: kube::api::ObjectMeta { - namespace: Some(ns.to_string()), - name: Some(name.to_string()), - ..Default::default() - }, - spec: api::HttpRouteSpec { - inner: api::CommonRouteSpec { - parent_refs: Some(vec![api::ParentReference { - group: Some(parent.group()), - kind: Some(parent.kind()), - namespace: Some(parent.namespace()), - name: parent.name(), - section_name: None, - port, - }]), - }, - hostnames: None, - rules: Some(vec![api::HttpRouteRule { - matches: 
Some(vec![api::HttpRouteMatch { - path: Some(api::HttpPathMatch::Exact { - value: "/foo".to_string(), - }), - headers: None, - query_params: None, - method: Some("GET".to_string()), - }]), - filters: None, - backend_refs: None, - }]), - }, - status: None, - }) -} - -impl HttpRouteBuilder { - fn with_backends( - self, - backends: Option<&[Resource]>, - backends_ns: Option, - backend_filters: Option>, - ) -> Self { - let mut route = self.0; - let backend_refs = backends.map(|backends| { - backends - .iter() - .map(|backend| k8s_gateway_api::HttpBackendRef { - backend_ref: Some(k8s_gateway_api::BackendRef { - weight: None, - inner: k8s_gateway_api::BackendObjectReference { - name: backend.name(), - port: Some(8888), - group: Some(backend.group()), - kind: Some(backend.kind()), - namespace: backends_ns.clone(), - }, - }), - filters: backend_filters.clone(), - }) - .collect() - }); - route.spec.rules.iter_mut().flatten().for_each(|rule| { - rule.backend_refs = backend_refs.clone(); - }); - Self(route) - } - - fn with_filters(self, filters: Option>) -> Self { - let mut route = self.0; - route - .spec - .rules - .iter_mut() - .flatten() - .for_each(|rule| rule.filters = filters.clone()); - Self(route) - } - - fn with_annotations(self, annotations: BTreeMap) -> Self { - let mut route = self.0; - route.metadata.annotations = Some(annotations); - Self(route) - } - - fn build(self) -> k8s_gateway_api::HttpRoute { - self.0 - } -} - -fn mk_empty_http_route( - ns: &str, - name: &str, - parent: &Resource, - port: u16, -) -> k8s_gateway_api::HttpRoute { - use k8s_gateway_api as api; - api::HttpRoute { - metadata: kube::api::ObjectMeta { - namespace: Some(ns.to_string()), - name: Some(name.to_string()), - ..Default::default() - }, - spec: api::HttpRouteSpec { - inner: api::CommonRouteSpec { - parent_refs: Some(vec![api::ParentReference { - group: Some(parent.group()), - kind: Some(parent.kind()), - namespace: Some(parent.namespace()), - name: parent.name(), - section_name: None, - 
port: Some(port), - }]), - }, - hostnames: None, - rules: Some(vec![]), - }, - status: None, - } -} - -async fn parent_with_no_http_routes(parent: Resource, client: &kube::Client, ns: &str) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); -} - -async fn parent_with_http_route_without_rules(parent: Resource, client: &kube::Client, ns: &str) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - let _route = create(client, mk_empty_http_route(ns, "foo-route", &parent, 4191)).await; - await_gateway_route_status(client, ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a route with no rules. 
- detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - assert_eq!(route.rules.len(), 0); - }); -} - -async fn parent_with_http_routes_without_backends( - parent: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - let _route = create( - client, - mk_http_route(ns, "foo-route", &parent, Some(4191)).build(), - ) - .await; - await_gateway_route_status(client, ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a route with the logical backend. - detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let backends = route_backends_first_available(route); - let backend = assert_singleton(backends); - assert_backend_matches_parent(backend, &parent, 4191); - }); -} - -async fn parent_with_http_routes_with_backend( - parent: Resource, - rule_backend: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a default route. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - let backends = [rule_backend.clone()]; - let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( - Some(&backends), - None, - None, - ); - let _route = create(client, route.build()).await; - await_gateway_route_status(client, ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a route with a backend with no filters. - detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let backends = route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &rule_backend, 8888); - let filters = &backend.backend.as_ref().unwrap().filters; - assert_eq!(filters.len(), 0); - }); -} - -async fn parent_with_http_routes_with_invalid_backend( - parent: Resource, - backend: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a default route. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - let backends = [backend]; - let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( - Some(&backends), - None, - None, - ); - let _route = create(client, route.build()).await; - await_gateway_route_status(client, ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a route with a backend. - detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let backends = route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_backend_has_failure_filter(backend); - }); -} - -async fn parent_with_multiple_http_routes(parent: Resource, client: &kube::Client, ns: &str) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - // Routes should be returned in sorted order by creation timestamp then - // name. To ensure that this test isn't timing dependant, routes should - // be created in alphabetical order. - let _a_route = create( - client, - mk_http_route(ns, "a-route", &parent, Some(4191)).build(), - ) - .await; - await_gateway_route_status(client, ns, "a-route").await; - - // First route update. 
- let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - let _b_route = create( - client, - mk_http_route(ns, "b-route", &parent, Some(4191)).build(), - ) - .await; - await_gateway_route_status(client, ns, "b-route").await; - - // Second route update. - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - detect_http_routes(&config, |routes| { - let num_routes = match parent { - Resource::EgressNetwork(_) => 3, // three routes for egress net 2 configured + 1 default - Resource::Service(_) => 2, // two routes for service - }; - assert_eq!(routes.len(), num_routes); - assert_eq!(route_name(&routes[0]), "a-route"); - assert_eq!(route_name(&routes[1]), "b-route"); - }); -} - -async fn parent_with_consecutive_failure_accrual( - parent: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - detect_failure_accrual(&config, |accrual| { - let consecutive = failure_accrual_consecutive(accrual); - assert_eq!(8, consecutive.max_failures); - assert_eq!( - &grpc::outbound::ExponentialBackoff { - min_backoff: Some(Duration::from_secs(10).try_into().unwrap()), - max_backoff: Some(Duration::from_secs(600).try_into().unwrap()), - jitter_ratio: 1.0_f32, - }, - consecutive - .backoff - .as_ref() - .expect("backoff must be configured") - ); - }); -} - -async fn parent_with_consecutive_failure_accrual_defaults_no_config( - parent: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - 
let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Expect default max_failures and default backoff - detect_failure_accrual(&config, |accrual| { - let consecutive = failure_accrual_consecutive(accrual); - assert_eq!(7, consecutive.max_failures); - assert_default_accrual_backoff!(consecutive - .backoff - .as_ref() - .expect("backoff must be configured")); - }); -} - -async fn parent_with_consecutive_failure_accrual_defaults_max_fails( - parent: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Expect default backoff and overridden max_failures - detect_failure_accrual(&config, |accrual| { - let consecutive = failure_accrual_consecutive(accrual); - assert_eq!(8, consecutive.max_failures); - assert_default_accrual_backoff!(consecutive - .backoff - .as_ref() - .expect("backoff must be configured")); - }); -} - -async fn parent_with_consecutive_failure_accrual_defaults_max_jitter( - parent: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Expect defaults for everything except for the jitter ratio - detect_failure_accrual(&config, |accrual| { - let consecutive = failure_accrual_consecutive(accrual); - assert_eq!(7, consecutive.max_failures); - assert_eq!( - &grpc::outbound::ExponentialBackoff { - min_backoff: Some(Duration::from_secs(1).try_into().unwrap()), - max_backoff: Some(Duration::from_secs(60).try_into().unwrap()), - jitter_ratio: 1.0_f32, - }, - consecutive - .backoff - .as_ref() - .expect("backoff 
must be configured") - ); - }); -} - -async fn parent_with_default_failure_accrual( - parent_default_config: Resource, - parent_max_failures: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent_default_config, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Expect failure accrual config to be default (no failure accrual) - detect_failure_accrual(&config, |accrual| { - assert!( - accrual.is_none(), - "consecutive failure accrual should not be configured for service" - ); - }); - - let mut rx = retry_watch_outbound_policy(client, ns, &parent_max_failures, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Expect failure accrual config to be default (no failure accrual) - detect_failure_accrual(&config, |accrual| { - assert!( - accrual.is_none(), - "consecutive failure accrual should not be configured for service" - ) - }); -} - -async fn opaque_parent(parent: Resource, client: &kube::Client, ns: &str) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Proxy protocol should be opaque. 
- match config.protocol.unwrap().kind.unwrap() { - grpc::outbound::proxy_protocol::Kind::Opaque(_) => {} - _ => panic!("proxy protocol must be Opaque"), - }; -} - -async fn route_with_filters(parent: Resource, backend: Resource, client: &kube::Client, ns: &str) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - let backends = [backend.clone()]; - let route = mk_http_route(ns, "foo-route", &parent, Some(4191)) - .with_backends(Some(&backends), None, None) - .with_filters(Some(vec![ - k8s_gateway_api::HttpRouteFilter::RequestHeaderModifier { - request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { - set: Some(vec![k8s_gateway_api::HttpHeader { - name: "set".to_string(), - value: "set-value".to_string(), - }]), - add: Some(vec![k8s_gateway_api::HttpHeader { - name: "add".to_string(), - value: "add-value".to_string(), - }]), - remove: Some(vec!["remove".to_string()]), - }, - }, - k8s_gateway_api::HttpRouteFilter::RequestRedirect { - request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { - scheme: Some("http".to_string()), - hostname: Some("host".to_string()), - path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { - replace_prefix_match: "/path".to_string(), - }), - port: Some(5555), - status_code: Some(302), - }, - }, - ])); - let _route = create(client, route.build()).await; - await_gateway_route_status(client, ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - // There should be a route with filters. 
- detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let rule = assert_singleton(&route.rules); - let filters = &rule.filters; - assert_eq!( - *filters, - vec![ - grpc::outbound::http_route::Filter { - kind: Some( - grpc::outbound::http_route::filter::Kind::RequestHeaderModifier( - grpc::http_route::RequestHeaderModifier { - add: Some(grpc::http_types::Headers { - headers: vec![grpc::http_types::headers::Header { - name: "add".to_string(), - value: "add-value".into(), - }] - }), - set: Some(grpc::http_types::Headers { - headers: vec![grpc::http_types::headers::Header { - name: "set".to_string(), - value: "set-value".into(), - }] - }), - remove: vec!["remove".to_string()], - } - ) - ) - }, - grpc::outbound::http_route::Filter { - kind: Some(grpc::outbound::http_route::filter::Kind::Redirect( - grpc::http_route::RequestRedirect { - scheme: Some(grpc::http_types::Scheme { - r#type: Some(grpc::http_types::scheme::Type::Registered( - grpc::http_types::scheme::Registered::Http.into(), - )) - }), - host: "host".to_string(), - path: Some(linkerd2_proxy_api::http_route::PathModifier { - replace: Some( - linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( - "/path".to_string() - ) - ) - }), - port: 5555, - status: 302, - } - )) - } - ] - ); - }); -} - -async fn backend_with_filters( - parent: Resource, - backend_for_parent: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // There should be a default route. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - let backends = [backend_for_parent.clone()]; - let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( - Some(&backends), - None, - Some(vec![ - k8s_gateway_api::HttpRouteFilter::RequestHeaderModifier { - request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { - set: Some(vec![k8s_gateway_api::HttpHeader { - name: "set".to_string(), - value: "set-value".to_string(), - }]), - add: Some(vec![k8s_gateway_api::HttpHeader { - name: "add".to_string(), - value: "add-value".to_string(), - }]), - remove: Some(vec!["remove".to_string()]), - }, - }, - k8s_gateway_api::HttpRouteFilter::RequestRedirect { - request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { - scheme: Some("http".to_string()), - hostname: Some("host".to_string()), - path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { - replace_prefix_match: "/path".to_string(), - }), - port: Some(5555), - status_code: Some(302), - }, - }, - ]), - ); - let _route = create(client, route.build()).await; - await_gateway_route_status(client, ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - // There should be a route without rule filters. 
- detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let rule = assert_singleton(&route.rules); - assert_eq!(rule.filters.len(), 0); - let backends = route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_for_parent, 8888); - let filters = &backend.backend.as_ref().unwrap().filters; - assert_eq!( - *filters, - vec![ - grpc::outbound::http_route::Filter { - kind: Some( - grpc::outbound::http_route::filter::Kind::RequestHeaderModifier( - grpc::http_route::RequestHeaderModifier { - add: Some(grpc::http_types::Headers { - headers: vec![grpc::http_types::headers::Header { - name: "add".to_string(), - value: "add-value".into(), - }] - }), - set: Some(grpc::http_types::Headers { - headers: vec![grpc::http_types::headers::Header { - name: "set".to_string(), - value: "set-value".into(), - }] - }), - remove: vec!["remove".to_string()], - } - ) - ) - }, - grpc::outbound::http_route::Filter { - kind: Some(grpc::outbound::http_route::filter::Kind::Redirect( - grpc::http_route::RequestRedirect { - scheme: Some(grpc::http_types::Scheme { - r#type: Some(grpc::http_types::scheme::Type::Registered( - grpc::http_types::scheme::Registered::Http.into(), - )) - }), - host: "host".to_string(), - path: Some(linkerd2_proxy_api::http_route::PathModifier { - replace: Some( - linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( - "/path".to_string() - ) - ) - }), - port: 5555, - status: 302, - } - )) - } - ] - ); - }); -} - -async fn http_route_retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { - let _route = create( - client, - mk_http_route(ns, "foo-route", &parent, Some(4191)) - .with_annotations( - vec![ - ("retry.linkerd.io/http".to_string(), "5xx".to_string()), - ("timeout.linkerd.io/response".to_string(), "10s".to_string()), - ] - .into_iter() - .collect(), - ) - .build(), - ) - .await; - - 
await_gateway_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let rule = assert_singleton(&route.rules); - let conditions = rule - .retry - .as_ref() - .expect("retry config expected") - .conditions - .as_ref() - .expect("retry conditions expected"); - let status_range = assert_singleton(&conditions.status_ranges); - assert_eq!(status_range.start, 500); - assert_eq!(status_range.end, 599); - let timeout = rule - .timeouts - .as_ref() - .expect("timeouts expected") - .response - .as_ref() - .expect("response timeout expected"); - assert_eq!(timeout.seconds, 10); - }); -} - -async fn retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { - let _route = create( - client, - mk_http_route(ns, "foo-route", &parent, Some(4191)) - .with_annotations( - vec![ - // Route annotations override the timeout config specified - // on the service. - ("timeout.linkerd.io/request".to_string(), "5s".to_string()), - ] - .into_iter() - .collect(), - ) - .build(), - ) - .await; - await_gateway_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let rule = assert_singleton(&route.rules); - let conditions = rule - .retry - .as_ref() - .expect("retry config expected") - .conditions - .as_ref() - .expect("retry conditions expected"); - let status_range = assert_singleton(&conditions.status_ranges); - // Retry config inherited from the service. 
- assert_eq!(status_range.start, 500); - assert_eq!(status_range.end, 599); - let timeouts = rule.timeouts.as_ref().expect("timeouts expected"); - // Service timeout config overridden by route timeout config. - assert_eq!(timeouts.response, None); - let request_timeout = timeouts.request.as_ref().expect("request timeout expected"); - assert_eq!(request_timeout.seconds, 5); - }); -} - -async fn http_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { - let mut route = create(client, mk_empty_http_route(ns, "foo-route", &parent, 4191)).await; - await_gateway_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be attached. - detect_http_routes(&config, |routes| { - let route: &grpc::outbound::HttpRoute = assert_route_attached(routes, &parent); - assert_route_name_eq(route, "foo-route"); - }); - - route - .spec - .inner - .parent_refs - .as_mut() - .unwrap() - .first_mut() - .unwrap() - .name = "other".to_string(); - update(client, route.clone()).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be unattached and the default route should be present. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - route - .spec - .inner - .parent_refs - .as_mut() - .unwrap() - .first_mut() - .unwrap() - .name = parent.name(); - update(client, route).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be attached again. - detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - assert_route_name_eq(route, "foo-route"); - }); -} +// use futures::prelude::*; +// use kube::ResourceExt; +// use linkerd_policy_controller_k8s_api as k8s; +// use linkerd_policy_test::{ +// assert_default_accrual_backoff, assert_resource_meta, assert_status_accepted, +// await_egress_net_status, await_gateway_route_status, create, create_annotated_egress_network, +// create_annotated_service, create_cluster_scoped, create_egress_network, +// create_opaque_egress_network, create_opaque_service, create_service, delete_cluster_scoped, +// grpc, mk_egress_net, mk_service, outbound_api::*, update, with_temp_ns, Resource, +// }; +// use maplit::{btreemap, convert_args}; +// use std::{collections::BTreeMap, time::Duration}; + +// // These tests are copies of the tests in outbound_api_gateway.rs but using the +// // policy.linkerd.io HttpRoute kubernetes types instead of the Gateway API ones. +// // These two files should be kept in sync to ensure that Linkerd can read and +// // function correctly with both types of resources. 
+ +// #[tokio::test(flavor = "current_thread")] +// async fn service_with_http_route_without_rules() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// parent_with_http_route_without_rules(Resource::Service(svc), &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_with_http_route_without_rules() { +// with_temp_ns(|client, ns| async move { +// // Create an egress net +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// parent_with_http_route_without_rules(Resource::EgressNetwork(egress), &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_with_http_routes_without_backends() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// parent_with_http_routes_without_backends(Resource::Service(svc), &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_with_http_routes_without_backends() { +// with_temp_ns(|client, ns| async move { +// // Create an egress net +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// parent_with_http_routes_without_backends(Resource::EgressNetwork(egress), &client, &ns) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_with_http_routes_with_backend() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// let backend_svc = create_service(&client, &ns, 
"backend", 8888).await; +// parent_with_http_routes_with_backend( +// Resource::Service(svc), +// Resource::Service(backend_svc), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_with_http_routes_with_backend() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// parent_with_http_routes_with_backend( +// Resource::EgressNetwork(egress.clone()), +// Resource::EgressNetwork(egress), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_with_http_routes_with_cross_namespace_backend() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + +// let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &svc, 4191); + +// // There should be a default route. 
+// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &svc, 4191); +// }); + +// let backend_ns_name = format!("{}-backend", ns); +// let backend_ns = create_cluster_scoped( +// &client, +// k8s::Namespace { +// metadata: k8s::ObjectMeta { +// name: Some(backend_ns_name.clone()), +// labels: Some(convert_args!(btreemap!( +// "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), +// ))), +// ..Default::default() +// }, +// ..Default::default() +// }, +// ) +// .await; +// let backend_name = "backend"; +// let backend_svc = +// Resource::Service(create_service(&client, &backend_ns_name, backend_name, 8888).await); +// let backends = [backend_svc.clone()]; +// let route = mk_http_route(&ns, "foo-route", &svc, Some(4191)).with_backends( +// Some(&backends), +// Some(backend_ns_name), +// None, +// ); +// let _route = create(&client, route.build()).await; +// await_gateway_route_status(&client, &ns, "foo-route").await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &svc, 4191); + +// // There should be a route with a backend with no filters. +// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// let backends = route_backends_random_available(route); +// let backend = assert_singleton(backends); +// assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_svc, 8888); +// let filters = &backend.backend.as_ref().unwrap().filters; +// assert_eq!(filters.len(), 0); +// }); + +// delete_cluster_scoped(&client, backend_ns).await +// }) +// .await; +// } + +// // TODO: Test fails until handling of invalid backends is implemented. 
+// #[tokio::test(flavor = "current_thread")] +// async fn service_with_http_routes_with_invalid_backend() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// let backend = mk_service(&ns, "invalid", 4191); + +// parent_with_http_routes_with_invalid_backend( +// Resource::Service(svc), +// Resource::Service(backend), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// // TODO: Test fails until handling of invalid backends is implemented. +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_with_http_routes_with_invalid_backend() { +// with_temp_ns(|client, ns| async move { +// // Create an egress network +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// let backend = mk_egress_net(&ns, "invalid"); + +// parent_with_http_routes_with_invalid_backend( +// Resource::EgressNetwork(egress), +// Resource::EgressNetwork(backend), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// // TODO: Investigate why the policy controller is only returning one route in this +// // case instead of two. +// #[tokio::test(flavor = "current_thread")] +// async fn service_with_multiple_http_routes() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// parent_with_multiple_http_routes(Resource::Service(svc), &client, &ns).await; +// }) +// .await; +// } + +// // TODO: Investigate why the policy controller is only returning one route in this +// // case instead of two. 
+// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_with_multiple_http_routes() { +// with_temp_ns(|client, ns| async move { +// // Create an egress net +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// parent_with_multiple_http_routes(Resource::EgressNetwork(egress), &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_with_consecutive_failure_accrual() { +// with_temp_ns(|client, ns| async move { +// let svc = create_annotated_service( +// &client, +// &ns, +// "consecutive-accrual-svc", +// 80, +// BTreeMap::from([ +// ( +// "balancer.linkerd.io/failure-accrual".to_string(), +// "consecutive".to_string(), +// ), +// ( +// "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), +// "8".to_string(), +// ), +// ( +// "balancer.linkerd.io/failure-accrual-consecutive-min-penalty".to_string(), +// "10s".to_string(), +// ), +// ( +// "balancer.linkerd.io/failure-accrual-consecutive-max-penalty".to_string(), +// "10m".to_string(), +// ), +// ( +// "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), +// "1.0".to_string(), +// ), +// ]), +// ) +// .await; +// parent_with_consecutive_failure_accrual(Resource::Service(svc), &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_with_consecutive_failure_accrual() { +// with_temp_ns(|client, ns| async move { +// let egress = create_annotated_egress_network( +// &client, +// &ns, +// "consecutive-accrual-egress", +// BTreeMap::from([ +// ( +// "balancer.linkerd.io/failure-accrual".to_string(), +// "consecutive".to_string(), +// ), +// ( +// "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), +// "8".to_string(), +// ), +// ( +// 
"balancer.linkerd.io/failure-accrual-consecutive-min-penalty".to_string(), +// "10s".to_string(), +// ), +// ( +// "balancer.linkerd.io/failure-accrual-consecutive-max-penalty".to_string(), +// "10m".to_string(), +// ), +// ( +// "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), +// "1.0".to_string(), +// ), +// ]), +// ) +// .await; +// let status = await_egress_net_status(&client, &ns, "consecutive-accrual-egress").await; +// assert_status_accepted(status.conditions); + +// parent_with_consecutive_failure_accrual(Resource::EgressNetwork(egress), &client, &ns) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_with_consecutive_failure_accrual_defaults_no_config() { +// with_temp_ns(|client, ns| async move { +// // Create a service configured to do consecutive failure accrual, but +// // with no additional configuration +// let svc_no_config = create_annotated_service( +// &client, +// &ns, +// "default-accrual-svc", +// 80, +// BTreeMap::from([( +// "balancer.linkerd.io/failure-accrual".to_string(), +// "consecutive".to_string(), +// )]), +// ) +// .await; + +// parent_with_consecutive_failure_accrual_defaults_no_config( +// Resource::Service(svc_no_config), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_with_consecutive_failure_accrual_defaults_max_fails() { +// with_temp_ns(|client, ns| async move { +// // Create a service configured to do consecutive failure accrual with +// // max number of failures and with default backoff +// let svc_max_fails = create_annotated_service( +// &client, +// &ns, +// "no-backoff-svc", +// 80, +// BTreeMap::from([ +// ( +// "balancer.linkerd.io/failure-accrual".to_string(), +// "consecutive".to_string(), +// ), +// ( +// "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), +// "8".to_string(), +// ), +// ]), +// ) +// .await; + +// 
parent_with_consecutive_failure_accrual_defaults_max_fails( +// Resource::Service(svc_max_fails), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_with_consecutive_failure_accrual_defaults_jitter() { +// with_temp_ns(|client, ns| async move { +// // Create a service configured to do consecutive failure accrual with +// // only the jitter ratio configured in the backoff +// let svc_jitter = create_annotated_service( +// &client, +// &ns, +// "only-jitter-svc", +// 80, +// BTreeMap::from([ +// ( +// "balancer.linkerd.io/failure-accrual".to_string(), +// "consecutive".to_string(), +// ), +// ( +// "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), +// "1.0".to_string(), +// ), +// ]), +// ) +// .await; + +// parent_with_consecutive_failure_accrual_defaults_max_jitter( +// Resource::Service(svc_jitter), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_with_consecutive_failure_accrual_defaults_no_config() { +// with_temp_ns(|client, ns| async move { +// // Create a egress network configured to do consecutive failure accrual, but +// // with no additional configuration +// let egress_no_config = create_annotated_egress_network( +// &client, +// &ns, +// "default-accrual-egress", +// BTreeMap::from([( +// "balancer.linkerd.io/failure-accrual".to_string(), +// "consecutive".to_string(), +// )]), +// ) +// .await; +// let status = await_egress_net_status(&client, &ns, "default-accrual-egress").await; +// assert_status_accepted(status.conditions); + +// parent_with_consecutive_failure_accrual_defaults_no_config( +// Resource::EgressNetwork(egress_no_config), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_with_consecutive_failure_accrual_defaults_max_fails() { +// with_temp_ns(|client, ns| 
async move { +// // Create a egress network configured to do consecutive failure accrual with +// // max number of failures and with default backoff +// let egress_max_fails = create_annotated_egress_network( +// &client, +// &ns, +// "no-backoff-egress", +// BTreeMap::from([ +// ( +// "balancer.linkerd.io/failure-accrual".to_string(), +// "consecutive".to_string(), +// ), +// ( +// "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), +// "8".to_string(), +// ), +// ]), +// ) +// .await; +// let status = await_egress_net_status(&client, &ns, "no-backoff-egress").await; +// assert_status_accepted(status.conditions); + +// parent_with_consecutive_failure_accrual_defaults_max_fails( +// Resource::EgressNetwork(egress_max_fails), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_with_consecutive_failure_accrual_defaults_jitter() { +// with_temp_ns(|client, ns| async move { +// // Create an egress net configured to do consecutive failure accrual with +// // only the jitter ratio configured in the backoff +// let egress_jitter = create_annotated_egress_network( +// &client, +// &ns, +// "only-jitter-egress", +// BTreeMap::from([ +// ( +// "balancer.linkerd.io/failure-accrual".to_string(), +// "consecutive".to_string(), +// ), +// ( +// "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), +// "1.0".to_string(), +// ), +// ]), +// ) +// .await; +// let status = await_egress_net_status(&client, &ns, "only-jitter-egress").await; +// assert_status_accepted(status.conditions); + +// parent_with_consecutive_failure_accrual_defaults_max_jitter( +// Resource::EgressNetwork(egress_jitter), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_with_default_failure_accrual() { +// with_temp_ns(|client, ns| async move { +// // Default config for Service, no failure accrual 
+// let svc_default = create_service(&client, &ns, "default-failure-accrual", 80).await; + +// // Create Service with consecutive failure accrual config for +// // max_failures but no mode +// let svc_max_fails = create_annotated_service( +// &client, +// &ns, +// "default-max-failure-svc", +// 80, +// BTreeMap::from([( +// "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), +// "8".to_string(), +// )]), +// ) +// .await; + +// parent_with_default_failure_accrual( +// Resource::Service(svc_default), +// Resource::Service(svc_max_fails), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_with_default_failure_accrual() { +// with_temp_ns(|client, ns| async move { +// // Default config for EgressNetwork, no failure accrual +// let egress_default = create_egress_network(&client, &ns, "default-failure-accrual").await; +// let status = await_egress_net_status(&client, &ns, "default-failure-accrual").await; +// assert_status_accepted(status.conditions); + +// // Create EgressNetwork with consecutive failure accrual config for +// // max_failures but no mode +// let egress_max_fails = create_annotated_egress_network( +// &client, +// &ns, +// "default-max-failure-egress", +// BTreeMap::from([( +// "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), +// "8".to_string(), +// )]), +// ) +// .await; +// let status = await_egress_net_status(&client, &ns, "default-max-failure-egress").await; +// assert_status_accepted(status.conditions); + +// parent_with_default_failure_accrual( +// Resource::EgressNetwork(egress_default), +// Resource::EgressNetwork(egress_max_fails), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn opaque_service() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_opaque_service(&client, &ns, "my-svc", 
4191).await; +// opaque_parent(Resource::Service(svc), &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn opaque_egress_net() { +// with_temp_ns(|client, ns| async move { +// // Create an egress network +// let egress = create_opaque_egress_network(&client, &ns, "my-svc", 4191).await; +// opaque_parent(Resource::EgressNetwork(egress), &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn route_with_filters_service() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// let backend = mk_service(&ns, "backend", 4191); + +// route_with_filters( +// Resource::Service(svc), +// Resource::Service(backend), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn route_with_filters_egress_net() { +// with_temp_ns(|client, ns| async move { +// // Create an egress net +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// route_with_filters( +// Resource::EgressNetwork(egress.clone()), +// Resource::EgressNetwork(egress), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn backend_with_filters_service() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// let backend_svc = create_service(&client, &ns, "backend", 8888).await; +// backend_with_filters( +// Resource::Service(svc), +// Resource::Service(backend_svc), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn backend_with_filters_egress_net() { +// with_temp_ns(|client, ns| async move { 
+// // Create an egress net +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// backend_with_filters( +// Resource::EgressNetwork(egress.clone()), +// Resource::EgressNetwork(egress), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn http_route_with_no_port() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + +// let mut rx_4191 = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; +// let config_4191 = rx_4191 +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config_4191); + +// let mut rx_9999 = retry_watch_outbound_policy(&client, &ns, &svc, 9999).await; +// let config_9999 = rx_9999 +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config_9999); + +// // There should be a default route. +// detect_http_routes(&config_4191, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &svc, 4191); +// }); +// detect_http_routes(&config_9999, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &svc, 9999); +// }); + +// let _route = create(&client, mk_http_route(&ns, "foo-route", &svc, None).build()).await; +// await_gateway_route_status(&client, &ns, "foo-route").await; + +// let config_4191 = rx_4191 +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config_4191); + +// // The route should apply to the service. 
+// detect_http_routes(&config_4191, |routes| { +// let route = assert_singleton(routes); +// assert_route_name_eq(route, "foo-route"); +// }); + +// let config_9999 = rx_9999 +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config_9999); + +// // The route should apply to other ports too. +// detect_http_routes(&config_9999, |routes| { +// let route = assert_singleton(routes); +// assert_route_name_eq(route, "foo-route"); +// }); +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn producer_route() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + +// let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; +// let producer_config = producer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?producer_config); + +// let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; +// let consumer_config = consumer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?consumer_config); + +// // There should be a default route. +// detect_http_routes(&producer_config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &svc, 4191); +// }); +// detect_http_routes(&consumer_config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &svc, 4191); +// }); + +// // A route created in the same namespace as its parent service is called +// // a producer route. It should be returned in outbound policy requests +// // for that service from ALL namespaces. 
+// let _route = create( +// &client, +// mk_http_route(&ns, "foo-route", &svc, Some(4191)).build(), +// ) +// .await; +// await_gateway_route_status(&client, &ns, "foo-route").await; + +// let producer_config = producer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?producer_config); +// let consumer_config = consumer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?consumer_config); + +// // The route should be returned in queries from the producer namespace. +// detect_http_routes(&producer_config, |routes| { +// let route = assert_singleton(routes); +// assert_route_name_eq(route, "foo-route"); +// }); + +// // The route should be returned in queries from a consumer namespace. +// detect_http_routes(&consumer_config, |routes| { +// let route = assert_singleton(routes); +// assert_route_name_eq(route, "foo-route"); +// }); +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn pre_existing_producer_route() { +// // We test the scenario where outbound policy watches are initiated after +// // a produce route already exists. +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + +// // A route created in the same namespace as its parent service is called +// // a producer route. It should be returned in outbound policy requests +// // for that service from ALL namespaces. 
+// let _route = create( +// &client, +// mk_http_route(&ns, "foo-route", &svc, Some(4191)).build(), +// ) +// .await; +// await_gateway_route_status(&client, &ns, "foo-route").await; + +// let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; +// let producer_config = producer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?producer_config); + +// let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; +// let consumer_config = consumer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?consumer_config); + +// // The route should be returned in queries from the producer namespace. +// detect_http_routes(&producer_config, |routes| { +// let route = assert_singleton(routes); +// assert_route_name_eq(route, "foo-route"); +// }); + +// // The route should be returned in queries from a consumer namespace. 
+// detect_http_routes(&consumer_config, |routes| { +// let route = assert_singleton(routes); +// assert_route_name_eq(route, "foo-route"); +// }); +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn consumer_route() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + +// let consumer_ns_name = format!("{}-consumer", ns); +// let consumer_ns = create_cluster_scoped( +// &client, +// k8s::Namespace { +// metadata: k8s::ObjectMeta { +// name: Some(consumer_ns_name.clone()), +// labels: Some(convert_args!(btreemap!( +// "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), +// ))), +// ..Default::default() +// }, +// ..Default::default() +// }, +// ) +// .await; + +// let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; +// let producer_config = producer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?producer_config); + +// let mut consumer_rx = +// retry_watch_outbound_policy(&client, &consumer_ns_name, &svc, 4191).await; +// let consumer_config = consumer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?consumer_config); + +// let mut other_rx = retry_watch_outbound_policy(&client, "other_ns", &svc, 4191).await; +// let other_config = other_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?other_config); + +// // There should be a default route. 
+// detect_http_routes(&producer_config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &svc, 4191); +// }); +// detect_http_routes(&consumer_config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &svc, 4191); +// }); +// detect_http_routes(&other_config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &svc, 4191); +// }); + +// // A route created in a different namespace as its parent service is +// // called a consumer route. It should be returned in outbound policy +// // requests for that service ONLY when the request comes from the +// // consumer namespace. +// let _route = create( +// &client, +// mk_http_route(&consumer_ns_name, "foo-route", &svc, Some(4191)).build(), +// ) +// .await; +// await_gateway_route_status(&client, &consumer_ns_name, "foo-route").await; + +// // The route should NOT be returned in queries from the producer namespace. +// // There should be a default route. +// assert!(producer_rx.next().now_or_never().is_none()); + +// // The route should be returned in queries from the same consumer +// // namespace. +// let consumer_config = consumer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?consumer_config); + +// detect_http_routes(&consumer_config, |routes| { +// let route = assert_singleton(routes); +// assert_route_name_eq(route, "foo-route"); +// }); + +// // The route should NOT be returned in queries from a different consumer +// // namespace. 
+// assert!(other_rx.next().now_or_never().is_none()); + +// delete_cluster_scoped(&client, consumer_ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn http_route_retries_and_timeouts_service() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// http_route_retries_and_timeouts(Resource::Service(svc), &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn http_route_retries_and_timeouts_egress_net() { +// with_temp_ns(|client, ns| async move { +// // Create an egress network +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// http_route_retries_and_timeouts(Resource::EgressNetwork(egress), &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_retries_and_timeouts() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let mut svc = mk_service(&ns, "my-svc", 4191); +// svc.annotations_mut() +// .insert("retry.linkerd.io/http".to_string(), "5xx".to_string()); +// svc.annotations_mut() +// .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); +// let svc = Resource::Service(create(&client, svc).await); + +// retries_and_timeouts(svc, &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_retries_and_timeouts() { +// with_temp_ns(|client, ns| async move { +// // Create a egress net +// let mut egress = mk_egress_net(&ns, "my-egress"); +// egress +// .annotations_mut() +// .insert("retry.linkerd.io/http".to_string(), "5xx".to_string()); +// egress +// .annotations_mut() +// .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); +// let egress = 
Resource::EgressNetwork(create(&client, egress).await); +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// retries_and_timeouts(egress, &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_http_route_reattachment() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// http_route_reattachment(Resource::Service(svc), &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_http_route_reattachment() { +// with_temp_ns(|client, ns| async move { +// // Create a egress net +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// http_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await; +// }) +// .await; +// } + +// /* Helpers */ +// struct HttpRouteBuilder(k8s_gateway_api::HttpRoute); + +// fn mk_http_route(ns: &str, name: &str, parent: &Resource, port: Option) -> HttpRouteBuilder { +// use k8s_gateway_api as api; + +// HttpRouteBuilder(api::HttpRoute { +// metadata: kube::api::ObjectMeta { +// namespace: Some(ns.to_string()), +// name: Some(name.to_string()), +// ..Default::default() +// }, +// spec: api::HttpRouteSpec { +// inner: api::CommonRouteSpec { +// parent_refs: Some(vec![api::ParentReference { +// group: Some(parent.group()), +// kind: Some(parent.kind()), +// namespace: Some(parent.namespace()), +// name: parent.name(), +// section_name: None, +// port, +// }]), +// }, +// hostnames: None, +// rules: Some(vec![api::HttpRouteRule { +// matches: Some(vec![api::HttpRouteMatch { +// path: Some(api::HttpPathMatch::Exact { +// value: "/foo".to_string(), +// }), +// headers: None, +// query_params: None, +// method: 
Some("GET".to_string()), +// }]), +// filters: None, +// backend_refs: None, +// }]), +// }, +// status: None, +// }) +// } + +// impl HttpRouteBuilder { +// fn with_backends( +// self, +// backends: Option<&[Resource]>, +// backends_ns: Option, +// backend_filters: Option>, +// ) -> Self { +// let mut route = self.0; +// let backend_refs = backends.map(|backends| { +// backends +// .iter() +// .map(|backend| k8s_gateway_api::HttpBackendRef { +// backend_ref: Some(k8s_gateway_api::BackendRef { +// weight: None, +// inner: k8s_gateway_api::BackendObjectReference { +// name: backend.name(), +// port: Some(8888), +// group: Some(backend.group()), +// kind: Some(backend.kind()), +// namespace: backends_ns.clone(), +// }, +// }), +// filters: backend_filters.clone(), +// }) +// .collect() +// }); +// route.spec.rules.iter_mut().flatten().for_each(|rule| { +// rule.backend_refs = backend_refs.clone(); +// }); +// Self(route) +// } + +// fn with_filters(self, filters: Option>) -> Self { +// let mut route = self.0; +// route +// .spec +// .rules +// .iter_mut() +// .flatten() +// .for_each(|rule| rule.filters = filters.clone()); +// Self(route) +// } + +// fn with_annotations(self, annotations: BTreeMap) -> Self { +// let mut route = self.0; +// route.metadata.annotations = Some(annotations); +// Self(route) +// } + +// fn build(self) -> k8s_gateway_api::HttpRoute { +// self.0 +// } +// } + +// fn mk_empty_http_route( +// ns: &str, +// name: &str, +// parent: &Resource, +// port: u16, +// ) -> k8s_gateway_api::HttpRoute { +// use k8s_gateway_api as api; +// api::HttpRoute { +// metadata: kube::api::ObjectMeta { +// namespace: Some(ns.to_string()), +// name: Some(name.to_string()), +// ..Default::default() +// }, +// spec: api::HttpRouteSpec { +// inner: api::CommonRouteSpec { +// parent_refs: Some(vec![api::ParentReference { +// group: Some(parent.group()), +// kind: Some(parent.kind()), +// namespace: Some(parent.namespace()), +// name: parent.name(), +// section_name: 
None, +// port: Some(port), +// }]), +// }, +// hostnames: None, +// rules: Some(vec![]), +// }, +// status: None, +// } +// } + +// async fn parent_with_no_http_routes(parent: Resource, client: &kube::Client, ns: &str) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // There should be a default route. +// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &parent, 4191); +// }); +// } + +// async fn parent_with_http_route_without_rules(parent: Resource, client: &kube::Client, ns: &str) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // There should be a default route. +// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &parent, 4191); +// }); + +// let _route = create(client, mk_empty_http_route(ns, "foo-route", &parent, 4191)).await; +// await_gateway_route_status(client, ns, "foo-route").await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // There should be a route with no rules. 
+// detect_http_routes(&config, |routes| { +// let route = assert_route_attached(routes, &parent); +// assert_eq!(route.rules.len(), 0); +// }); +// } + +// async fn parent_with_http_routes_without_backends( +// parent: Resource, +// client: &kube::Client, +// ns: &str, +// ) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // There should be a default route. +// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &parent, 4191); +// }); + +// let _route = create( +// client, +// mk_http_route(ns, "foo-route", &parent, Some(4191)).build(), +// ) +// .await; +// await_gateway_route_status(client, ns, "foo-route").await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // There should be a route with the logical backend. 
+// detect_http_routes(&config, |routes| { +// let route = assert_route_attached(routes, &parent); +// let backends = route_backends_first_available(route); +// let backend = assert_singleton(backends); +// assert_backend_matches_parent(backend, &parent, 4191); +// }); +// } + +// async fn parent_with_http_routes_with_backend( +// parent: Resource, +// rule_backend: Resource, +// client: &kube::Client, +// ns: &str, +// ) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // There should be a default route. +// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &parent, 4191); +// }); + +// let backends = [rule_backend.clone()]; +// let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( +// Some(&backends), +// None, +// None, +// ); +// let _route = create(client, route.build()).await; +// await_gateway_route_status(client, ns, "foo-route").await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // There should be a route with a backend with no filters. 
+// detect_http_routes(&config, |routes| { +// let route = assert_route_attached(routes, &parent); +// let backends = route_backends_random_available(route); +// let backend = assert_singleton(backends); +// assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &rule_backend, 8888); +// let filters = &backend.backend.as_ref().unwrap().filters; +// assert_eq!(filters.len(), 0); +// }); +// } + +// async fn parent_with_http_routes_with_invalid_backend( +// parent: Resource, +// backend: Resource, +// client: &kube::Client, +// ns: &str, +// ) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // There should be a default route. +// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &parent, 4191); +// }); + +// let backends = [backend]; +// let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( +// Some(&backends), +// None, +// None, +// ); +// let _route = create(client, route.build()).await; +// await_gateway_route_status(client, ns, "foo-route").await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // There should be a route with a backend. 
+// detect_http_routes(&config, |routes| { +// let route = assert_route_attached(routes, &parent); +// let backends = route_backends_random_available(route); +// let backend = assert_singleton(backends); +// assert_backend_has_failure_filter(backend); +// }); +// } + +// async fn parent_with_multiple_http_routes(parent: Resource, client: &kube::Client, ns: &str) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // There should be a default route. +// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &parent, 4191); +// }); + +// // Routes should be returned in sorted order by creation timestamp then +// // name. To ensure that this test isn't timing dependant, routes should +// // be created in alphabetical order. +// let _a_route = create( +// client, +// mk_http_route(ns, "a-route", &parent, Some(4191)).build(), +// ) +// .await; +// await_gateway_route_status(client, ns, "a-route").await; + +// // First route update. +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// let _b_route = create( +// client, +// mk_http_route(ns, "b-route", &parent, Some(4191)).build(), +// ) +// .await; +// await_gateway_route_status(client, ns, "b-route").await; + +// // Second route update. 
+// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// detect_http_routes(&config, |routes| { +// let num_routes = match parent { +// Resource::EgressNetwork(_) => 3, // three routes for egress net 2 configured + 1 default +// Resource::Service(_) => 2, // two routes for service +// }; +// assert_eq!(routes.len(), num_routes); +// assert_eq!(route_name(&routes[0]), "a-route"); +// assert_eq!(route_name(&routes[1]), "b-route"); +// }); +// } + +// async fn parent_with_consecutive_failure_accrual( +// parent: Resource, +// client: &kube::Client, +// ns: &str, +// ) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// detect_failure_accrual(&config, |accrual| { +// let consecutive = failure_accrual_consecutive(accrual); +// assert_eq!(8, consecutive.max_failures); +// assert_eq!( +// &grpc::outbound::ExponentialBackoff { +// min_backoff: Some(Duration::from_secs(10).try_into().unwrap()), +// max_backoff: Some(Duration::from_secs(600).try_into().unwrap()), +// jitter_ratio: 1.0_f32, +// }, +// consecutive +// .backoff +// .as_ref() +// .expect("backoff must be configured") +// ); +// }); +// } + +// async fn parent_with_consecutive_failure_accrual_defaults_no_config( +// parent: Resource, +// client: &kube::Client, +// ns: &str, +// ) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// // Expect default max_failures and default backoff +// detect_failure_accrual(&config, |accrual| { +// let consecutive = 
failure_accrual_consecutive(accrual); +// assert_eq!(7, consecutive.max_failures); +// assert_default_accrual_backoff!(consecutive +// .backoff +// .as_ref() +// .expect("backoff must be configured")); +// }); +// } + +// async fn parent_with_consecutive_failure_accrual_defaults_max_fails( +// parent: Resource, +// client: &kube::Client, +// ns: &str, +// ) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// // Expect default backoff and overridden max_failures +// detect_failure_accrual(&config, |accrual| { +// let consecutive = failure_accrual_consecutive(accrual); +// assert_eq!(8, consecutive.max_failures); +// assert_default_accrual_backoff!(consecutive +// .backoff +// .as_ref() +// .expect("backoff must be configured")); +// }); +// } + +// async fn parent_with_consecutive_failure_accrual_defaults_max_jitter( +// parent: Resource, +// client: &kube::Client, +// ns: &str, +// ) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// // Expect defaults for everything except for the jitter ratio +// detect_failure_accrual(&config, |accrual| { +// let consecutive = failure_accrual_consecutive(accrual); +// assert_eq!(7, consecutive.max_failures); +// assert_eq!( +// &grpc::outbound::ExponentialBackoff { +// min_backoff: Some(Duration::from_secs(1).try_into().unwrap()), +// max_backoff: Some(Duration::from_secs(60).try_into().unwrap()), +// jitter_ratio: 1.0_f32, +// }, +// consecutive +// .backoff +// .as_ref() +// .expect("backoff must be configured") +// ); +// }); +// } + +// async fn parent_with_default_failure_accrual( +// parent_default_config: Resource, +// 
parent_max_failures: Resource, +// client: &kube::Client, +// ns: &str, +// ) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent_default_config, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// // Expect failure accrual config to be default (no failure accrual) +// detect_failure_accrual(&config, |accrual| { +// assert!( +// accrual.is_none(), +// "consecutive failure accrual should not be configured for service" +// ); +// }); + +// let mut rx = retry_watch_outbound_policy(client, ns, &parent_max_failures, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// // Expect failure accrual config to be default (no failure accrual) +// detect_failure_accrual(&config, |accrual| { +// assert!( +// accrual.is_none(), +// "consecutive failure accrual should not be configured for service" +// ) +// }); +// } + +// async fn opaque_parent(parent: Resource, client: &kube::Client, ns: &str) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// // Proxy protocol should be opaque. 
+// match config.protocol.unwrap().kind.unwrap() { +// grpc::outbound::proxy_protocol::Kind::Opaque(_) => {} +// _ => panic!("proxy protocol must be Opaque"), +// }; +// } + +// async fn route_with_filters(parent: Resource, backend: Resource, client: &kube::Client, ns: &str) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// // There should be a default route. +// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &parent, 4191); +// }); + +// let backends = [backend.clone()]; +// let route = mk_http_route(ns, "foo-route", &parent, Some(4191)) +// .with_backends(Some(&backends), None, None) +// .with_filters(Some(vec![ +// k8s_gateway_api::HttpRouteFilter::RequestHeaderModifier { +// request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { +// set: Some(vec![k8s_gateway_api::HttpHeader { +// name: "set".to_string(), +// value: "set-value".to_string(), +// }]), +// add: Some(vec![k8s_gateway_api::HttpHeader { +// name: "add".to_string(), +// value: "add-value".to_string(), +// }]), +// remove: Some(vec!["remove".to_string()]), +// }, +// }, +// k8s_gateway_api::HttpRouteFilter::RequestRedirect { +// request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { +// scheme: Some("http".to_string()), +// hostname: Some("host".to_string()), +// path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { +// replace_prefix_match: "/path".to_string(), +// }), +// port: Some(5555), +// status_code: Some(302), +// }, +// }, +// ])); +// let _route = create(client, route.build()).await; +// await_gateway_route_status(client, ns, "foo-route").await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// 
tracing::trace!(?config); + +// // There should be a route with filters. +// detect_http_routes(&config, |routes| { +// let route = assert_route_attached(routes, &parent); +// let rule = assert_singleton(&route.rules); +// let filters = &rule.filters; +// assert_eq!( +// *filters, +// vec![ +// grpc::outbound::http_route::Filter { +// kind: Some( +// grpc::outbound::http_route::filter::Kind::RequestHeaderModifier( +// grpc::http_route::RequestHeaderModifier { +// add: Some(grpc::http_types::Headers { +// headers: vec![grpc::http_types::headers::Header { +// name: "add".to_string(), +// value: "add-value".into(), +// }] +// }), +// set: Some(grpc::http_types::Headers { +// headers: vec![grpc::http_types::headers::Header { +// name: "set".to_string(), +// value: "set-value".into(), +// }] +// }), +// remove: vec!["remove".to_string()], +// } +// ) +// ) +// }, +// grpc::outbound::http_route::Filter { +// kind: Some(grpc::outbound::http_route::filter::Kind::Redirect( +// grpc::http_route::RequestRedirect { +// scheme: Some(grpc::http_types::Scheme { +// r#type: Some(grpc::http_types::scheme::Type::Registered( +// grpc::http_types::scheme::Registered::Http.into(), +// )) +// }), +// host: "host".to_string(), +// path: Some(linkerd2_proxy_api::http_route::PathModifier { +// replace: Some( +// linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( +// "/path".to_string() +// ) +// ) +// }), +// port: 5555, +// status: 302, +// } +// )) +// } +// ] +// ); +// }); +// } + +// async fn backend_with_filters( +// parent: Resource, +// backend_for_parent: Resource, +// client: &kube::Client, +// ns: &str, +// ) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// // There should be a default route. 
+// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &parent, 4191); +// }); + +// let backends = [backend_for_parent.clone()]; +// let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( +// Some(&backends), +// None, +// Some(vec![ +// k8s_gateway_api::HttpRouteFilter::RequestHeaderModifier { +// request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { +// set: Some(vec![k8s_gateway_api::HttpHeader { +// name: "set".to_string(), +// value: "set-value".to_string(), +// }]), +// add: Some(vec![k8s_gateway_api::HttpHeader { +// name: "add".to_string(), +// value: "add-value".to_string(), +// }]), +// remove: Some(vec!["remove".to_string()]), +// }, +// }, +// k8s_gateway_api::HttpRouteFilter::RequestRedirect { +// request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { +// scheme: Some("http".to_string()), +// hostname: Some("host".to_string()), +// path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { +// replace_prefix_match: "/path".to_string(), +// }), +// port: Some(5555), +// status_code: Some(302), +// }, +// }, +// ]), +// ); +// let _route = create(client, route.build()).await; +// await_gateway_route_status(client, ns, "foo-route").await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// // There should be a route without rule filters. 
+// detect_http_routes(&config, |routes| { +// let route = assert_route_attached(routes, &parent); +// let rule = assert_singleton(&route.rules); +// assert_eq!(rule.filters.len(), 0); +// let backends = route_backends_random_available(route); +// let backend = assert_singleton(backends); +// assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_for_parent, 8888); +// let filters = &backend.backend.as_ref().unwrap().filters; +// assert_eq!( +// *filters, +// vec![ +// grpc::outbound::http_route::Filter { +// kind: Some( +// grpc::outbound::http_route::filter::Kind::RequestHeaderModifier( +// grpc::http_route::RequestHeaderModifier { +// add: Some(grpc::http_types::Headers { +// headers: vec![grpc::http_types::headers::Header { +// name: "add".to_string(), +// value: "add-value".into(), +// }] +// }), +// set: Some(grpc::http_types::Headers { +// headers: vec![grpc::http_types::headers::Header { +// name: "set".to_string(), +// value: "set-value".into(), +// }] +// }), +// remove: vec!["remove".to_string()], +// } +// ) +// ) +// }, +// grpc::outbound::http_route::Filter { +// kind: Some(grpc::outbound::http_route::filter::Kind::Redirect( +// grpc::http_route::RequestRedirect { +// scheme: Some(grpc::http_types::Scheme { +// r#type: Some(grpc::http_types::scheme::Type::Registered( +// grpc::http_types::scheme::Registered::Http.into(), +// )) +// }), +// host: "host".to_string(), +// path: Some(linkerd2_proxy_api::http_route::PathModifier { +// replace: Some( +// linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( +// "/path".to_string() +// ) +// ) +// }), +// port: 5555, +// status: 302, +// } +// )) +// } +// ] +// ); +// }); +// } + +// async fn http_route_retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { +// let _route = create( +// client, +// mk_http_route(ns, "foo-route", &parent, Some(4191)) +// .with_annotations( +// vec![ +// ("retry.linkerd.io/http".to_string(), "5xx".to_string()), +// 
("timeout.linkerd.io/response".to_string(), "10s".to_string()), +// ] +// .into_iter() +// .collect(), +// ) +// .build(), +// ) +// .await; + +// await_gateway_route_status(client, ns, "foo-route").await; + +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// detect_http_routes(&config, |routes| { +// let route = assert_route_attached(routes, &parent); +// let rule = assert_singleton(&route.rules); +// let conditions = rule +// .retry +// .as_ref() +// .expect("retry config expected") +// .conditions +// .as_ref() +// .expect("retry conditions expected"); +// let status_range = assert_singleton(&conditions.status_ranges); +// assert_eq!(status_range.start, 500); +// assert_eq!(status_range.end, 599); +// let timeout = rule +// .timeouts +// .as_ref() +// .expect("timeouts expected") +// .response +// .as_ref() +// .expect("response timeout expected"); +// assert_eq!(timeout.seconds, 10); +// }); +// } + +// async fn retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { +// let _route = create( +// client, +// mk_http_route(ns, "foo-route", &parent, Some(4191)) +// .with_annotations( +// vec![ +// // Route annotations override the timeout config specified +// // on the service. 
+// ("timeout.linkerd.io/request".to_string(), "5s".to_string()), +// ] +// .into_iter() +// .collect(), +// ) +// .build(), +// ) +// .await; +// await_gateway_route_status(client, ns, "foo-route").await; + +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// detect_http_routes(&config, |routes| { +// let route = assert_route_attached(routes, &parent); +// let rule = assert_singleton(&route.rules); +// let conditions = rule +// .retry +// .as_ref() +// .expect("retry config expected") +// .conditions +// .as_ref() +// .expect("retry conditions expected"); +// let status_range = assert_singleton(&conditions.status_ranges); +// // Retry config inherited from the service. +// assert_eq!(status_range.start, 500); +// assert_eq!(status_range.end, 599); +// let timeouts = rule.timeouts.as_ref().expect("timeouts expected"); +// // Service timeout config overridden by route timeout config. +// assert_eq!(timeouts.response, None); +// let request_timeout = timeouts.request.as_ref().expect("request timeout expected"); +// assert_eq!(request_timeout.seconds, 5); +// }); +// } + +// async fn http_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { +// let mut route = create(client, mk_empty_http_route(ns, "foo-route", &parent, 4191)).await; +// await_gateway_route_status(client, ns, "foo-route").await; + +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // The route should be attached. 
+// detect_http_routes(&config, |routes| { +// let route: &grpc::outbound::HttpRoute = assert_route_attached(routes, &parent); +// assert_route_name_eq(route, "foo-route"); +// }); + +// route +// .spec +// .inner +// .parent_refs +// .as_mut() +// .unwrap() +// .first_mut() +// .unwrap() +// .name = "other".to_string(); +// update(client, route.clone()).await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // The route should be unattached and the default route should be present. +// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &parent, 4191); +// }); + +// route +// .spec +// .inner +// .parent_refs +// .as_mut() +// .unwrap() +// .first_mut() +// .unwrap() +// .name = parent.name(); +// update(client, route).await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // The route should be attached again. 
+// detect_http_routes(&config, |routes| { +// let route = assert_route_attached(routes, &parent); +// assert_route_name_eq(route, "foo-route"); +// }); +// } diff --git a/policy-test/tests/outbound_api_grpc.rs b/policy-test/tests/outbound_api_grpc.rs index e2493694fa53a..43db75a623c83 100644 --- a/policy-test/tests/outbound_api_grpc.rs +++ b/policy-test/tests/outbound_api_grpc.rs @@ -1,314 +1,313 @@ -use futures::prelude::*; -use kube::ResourceExt; -use linkerd_policy_test::{ - assert_resource_meta, assert_status_accepted, await_egress_net_status, await_grpc_route_status, - create, create_egress_network, create_service, mk_egress_net, mk_service, outbound_api::*, - update, with_temp_ns, Resource, -}; -use std::collections::BTreeMap; - -#[tokio::test(flavor = "current_thread")] -async fn service_grpc_route_retries_and_timeouts() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - grpc_route_retries_and_timeouts(svc, &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_grpc_route_retries_and_timeouts() { - with_temp_ns(|client, ns| async move { - // Create a egress net - let egress = - Resource::EgressNetwork(create_egress_network(&client, &ns, "my-egress").await); - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - grpc_route_retries_and_timeouts(egress, &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_retries_and_timeouts() { - with_temp_ns(|client, ns| async move { - // Create a service - let mut svc = mk_service(&ns, "my-svc", 4191); - svc.annotations_mut() - .insert("retry.linkerd.io/grpc".to_string(), "internal".to_string()); - svc.annotations_mut() - .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); - let svc = Resource::Service(create(&client, svc).await); - - 
parent_retries_and_timeouts(svc, &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_retries_and_timeouts() { - with_temp_ns(|client, ns| async move { - // Create a egress net - let mut egress = mk_egress_net(&ns, "my-egress"); - egress - .annotations_mut() - .insert("retry.linkerd.io/grpc".to_string(), "internal".to_string()); - egress - .annotations_mut() - .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); - let egress = Resource::EgressNetwork(create(&client, egress).await); - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_retries_and_timeouts(egress, &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_grpc_route_reattachment() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - grpc_route_reattachment(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_grpc_route_reattachment() { - with_temp_ns(|client, ns| async move { - // Create a egress network - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - grpc_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -/* Helpers */ - -struct GrpcRouteBuilder(k8s_gateway_api::GrpcRoute); - -fn mk_grpc_route(ns: &str, name: &str, parent: &Resource, port: Option) -> GrpcRouteBuilder { - GrpcRouteBuilder(k8s_gateway_api::GrpcRoute { - metadata: kube::api::ObjectMeta { - namespace: Some(ns.to_string()), - name: Some(name.to_string()), - ..Default::default() - }, - spec: k8s_gateway_api::GrpcRouteSpec { - inner: k8s_gateway_api::CommonRouteSpec { - parent_refs: 
Some(vec![k8s_gateway_api::ParentReference { - group: Some(parent.group()), - kind: Some(parent.kind()), - namespace: Some(parent.namespace()), - name: parent.name(), - section_name: None, - port, - }]), - }, - hostnames: None, - rules: Some(vec![k8s_gateway_api::GrpcRouteRule { - matches: Some(vec![k8s_gateway_api::GrpcRouteMatch { - method: Some(k8s_gateway_api::GrpcMethodMatch::Exact { - method: Some("foo".to_string()), - service: Some("my-gprc-service".to_string()), - }), - headers: None, - }]), - filters: None, - backend_refs: None, - }]), - }, - status: None, - }) -} - -impl GrpcRouteBuilder { - fn with_annotations(self, annotations: BTreeMap) -> Self { - let mut route = self.0; - route.metadata.annotations = Some(annotations); - Self(route) - } - - fn build(self) -> k8s_gateway_api::GrpcRoute { - self.0 - } -} - -async fn grpc_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { - let mut route = create( - client, - mk_grpc_route(ns, "foo-route", &parent, Some(4191)).build(), - ) - .await; - await_grpc_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - { - // The route should be attached. 
- let routes = grpc_routes(&config); - let route = assert_route_attached(routes, &parent); - assert_name_eq(route.metadata.as_ref().unwrap(), "foo-route"); - } - - route - .spec - .inner - .parent_refs - .as_mut() - .unwrap() - .first_mut() - .unwrap() - .name = "other".to_string(); - update(client, route.clone()).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The grpc route should be unattached and the default (http) route - // should be present. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - route - .spec - .inner - .parent_refs - .as_mut() - .unwrap() - .first_mut() - .unwrap() - .name = parent.name(); - update(client, route).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be attached again. - { - // The route should be attached. 
- let routes = grpc_routes(&config); - let route = assert_route_attached(routes, &parent); - assert_name_eq(route.metadata.as_ref().unwrap(), "foo-route"); - } -} - -async fn grpc_route_retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { - let _route = create( - client, - mk_grpc_route(ns, "foo-route", &parent, Some(4191)) - .with_annotations( - vec![ - ("retry.linkerd.io/grpc".to_string(), "internal".to_string()), - ("timeout.linkerd.io/response".to_string(), "10s".to_string()), - ] - .into_iter() - .collect(), - ) - .build(), - ) - .await; - await_grpc_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - let routes = grpc_routes(&config); - let route = assert_route_attached(routes, &parent); - let rule = assert_singleton(&route.rules); - let conditions = rule - .retry - .as_ref() - .expect("retry config expected") - .conditions - .as_ref() - .expect("retry conditions expected"); - assert!(conditions.internal); - let timeout = rule - .timeouts - .as_ref() - .expect("timeouts expected") - .response - .as_ref() - .expect("response timeout expected"); - assert_eq!(timeout.seconds, 10); -} - -async fn parent_retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { - let _route = create( - client, - mk_grpc_route(ns, "foo-route", &parent, Some(4191)) - .with_annotations( - vec![ - // Route annotations override the timeout config specified - // on the service. 
- ("timeout.linkerd.io/request".to_string(), "5s".to_string()), - ] - .into_iter() - .collect(), - ) - .build(), - ) - .await; - await_grpc_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - let routes = grpc_routes(&config); - let route = assert_route_attached(routes, &parent); - let rule = assert_singleton(&route.rules); - let conditions = rule - .retry - .as_ref() - .expect("retry config expected") - .conditions - .as_ref() - .expect("retry conditions expected"); - // Retry config inherited from the service. - assert!(conditions.internal); - let timeouts = rule.timeouts.as_ref().expect("timeouts expected"); - // Parent timeout config overridden by route timeout config. - assert_eq!(timeouts.response, None); - let request_timeout = timeouts.request.as_ref().expect("request timeout expected"); - assert_eq!(request_timeout.seconds, 5); -} +// use futures::prelude::*; +// use kube::ResourceExt; +// use linkerd_policy_test::{ +// assert_resource_meta, assert_status_accepted, await_egress_net_status, await_grpc_route_status, +// create, create_egress_network, create_service, mk_egress_net, mk_service, outbound_api::*, +// update, with_temp_ns, Resource, +// }; +// use std::collections::BTreeMap; + +// #[tokio::test(flavor = "current_thread")] +// async fn service_grpc_route_retries_and_timeouts() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); +// grpc_route_retries_and_timeouts(svc, &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_grpc_route_retries_and_timeouts() { +// with_temp_ns(|client, ns| async move { +// // Create a egress net +// let egress = +// 
Resource::EgressNetwork(create_egress_network(&client, &ns, "my-egress").await); +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// grpc_route_retries_and_timeouts(egress, &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_retries_and_timeouts() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let mut svc = mk_service(&ns, "my-svc", 4191); +// svc.annotations_mut() +// .insert("retry.linkerd.io/grpc".to_string(), "internal".to_string()); +// svc.annotations_mut() +// .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); +// let svc = Resource::Service(create(&client, svc).await); + +// parent_retries_and_timeouts(svc, &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_retries_and_timeouts() { +// with_temp_ns(|client, ns| async move { +// // Create a egress net +// let mut egress = mk_egress_net(&ns, "my-egress"); +// egress +// .annotations_mut() +// .insert("retry.linkerd.io/grpc".to_string(), "internal".to_string()); +// egress +// .annotations_mut() +// .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); +// let egress = Resource::EgressNetwork(create(&client, egress).await); +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// parent_retries_and_timeouts(egress, &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_grpc_route_reattachment() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// grpc_route_reattachment(Resource::Service(svc), &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn 
egress_net_grpc_route_reattachment() { +// with_temp_ns(|client, ns| async move { +// // Create a egress network +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// grpc_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await; +// }) +// .await; +// } + +// /* Helpers */ +// struct GrpcRouteBuilder(k8s_gateway_api::GrpcRoute); + +// fn mk_grpc_route(ns: &str, name: &str, parent: &Resource, port: Option) -> GrpcRouteBuilder { +// GrpcRouteBuilder(k8s_gateway_api::GrpcRoute { +// metadata: kube::api::ObjectMeta { +// namespace: Some(ns.to_string()), +// name: Some(name.to_string()), +// ..Default::default() +// }, +// spec: k8s_gateway_api::GrpcRouteSpec { +// inner: k8s_gateway_api::CommonRouteSpec { +// parent_refs: Some(vec![k8s_gateway_api::ParentReference { +// group: Some(parent.group()), +// kind: Some(parent.kind()), +// namespace: Some(parent.namespace()), +// name: parent.name(), +// section_name: None, +// port, +// }]), +// }, +// hostnames: None, +// rules: Some(vec![k8s_gateway_api::GrpcRouteRule { +// matches: Some(vec![k8s_gateway_api::GrpcRouteMatch { +// method: Some(k8s_gateway_api::GrpcMethodMatch::Exact { +// method: Some("foo".to_string()), +// service: Some("my-gprc-service".to_string()), +// }), +// headers: None, +// }]), +// filters: None, +// backend_refs: None, +// }]), +// }, +// status: None, +// }) +// } + +// impl GrpcRouteBuilder { +// fn with_annotations(self, annotations: BTreeMap) -> Self { +// let mut route = self.0; +// route.metadata.annotations = Some(annotations); +// Self(route) +// } + +// fn build(self) -> k8s_gateway_api::GrpcRoute { +// self.0 +// } +// } + +// async fn grpc_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { +// let mut route = create( +// client, +// mk_grpc_route(ns, "foo-route", &parent, Some(4191)).build(), +// ) +// 
.await; +// await_grpc_route_status(client, ns, "foo-route").await; + +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// { +// // The route should be attached. +// let routes = grpc_routes(&config); +// let route = assert_route_attached(routes, &parent); +// assert_name_eq(route.metadata.as_ref().unwrap(), "foo-route"); +// } + +// route +// .spec +// .inner +// .parent_refs +// .as_mut() +// .unwrap() +// .first_mut() +// .unwrap() +// .name = "other".to_string(); +// update(client, route.clone()).await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // The grpc route should be unattached and the default (http) route +// // should be present. +// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &parent, 4191); +// }); + +// route +// .spec +// .inner +// .parent_refs +// .as_mut() +// .unwrap() +// .first_mut() +// .unwrap() +// .name = parent.name(); +// update(client, route).await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // The route should be attached again. +// { +// // The route should be attached. 
+// let routes = grpc_routes(&config); +// let route = assert_route_attached(routes, &parent); +// assert_name_eq(route.metadata.as_ref().unwrap(), "foo-route"); +// } +// } + +// async fn grpc_route_retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { +// let _route = create( +// client, +// mk_grpc_route(ns, "foo-route", &parent, Some(4191)) +// .with_annotations( +// vec![ +// ("retry.linkerd.io/grpc".to_string(), "internal".to_string()), +// ("timeout.linkerd.io/response".to_string(), "10s".to_string()), +// ] +// .into_iter() +// .collect(), +// ) +// .build(), +// ) +// .await; +// await_grpc_route_status(client, ns, "foo-route").await; + +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// let routes = grpc_routes(&config); +// let route = assert_route_attached(routes, &parent); +// let rule = assert_singleton(&route.rules); +// let conditions = rule +// .retry +// .as_ref() +// .expect("retry config expected") +// .conditions +// .as_ref() +// .expect("retry conditions expected"); +// assert!(conditions.internal); +// let timeout = rule +// .timeouts +// .as_ref() +// .expect("timeouts expected") +// .response +// .as_ref() +// .expect("response timeout expected"); +// assert_eq!(timeout.seconds, 10); +// } + +// async fn parent_retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { +// let _route = create( +// client, +// mk_grpc_route(ns, "foo-route", &parent, Some(4191)) +// .with_annotations( +// vec![ +// // Route annotations override the timeout config specified +// // on the service. 
+// ("timeout.linkerd.io/request".to_string(), "5s".to_string()), +// ] +// .into_iter() +// .collect(), +// ) +// .build(), +// ) +// .await; +// await_grpc_route_status(client, ns, "foo-route").await; + +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// let routes = grpc_routes(&config); +// let route = assert_route_attached(routes, &parent); +// let rule = assert_singleton(&route.rules); +// let conditions = rule +// .retry +// .as_ref() +// .expect("retry config expected") +// .conditions +// .as_ref() +// .expect("retry conditions expected"); +// // Retry config inherited from the service. +// assert!(conditions.internal); +// let timeouts = rule.timeouts.as_ref().expect("timeouts expected"); +// // Parent timeout config overridden by route timeout config. +// assert_eq!(timeouts.response, None); +// let request_timeout = timeouts.request.as_ref().expect("request timeout expected"); +// assert_eq!(request_timeout.seconds, 5); +// } diff --git a/policy-test/tests/outbound_api_linkerd.rs b/policy-test/tests/outbound_api_linkerd.rs index 83f5ff7bd4535..99eb9e3157566 100644 --- a/policy-test/tests/outbound_api_linkerd.rs +++ b/policy-test/tests/outbound_api_linkerd.rs @@ -1,2010 +1,1934 @@ -use std::{collections::BTreeMap, time::Duration}; - -use futures::prelude::*; -use kube::ResourceExt; -use linkerd_policy_controller_k8s_api as k8s; -use linkerd_policy_test::{ - assert_default_accrual_backoff, assert_resource_meta, assert_status_accepted, - await_egress_net_status, await_route_status, create, create_annotated_egress_network, - create_annotated_service, create_cluster_scoped, create_egress_network, - create_opaque_egress_network, create_opaque_service, create_service, delete_cluster_scoped, - grpc, mk_egress_net, mk_service, outbound_api::*, update, with_temp_ns, 
Resource, -}; -use maplit::{btreemap, convert_args}; - -// These tests are copies of the tests in outbound_api_gateway.rs but using the -// policy.linkerd.io HttpRoute kubernetes types instead of the Gateway API ones. -// These two files should be kept in sync to ensure that Linkerd can read and -// function correctly with both types of resources. - -#[tokio::test(flavor = "current_thread")] -async fn service_with_no_http_routes() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - parent_with_no_http_routes(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_no_http_routes() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_no_http_routes(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_http_route_without_rules() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - parent_with_http_route_without_rules(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_http_route_without_rules() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_http_route_without_rules(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_http_routes_without_backends() 
{ - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - parent_with_http_routes_without_backends(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_http_routes_without_backends() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_http_routes_without_backends(Resource::EgressNetwork(egress), &client, &ns) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_http_routes_with_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - let backend_svc = create_service(&client, &ns, "backend", 8888).await; - parent_with_http_routes_with_backend( - Resource::Service(svc), - Resource::Service(backend_svc), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_http_routes_with_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_http_routes_with_backend( - Resource::EgressNetwork(egress.clone()), - Resource::EgressNetwork(egress), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_http_routes_with_cross_namespace_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let mut rx = retry_watch_outbound_policy(&client, 
&ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &svc, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - let backend_ns_name = format!("{}-backend", ns); - let backend_ns = create_cluster_scoped( - &client, - k8s::Namespace { - metadata: k8s::ObjectMeta { - name: Some(backend_ns_name.clone()), - labels: Some(convert_args!(btreemap!( - "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), - ))), - ..Default::default() - }, - ..Default::default() - }, - ) - .await; - let backend_name = "backend"; - let backend_svc = - Resource::Service(create_service(&client, &backend_ns_name, backend_name, 8888).await); - let backends = [backend_svc.clone()]; - let route = mk_http_route(&ns, "foo-route", &svc, Some(4191)).with_backends( - Some(&backends), - Some(backend_ns_name), - None, - ); - let _route = create(&client, route.build()).await; - await_route_status(&client, &ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &svc, 4191); - - // There should be a route with a backend with no filters. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - let backends = route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_svc, 8888); - let filters = &backend.backend.as_ref().unwrap().filters; - assert_eq!(filters.len(), 0); - }); - - delete_cluster_scoped(&client, backend_ns).await - }) - .await; -} - -// TODO: Test fails until handling of invalid backends is implemented. 
-#[tokio::test(flavor = "current_thread")] -async fn service_with_http_routes_with_invalid_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - let backend = mk_service(&ns, "invalid", 4191); - - parent_with_http_routes_with_invalid_backend( - Resource::Service(svc), - Resource::Service(backend), - &client, - &ns, - ) - .await; - }) - .await; -} - -// TODO: Test fails until handling of invalid backends is implemented. -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_http_routes_with_invalid_backend() { - with_temp_ns(|client, ns| async move { - // Create an egress network - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - let backend = mk_egress_net(&ns, "invalid"); - - parent_with_http_routes_with_invalid_backend( - Resource::EgressNetwork(egress), - Resource::EgressNetwork(backend), - &client, - &ns, - ) - .await; - }) - .await; -} - -// TODO: Investigate why the policy controller is only returning one route in this -// case instead of two. -#[tokio::test(flavor = "current_thread")] -async fn service_with_multiple_http_routes() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - parent_with_multiple_http_routes(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -// TODO: Investigate why the policy controller is only returning one route in this -// case instead of two. 
-#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_multiple_http_routes() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_multiple_http_routes(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_consecutive_failure_accrual() { - with_temp_ns(|client, ns| async move { - let svc = create_annotated_service( - &client, - &ns, - "consecutive-accrual-svc", - 80, - BTreeMap::from([ - ( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), - "8".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-min-penalty".to_string(), - "10s".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-max-penalty".to_string(), - "10m".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), - "1.0".to_string(), - ), - ]), - ) - .await; - parent_with_consecutive_failure_accrual(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_consecutive_failure_accrual() { - with_temp_ns(|client, ns| async move { - let egress = create_annotated_egress_network( - &client, - &ns, - "consecutive-accrual-egress", - BTreeMap::from([ - ( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), - "8".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-min-penalty".to_string(), - "10s".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-max-penalty".to_string(), 
- "10m".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), - "1.0".to_string(), - ), - ]), - ) - .await; - let status = await_egress_net_status(&client, &ns, "consecutive-accrual-egress").await; - assert_status_accepted(status.conditions); - - parent_with_consecutive_failure_accrual(Resource::EgressNetwork(egress), &client, &ns) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_consecutive_failure_accrual_defaults_no_config() { - with_temp_ns(|client, ns| async move { - // Create a service configured to do consecutive failure accrual, but - // with no additional configuration - let svc_no_config = create_annotated_service( - &client, - &ns, - "default-accrual-svc", - 80, - BTreeMap::from([( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - )]), - ) - .await; - - parent_with_consecutive_failure_accrual_defaults_no_config( - Resource::Service(svc_no_config), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_consecutive_failure_accrual_defaults_max_fails() { - with_temp_ns(|client, ns| async move { - // Create a service configured to do consecutive failure accrual with - // max number of failures and with default backoff - let svc_max_fails = create_annotated_service( - &client, - &ns, - "no-backoff-svc", - 80, - BTreeMap::from([ - ( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), - "8".to_string(), - ), - ]), - ) - .await; - - parent_with_consecutive_failure_accrual_defaults_max_fails( - Resource::Service(svc_max_fails), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_consecutive_failure_accrual_defaults_jitter() { - with_temp_ns(|client, ns| async move { - // Create a service 
configured to do consecutive failure accrual with - // only the jitter ratio configured in the backoff - let svc_jitter = create_annotated_service( - &client, - &ns, - "only-jitter-svc", - 80, - BTreeMap::from([ - ( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), - "1.0".to_string(), - ), - ]), - ) - .await; - - parent_with_consecutive_failure_accrual_defaults_max_jitter( - Resource::Service(svc_jitter), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_consecutive_failure_accrual_defaults_no_config() { - with_temp_ns(|client, ns| async move { - // Create a egress network configured to do consecutive failure accrual, but - // with no additional configuration - let egress_no_config = create_annotated_egress_network( - &client, - &ns, - "default-accrual-egress", - BTreeMap::from([( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - )]), - ) - .await; - let status = await_egress_net_status(&client, &ns, "default-accrual-egress").await; - assert_status_accepted(status.conditions); - - parent_with_consecutive_failure_accrual_defaults_no_config( - Resource::EgressNetwork(egress_no_config), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_consecutive_failure_accrual_defaults_max_fails() { - with_temp_ns(|client, ns| async move { - // Create a egress network configured to do consecutive failure accrual with - // max number of failures and with default backoff - let egress_max_fails = create_annotated_egress_network( - &client, - &ns, - "no-backoff-egress", - BTreeMap::from([ - ( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), - "8".to_string(), - ), - ]), - ) - 
.await; - let status = await_egress_net_status(&client, &ns, "no-backoff-egress").await; - assert_status_accepted(status.conditions); - - parent_with_consecutive_failure_accrual_defaults_max_fails( - Resource::EgressNetwork(egress_max_fails), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_consecutive_failure_accrual_defaults_jitter() { - with_temp_ns(|client, ns| async move { - // Create an egress net configured to do consecutive failure accrual with - // only the jitter ratio configured in the backoff - let egress_jitter = create_annotated_egress_network( - &client, - &ns, - "only-jitter-egress", - BTreeMap::from([ - ( - "balancer.linkerd.io/failure-accrual".to_string(), - "consecutive".to_string(), - ), - ( - "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), - "1.0".to_string(), - ), - ]), - ) - .await; - let status = await_egress_net_status(&client, &ns, "only-jitter-egress").await; - assert_status_accepted(status.conditions); - - parent_with_consecutive_failure_accrual_defaults_max_jitter( - Resource::EgressNetwork(egress_jitter), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_default_failure_accrual() { - with_temp_ns(|client, ns| async move { - // Default config for Service, no failure accrual - let svc_default = create_service(&client, &ns, "default-failure-accrual", 80).await; - - // Create Service with consecutive failure accrual config for - // max_failures but no mode - let svc_max_fails = create_annotated_service( - &client, - &ns, - "default-max-failure-svc", - 80, - BTreeMap::from([( - "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), - "8".to_string(), - )]), - ) - .await; - - parent_with_default_failure_accrual( - Resource::Service(svc_default), - Resource::Service(svc_max_fails), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = 
"current_thread")] -async fn egress_net_with_default_failure_accrual() { - with_temp_ns(|client, ns| async move { - // Default config for EgressNetwork, no failure accrual - let egress_default = create_egress_network(&client, &ns, "default-failure-accrual").await; - let status = await_egress_net_status(&client, &ns, "default-failure-accrual").await; - assert_status_accepted(status.conditions); - - // Create EgressNetwork with consecutive failure accrual config for - // max_failures but no mode - let egress_max_fails = create_annotated_egress_network( - &client, - &ns, - "default-max-failure-egress", - BTreeMap::from([( - "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), - "8".to_string(), - )]), - ) - .await; - let status = await_egress_net_status(&client, &ns, "default-max-failure-egress").await; - assert_status_accepted(status.conditions); - - parent_with_default_failure_accrual( - Resource::EgressNetwork(egress_default), - Resource::EgressNetwork(egress_max_fails), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn opaque_service() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_opaque_service(&client, &ns, "my-svc", 4191).await; - opaque_parent(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn opaque_egress_net() { - with_temp_ns(|client, ns| async move { - // Create an egress network - let egress = create_opaque_egress_network(&client, &ns, "my-svc", 4191).await; - opaque_parent(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn route_with_filters_service() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - let backend = mk_service(&ns, "backend", 4191); - - route_with_filters( - Resource::Service(svc), - 
Resource::Service(backend), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn route_with_filters_egress_net() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - route_with_filters( - Resource::EgressNetwork(egress.clone()), - Resource::EgressNetwork(egress), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn backend_with_filters_service() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - let backend_svc = create_service(&client, &ns, "backend", 8888).await; - backend_with_filters( - Resource::Service(svc), - Resource::Service(backend_svc), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn backend_with_filters_egress_net() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - backend_with_filters( - Resource::EgressNetwork(egress.clone()), - Resource::EgressNetwork(egress), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn http_route_with_no_port() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let mut rx_4191 = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config_4191 = rx_4191 - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config_4191); - - let mut rx_9999 = 
retry_watch_outbound_policy(&client, &ns, &svc, 9999).await; - let config_9999 = rx_9999 - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config_9999); - - // There should be a default route. - detect_http_routes(&config_4191, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - detect_http_routes(&config_9999, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 9999); - }); - - let _route = create(&client, mk_http_route(&ns, "foo-route", &svc, None).build()).await; - await_route_status(&client, &ns, "foo-route").await; - - let config_4191 = rx_4191 - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config_4191); - - // The route should apply to the service. - detect_http_routes(&config_4191, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - - let config_9999 = rx_9999 - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config_9999); - - // The route should apply to other ports too. 
- detect_http_routes(&config_9999, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn producer_route() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?producer_config); - - let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - // There should be a default route. - detect_http_routes(&producer_config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - detect_http_routes(&consumer_config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - // A route created in the same namespace as its parent service is called - // a producer route. It should be returned in outbound policy requests - // for that service from ALL namespaces. 
- let _route = create( - &client, - mk_http_route(&ns, "foo-route", &svc, Some(4191)).build(), - ) - .await; - await_route_status(&client, &ns, "foo-route").await; - - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?producer_config); - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - // The route should be returned in queries from the producer namespace. - detect_http_routes(&producer_config, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - - // The route should be returned in queries from a consumer namespace. - detect_http_routes(&consumer_config, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn pre_existing_producer_route() { - // We test the scenario where outbound policy watches are initiated after - // a produce route already exists. - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - // A route created in the same namespace as its parent service is called - // a producer route. It should be returned in outbound policy requests - // for that service from ALL namespaces. 
- let _route = create( - &client, - mk_http_route(&ns, "foo-route", &svc, Some(4191)).build(), - ) - .await; - await_route_status(&client, &ns, "foo-route").await; - - let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?producer_config); - - let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - // The route should be returned in queries from the producer namespace. - detect_http_routes(&producer_config, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - - // The route should be returned in queries from a consumer namespace. - detect_http_routes(&consumer_config, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn consumer_route() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let consumer_ns_name = format!("{}-consumer", ns); - let consumer_ns = create_cluster_scoped( - &client, - k8s::Namespace { - metadata: k8s::ObjectMeta { - name: Some(consumer_ns_name.clone()), - labels: Some(convert_args!(btreemap!( - "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), - ))), - ..Default::default() - }, - ..Default::default() - }, - ) - .await; - - let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - 
tracing::trace!(?producer_config); - - let mut consumer_rx = - retry_watch_outbound_policy(&client, &consumer_ns_name, &svc, 4191).await; - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - let mut other_rx = retry_watch_outbound_policy(&client, "other_ns", &svc, 4191).await; - let other_config = other_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?other_config); - - // There should be a default route. - detect_http_routes(&producer_config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - detect_http_routes(&consumer_config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - detect_http_routes(&other_config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - // A route created in a different namespace as its parent service is - // called a consumer route. It should be returned in outbound policy - // requests for that service ONLY when the request comes from the - // consumer namespace. - let _route = create( - &client, - mk_http_route(&consumer_ns_name, "foo-route", &svc, Some(4191)).build(), - ) - .await; - await_route_status(&client, &consumer_ns_name, "foo-route").await; - - // The route should NOT be returned in queries from the producer namespace. - // There should be a default route. - assert!(producer_rx.next().now_or_never().is_none()); - - // The route should be returned in queries from the same consumer - // namespace. 
- let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - detect_http_routes(&consumer_config, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - - // The route should NOT be returned in queries from a different consumer - // namespace. - assert!(other_rx.next().now_or_never().is_none()); - - delete_cluster_scoped(&client, consumer_ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn http_route_retries_and_timeouts_service() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - http_route_retries_and_timeouts(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn http_route_retries_and_timeouts_egress_net() { - with_temp_ns(|client, ns| async move { - // Create an egress network - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - http_route_retries_and_timeouts(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_retries_and_timeouts() { - with_temp_ns(|client, ns| async move { - // Create a service - let mut svc = mk_service(&ns, "my-svc", 4191); - svc.annotations_mut() - .insert("retry.linkerd.io/http".to_string(), "5xx".to_string()); - svc.annotations_mut() - .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); - let svc = Resource::Service(create(&client, svc).await); - - retries_and_timeouts(svc, &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_retries_and_timeouts() { - with_temp_ns(|client, ns| async move { - // Create a egress 
net - let mut egress = mk_egress_net(&ns, "my-egress"); - egress - .annotations_mut() - .insert("retry.linkerd.io/http".to_string(), "5xx".to_string()); - egress - .annotations_mut() - .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); - let egress = Resource::EgressNetwork(create(&client, egress).await); - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - retries_and_timeouts(egress, &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_http_route_reattachment() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - http_route_reattachment(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_http_route_reattachment() { - with_temp_ns(|client, ns| async move { - // Create a egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - http_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -/* Helpers */ - -struct HttpRouteBuilder(k8s::policy::HttpRoute); - -fn mk_http_route(ns: &str, name: &str, parent: &Resource, port: Option) -> HttpRouteBuilder { - use k8s::policy::httproute as api; - - HttpRouteBuilder(api::HttpRoute { - metadata: kube::api::ObjectMeta { - namespace: Some(ns.to_string()), - name: Some(name.to_string()), - ..Default::default() - }, - spec: api::HttpRouteSpec { - inner: api::CommonRouteSpec { - parent_refs: Some(vec![api::ParentReference { - group: Some(parent.group()), - kind: Some(parent.kind()), - namespace: Some(parent.namespace()), - name: parent.name(), - section_name: None, - port, - }]), - }, - hostnames: None, - rules: Some(vec![api::HttpRouteRule { - matches: 
Some(vec![api::HttpRouteMatch { - path: Some(api::HttpPathMatch::Exact { - value: "/foo".to_string(), - }), - headers: None, - query_params: None, - method: Some("GET".to_string()), - }]), - filters: None, - backend_refs: None, - timeouts: None, - }]), - }, - status: None, - }) -} - -impl HttpRouteBuilder { - fn with_backends( - self, - backends: Option<&[Resource]>, - backends_ns: Option, - backend_filters: Option>, - ) -> Self { - let mut route = self.0; - let backend_refs = backends.map(|backends| { - backends - .iter() - .map(|backend| k8s::policy::httproute::HttpBackendRef { - backend_ref: Some(k8s_gateway_api::BackendRef { - weight: None, - inner: k8s_gateway_api::BackendObjectReference { - name: backend.name(), - port: Some(8888), - group: Some(backend.group()), - kind: Some(backend.kind()), - namespace: backends_ns.clone(), - }, - }), - filters: backend_filters.clone(), - }) - .collect() - }); - route.spec.rules.iter_mut().flatten().for_each(|rule| { - rule.backend_refs = backend_refs.clone(); - }); - Self(route) - } - - fn with_filters(self, filters: Option>) -> Self { - let mut route = self.0; - route - .spec - .rules - .iter_mut() - .flatten() - .for_each(|rule| rule.filters = filters.clone()); - Self(route) - } - - fn with_annotations(self, annotations: BTreeMap) -> Self { - let mut route = self.0; - route.metadata.annotations = Some(annotations); - Self(route) - } - - fn build(self) -> k8s::policy::HttpRoute { - self.0 - } -} - -fn mk_empty_http_route( - ns: &str, - name: &str, - parent: &Resource, - port: u16, -) -> k8s::policy::HttpRoute { - use k8s::policy::httproute as api; - api::HttpRoute { - metadata: kube::api::ObjectMeta { - namespace: Some(ns.to_string()), - name: Some(name.to_string()), - ..Default::default() - }, - spec: api::HttpRouteSpec { - inner: api::CommonRouteSpec { - parent_refs: Some(vec![api::ParentReference { - group: Some(parent.group()), - kind: Some(parent.kind()), - namespace: Some(parent.namespace()), - name: parent.name(), 
- section_name: None, - port: Some(port), - }]), - }, - hostnames: None, - rules: Some(vec![]), - }, - status: None, - } -} - -async fn parent_with_no_http_routes(parent: Resource, client: &kube::Client, ns: &str) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); -} - -async fn parent_with_http_route_without_rules(parent: Resource, client: &kube::Client, ns: &str) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - let _route = create(client, mk_empty_http_route(ns, "foo-route", &parent, 4191)).await; - await_route_status(client, ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a route with no rules. 
- detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - assert_eq!(route.rules.len(), 0); - }); -} - -async fn parent_with_http_routes_without_backends( - parent: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - let _route = create( - client, - mk_http_route(ns, "foo-route", &parent, Some(4191)).build(), - ) - .await; - await_route_status(client, ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a route with the logical backend. - detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let backends = route_backends_first_available(route); - let backend = assert_singleton(backends); - assert_backend_matches_parent(backend, &parent, 4191); - }); -} - -async fn parent_with_http_routes_with_backend( - parent: Resource, - rule_backend: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a default route. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - let backends = [rule_backend.clone()]; - let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( - Some(&backends), - None, - None, - ); - let _route = create(client, route.build()).await; - await_route_status(client, ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a route with a backend with no filters. - detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let backends = route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &rule_backend, 8888); - let filters = &backend.backend.as_ref().unwrap().filters; - assert_eq!(filters.len(), 0); - }); -} - -async fn parent_with_http_routes_with_invalid_backend( - parent: Resource, - backend: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a default route. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - let backends = [backend]; - let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( - Some(&backends), - None, - None, - ); - let _route = create(client, route.build()).await; - await_route_status(client, ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a route with a backend. - detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let backends = route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_backend_has_failure_filter(backend); - }); -} - -async fn parent_with_multiple_http_routes(parent: Resource, client: &kube::Client, ns: &str) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - // Routes should be returned in sorted order by creation timestamp then - // name. To ensure that this test isn't timing dependant, routes should - // be created in alphabetical order. - let _a_route = create( - client, - mk_http_route(ns, "a-route", &parent, Some(4191)).build(), - ) - .await; - await_route_status(client, ns, "a-route").await; - - // First route update. 
- let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - let _b_route = create( - client, - mk_http_route(ns, "b-route", &parent, Some(4191)).build(), - ) - .await; - await_route_status(client, ns, "b-route").await; - - // Second route update. - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - detect_http_routes(&config, |routes| { - let num_routes = match parent { - Resource::EgressNetwork(_) => 3, // three routes for egress net 2 configured + 1 default - Resource::Service(_) => 2, // two routes for service - }; - assert_eq!(routes.len(), num_routes); - assert_eq!(route_name(&routes[0]), "a-route"); - assert_eq!(route_name(&routes[1]), "b-route"); - }); -} - -async fn parent_with_consecutive_failure_accrual( - parent: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - detect_failure_accrual(&config, |accrual| { - let consecutive = failure_accrual_consecutive(accrual); - assert_eq!(8, consecutive.max_failures); - assert_eq!( - &grpc::outbound::ExponentialBackoff { - min_backoff: Some(Duration::from_secs(10).try_into().unwrap()), - max_backoff: Some(Duration::from_secs(600).try_into().unwrap()), - jitter_ratio: 1.0_f32, - }, - consecutive - .backoff - .as_ref() - .expect("backoff must be configured") - ); - }); -} - -async fn parent_with_consecutive_failure_accrual_defaults_no_config( - parent: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let 
config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Expect default max_failures and default backoff - detect_failure_accrual(&config, |accrual| { - let consecutive = failure_accrual_consecutive(accrual); - assert_eq!(7, consecutive.max_failures); - assert_default_accrual_backoff!(consecutive - .backoff - .as_ref() - .expect("backoff must be configured")); - }); -} - -async fn parent_with_consecutive_failure_accrual_defaults_max_fails( - parent: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Expect default backoff and overridden max_failures - detect_failure_accrual(&config, |accrual| { - let consecutive = failure_accrual_consecutive(accrual); - assert_eq!(8, consecutive.max_failures); - assert_default_accrual_backoff!(consecutive - .backoff - .as_ref() - .expect("backoff must be configured")); - }); -} - -async fn parent_with_consecutive_failure_accrual_defaults_max_jitter( - parent: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Expect defaults for everything except for the jitter ratio - detect_failure_accrual(&config, |accrual| { - let consecutive = failure_accrual_consecutive(accrual); - assert_eq!(7, consecutive.max_failures); - assert_eq!( - &grpc::outbound::ExponentialBackoff { - min_backoff: Some(Duration::from_secs(1).try_into().unwrap()), - max_backoff: Some(Duration::from_secs(60).try_into().unwrap()), - jitter_ratio: 1.0_f32, - }, - consecutive - .backoff - .as_ref() - .expect("backoff must be 
configured") - ); - }); -} - -async fn parent_with_default_failure_accrual( - parent_default_config: Resource, - parent_max_failures: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent_default_config, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Expect failure accrual config to be default (no failure accrual) - detect_failure_accrual(&config, |accrual| { - assert!( - accrual.is_none(), - "consecutive failure accrual should not be configured for service" - ); - }); - - let mut rx = retry_watch_outbound_policy(client, ns, &parent_max_failures, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Expect failure accrual config to be default (no failure accrual) - detect_failure_accrual(&config, |accrual| { - assert!( - accrual.is_none(), - "consecutive failure accrual should not be configured for service" - ) - }); -} - -async fn opaque_parent(parent: Resource, client: &kube::Client, ns: &str) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Proxy protocol should be opaque. 
- match config.protocol.unwrap().kind.unwrap() { - grpc::outbound::proxy_protocol::Kind::Opaque(_) => {} - _ => panic!("proxy protocol must be Opaque"), - }; -} - -async fn route_with_filters(parent: Resource, backend: Resource, client: &kube::Client, ns: &str) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - let backends = [backend.clone()]; - let route = mk_http_route(ns, "foo-route", &parent, Some(4191)) - .with_backends(Some(&backends), None, None) - .with_filters(Some(vec![ - k8s::policy::httproute::HttpRouteFilter::RequestHeaderModifier { - request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { - set: Some(vec![k8s_gateway_api::HttpHeader { - name: "set".to_string(), - value: "set-value".to_string(), - }]), - add: Some(vec![k8s_gateway_api::HttpHeader { - name: "add".to_string(), - value: "add-value".to_string(), - }]), - remove: Some(vec!["remove".to_string()]), - }, - }, - k8s::policy::httproute::HttpRouteFilter::RequestRedirect { - request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { - scheme: Some("http".to_string()), - hostname: Some("host".to_string()), - path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { - replace_prefix_match: "/path".to_string(), - }), - port: Some(5555), - status_code: Some(302), - }, - }, - ])); - let _route = create(client, route.build()).await; - await_route_status(client, ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - // There should be a route with filters. 
- detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let rule = assert_singleton(&route.rules); - let filters = &rule.filters; - assert_eq!( - *filters, - vec![ - grpc::outbound::http_route::Filter { - kind: Some( - grpc::outbound::http_route::filter::Kind::RequestHeaderModifier( - grpc::http_route::RequestHeaderModifier { - add: Some(grpc::http_types::Headers { - headers: vec![grpc::http_types::headers::Header { - name: "add".to_string(), - value: "add-value".into(), - }] - }), - set: Some(grpc::http_types::Headers { - headers: vec![grpc::http_types::headers::Header { - name: "set".to_string(), - value: "set-value".into(), - }] - }), - remove: vec!["remove".to_string()], - } - ) - ) - }, - grpc::outbound::http_route::Filter { - kind: Some(grpc::outbound::http_route::filter::Kind::Redirect( - grpc::http_route::RequestRedirect { - scheme: Some(grpc::http_types::Scheme { - r#type: Some(grpc::http_types::scheme::Type::Registered( - grpc::http_types::scheme::Registered::Http.into(), - )) - }), - host: "host".to_string(), - path: Some(linkerd2_proxy_api::http_route::PathModifier { - replace: Some( - linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( - "/path".to_string() - ) - ) - }), - port: 5555, - status: 302, - } - )) - } - ] - ); - }); -} - -async fn backend_with_filters( - parent: Resource, - backend_for_parent: Resource, - client: &kube::Client, - ns: &str, -) { - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // There should be a default route. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - let backends = [backend_for_parent.clone()]; - let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( - Some(&backends), - None, - Some(vec![ - k8s_gateway_api::HttpRouteFilter::RequestHeaderModifier { - request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { - set: Some(vec![k8s_gateway_api::HttpHeader { - name: "set".to_string(), - value: "set-value".to_string(), - }]), - add: Some(vec![k8s_gateway_api::HttpHeader { - name: "add".to_string(), - value: "add-value".to_string(), - }]), - remove: Some(vec!["remove".to_string()]), - }, - }, - k8s_gateway_api::HttpRouteFilter::RequestRedirect { - request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { - scheme: Some("http".to_string()), - hostname: Some("host".to_string()), - path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { - replace_prefix_match: "/path".to_string(), - }), - port: Some(5555), - status_code: Some(302), - }, - }, - ]), - ); - let _route = create(client, route.build()).await; - await_route_status(client, ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - // There should be a route without rule filters. 
- detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let rule = assert_singleton(&route.rules); - assert_eq!(rule.filters.len(), 0); - let backends = route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_for_parent, 8888); - let filters = &backend.backend.as_ref().unwrap().filters; - assert_eq!( - *filters, - vec![ - grpc::outbound::http_route::Filter { - kind: Some( - grpc::outbound::http_route::filter::Kind::RequestHeaderModifier( - grpc::http_route::RequestHeaderModifier { - add: Some(grpc::http_types::Headers { - headers: vec![grpc::http_types::headers::Header { - name: "add".to_string(), - value: "add-value".into(), - }] - }), - set: Some(grpc::http_types::Headers { - headers: vec![grpc::http_types::headers::Header { - name: "set".to_string(), - value: "set-value".into(), - }] - }), - remove: vec!["remove".to_string()], - } - ) - ) - }, - grpc::outbound::http_route::Filter { - kind: Some(grpc::outbound::http_route::filter::Kind::Redirect( - grpc::http_route::RequestRedirect { - scheme: Some(grpc::http_types::Scheme { - r#type: Some(grpc::http_types::scheme::Type::Registered( - grpc::http_types::scheme::Registered::Http.into(), - )) - }), - host: "host".to_string(), - path: Some(linkerd2_proxy_api::http_route::PathModifier { - replace: Some( - linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( - "/path".to_string() - ) - ) - }), - port: 5555, - status: 302, - } - )) - } - ] - ); - }); -} - -async fn http_route_retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { - let _route = create( - client, - mk_http_route(ns, "foo-route", &parent, Some(4191)) - .with_annotations( - vec![ - ("retry.linkerd.io/http".to_string(), "5xx".to_string()), - ("timeout.linkerd.io/response".to_string(), "10s".to_string()), - ] - .into_iter() - .collect(), - ) - .build(), - ) - .await; - - 
await_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let rule = assert_singleton(&route.rules); - let conditions = rule - .retry - .as_ref() - .expect("retry config expected") - .conditions - .as_ref() - .expect("retry conditions expected"); - let status_range = assert_singleton(&conditions.status_ranges); - assert_eq!(status_range.start, 500); - assert_eq!(status_range.end, 599); - let timeout = rule - .timeouts - .as_ref() - .expect("timeouts expected") - .response - .as_ref() - .expect("response timeout expected"); - assert_eq!(timeout.seconds, 10); - }); -} - -async fn retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { - let _route = create( - client, - mk_http_route(ns, "foo-route", &parent, Some(4191)) - .with_annotations( - vec![ - // Route annotations override the timeout config specified - // on the service. - ("timeout.linkerd.io/request".to_string(), "5s".to_string()), - ] - .into_iter() - .collect(), - ) - .build(), - ) - .await; - await_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - let rule = assert_singleton(&route.rules); - let conditions = rule - .retry - .as_ref() - .expect("retry config expected") - .conditions - .as_ref() - .expect("retry conditions expected"); - let status_range = assert_singleton(&conditions.status_ranges); - // Retry config inherited from the service. 
- assert_eq!(status_range.start, 500); - assert_eq!(status_range.end, 599); - let timeouts = rule.timeouts.as_ref().expect("timeouts expected"); - // Service timeout config overridden by route timeout config. - assert_eq!(timeouts.response, None); - let request_timeout = timeouts.request.as_ref().expect("request timeout expected"); - assert_eq!(request_timeout.seconds, 5); - }); -} - -async fn http_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { - let mut route = create(client, mk_empty_http_route(ns, "foo-route", &parent, 4191)).await; - await_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be attached. - detect_http_routes(&config, |routes| { - let route: &grpc::outbound::HttpRoute = assert_route_attached(routes, &parent); - assert_route_name_eq(route, "foo-route"); - }); - - route - .spec - .inner - .parent_refs - .as_mut() - .unwrap() - .first_mut() - .unwrap() - .name = "other".to_string(); - update(client, route.clone()).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be unattached and the default route should be present. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - route - .spec - .inner - .parent_refs - .as_mut() - .unwrap() - .first_mut() - .unwrap() - .name = parent.name(); - update(client, route).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be attached again. - detect_http_routes(&config, |routes| { - let route = assert_route_attached(routes, &parent); - assert_route_name_eq(route, "foo-route"); - }); -} +// use std::{collections::BTreeMap, time::Duration}; + +// use futures::prelude::*; +// use kube::ResourceExt; +// use linkerd_policy_controller_k8s_api as k8s; +// use linkerd_policy_test::{ +// assert_default_accrual_backoff, assert_resource_meta, assert_status_accepted, +// await_egress_net_status, await_route_status, create, create_annotated_egress_network, +// create_annotated_service, create_cluster_scoped, create_egress_network, +// create_opaque_egress_network, create_opaque_service, create_service, delete_cluster_scoped, +// grpc, mk_egress_net, mk_service, outbound_api::*, update, with_temp_ns, Resource, +// }; +// use maplit::{btreemap, convert_args}; + +// #[tokio::test(flavor = "current_thread")] +// async fn service_with_http_routes_with_backend() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// let backend_svc = create_service(&client, &ns, "backend", 8888).await; +// parent_with_http_routes_with_backend( +// Resource::Service(svc), +// Resource::Service(backend_svc), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_with_http_routes_with_backend() { +// with_temp_ns(|client, ns| async move { +// // 
Create a service +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// parent_with_http_routes_with_backend( +// Resource::EgressNetwork(egress.clone()), +// Resource::EgressNetwork(egress), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_with_http_routes_with_cross_namespace_backend() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + +// let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &svc, 4191); + +// // There should be a default route. 
+// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &svc, 4191); +// }); + +// let backend_ns_name = format!("{}-backend", ns); +// let backend_ns = create_cluster_scoped( +// &client, +// k8s::Namespace { +// metadata: k8s::ObjectMeta { +// name: Some(backend_ns_name.clone()), +// labels: Some(convert_args!(btreemap!( +// "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), +// ))), +// ..Default::default() +// }, +// ..Default::default() +// }, +// ) +// .await; +// let backend_name = "backend"; +// let backend_svc = +// Resource::Service(create_service(&client, &backend_ns_name, backend_name, 8888).await); +// let backends = [backend_svc.clone()]; +// let route = mk_http_route(&ns, "foo-route", &svc, Some(4191)).with_backends( +// Some(&backends), +// Some(backend_ns_name), +// None, +// ); +// let _route = create(&client, route.build()).await; +// await_route_status(&client, &ns, "foo-route").await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &svc, 4191); + +// // There should be a route with a backend with no filters. +// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// let backends = route_backends_random_available(route); +// let backend = assert_singleton(backends); +// assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_svc, 8888); +// let filters = &backend.backend.as_ref().unwrap().filters; +// assert_eq!(filters.len(), 0); +// }); + +// delete_cluster_scoped(&client, backend_ns).await +// }) +// .await; +// } + +// // TODO: Test fails until handling of invalid backends is implemented. 
+// #[tokio::test(flavor = "current_thread")] +// async fn service_with_http_routes_with_invalid_backend() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// let backend = mk_service(&ns, "invalid", 4191); + +// parent_with_http_routes_with_invalid_backend( +// Resource::Service(svc), +// Resource::Service(backend), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// // TODO: Test fails until handling of invalid backends is implemented. +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_with_http_routes_with_invalid_backend() { +// with_temp_ns(|client, ns| async move { +// // Create an egress network +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// let backend = mk_egress_net(&ns, "invalid"); + +// parent_with_http_routes_with_invalid_backend( +// Resource::EgressNetwork(egress), +// Resource::EgressNetwork(backend), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// // TODO: Investigate why the policy controller is only returning one route in this +// // case instead of two. +// #[tokio::test(flavor = "current_thread")] +// async fn service_with_multiple_http_routes() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// parent_with_multiple_http_routes(Resource::Service(svc), &client, &ns).await; +// }) +// .await; +// } + +// // TODO: Investigate why the policy controller is only returning one route in this +// // case instead of two. 
+// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_with_multiple_http_routes() { +// with_temp_ns(|client, ns| async move { +// // Create an egress net +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// parent_with_multiple_http_routes(Resource::EgressNetwork(egress), &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_with_consecutive_failure_accrual() { +// with_temp_ns(|client, ns| async move { +// let svc = create_annotated_service( +// &client, +// &ns, +// "consecutive-accrual-svc", +// 80, +// BTreeMap::from([ +// ( +// "balancer.linkerd.io/failure-accrual".to_string(), +// "consecutive".to_string(), +// ), +// ( +// "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), +// "8".to_string(), +// ), +// ( +// "balancer.linkerd.io/failure-accrual-consecutive-min-penalty".to_string(), +// "10s".to_string(), +// ), +// ( +// "balancer.linkerd.io/failure-accrual-consecutive-max-penalty".to_string(), +// "10m".to_string(), +// ), +// ( +// "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), +// "1.0".to_string(), +// ), +// ]), +// ) +// .await; +// parent_with_consecutive_failure_accrual(Resource::Service(svc), &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_with_consecutive_failure_accrual() { +// with_temp_ns(|client, ns| async move { +// let egress = create_annotated_egress_network( +// &client, +// &ns, +// "consecutive-accrual-egress", +// BTreeMap::from([ +// ( +// "balancer.linkerd.io/failure-accrual".to_string(), +// "consecutive".to_string(), +// ), +// ( +// "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), +// "8".to_string(), +// ), +// ( +// 
"balancer.linkerd.io/failure-accrual-consecutive-min-penalty".to_string(), +// "10s".to_string(), +// ), +// ( +// "balancer.linkerd.io/failure-accrual-consecutive-max-penalty".to_string(), +// "10m".to_string(), +// ), +// ( +// "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), +// "1.0".to_string(), +// ), +// ]), +// ) +// .await; +// let status = await_egress_net_status(&client, &ns, "consecutive-accrual-egress").await; +// assert_status_accepted(status.conditions); + +// parent_with_consecutive_failure_accrual(Resource::EgressNetwork(egress), &client, &ns) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_with_consecutive_failure_accrual_defaults_no_config() { +// with_temp_ns(|client, ns| async move { +// // Create a service configured to do consecutive failure accrual, but +// // with no additional configuration +// let svc_no_config = create_annotated_service( +// &client, +// &ns, +// "default-accrual-svc", +// 80, +// BTreeMap::from([( +// "balancer.linkerd.io/failure-accrual".to_string(), +// "consecutive".to_string(), +// )]), +// ) +// .await; + +// parent_with_consecutive_failure_accrual_defaults_no_config( +// Resource::Service(svc_no_config), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_with_consecutive_failure_accrual_defaults_max_fails() { +// with_temp_ns(|client, ns| async move { +// // Create a service configured to do consecutive failure accrual with +// // max number of failures and with default backoff +// let svc_max_fails = create_annotated_service( +// &client, +// &ns, +// "no-backoff-svc", +// 80, +// BTreeMap::from([ +// ( +// "balancer.linkerd.io/failure-accrual".to_string(), +// "consecutive".to_string(), +// ), +// ( +// "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), +// "8".to_string(), +// ), +// ]), +// ) +// .await; + +// 
parent_with_consecutive_failure_accrual_defaults_max_fails( +// Resource::Service(svc_max_fails), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_with_consecutive_failure_accrual_defaults_jitter() { +// with_temp_ns(|client, ns| async move { +// // Create a service configured to do consecutive failure accrual with +// // only the jitter ratio configured in the backoff +// let svc_jitter = create_annotated_service( +// &client, +// &ns, +// "only-jitter-svc", +// 80, +// BTreeMap::from([ +// ( +// "balancer.linkerd.io/failure-accrual".to_string(), +// "consecutive".to_string(), +// ), +// ( +// "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), +// "1.0".to_string(), +// ), +// ]), +// ) +// .await; + +// parent_with_consecutive_failure_accrual_defaults_max_jitter( +// Resource::Service(svc_jitter), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_with_consecutive_failure_accrual_defaults_no_config() { +// with_temp_ns(|client, ns| async move { +// // Create a egress network configured to do consecutive failure accrual, but +// // with no additional configuration +// let egress_no_config = create_annotated_egress_network( +// &client, +// &ns, +// "default-accrual-egress", +// BTreeMap::from([( +// "balancer.linkerd.io/failure-accrual".to_string(), +// "consecutive".to_string(), +// )]), +// ) +// .await; +// let status = await_egress_net_status(&client, &ns, "default-accrual-egress").await; +// assert_status_accepted(status.conditions); + +// parent_with_consecutive_failure_accrual_defaults_no_config( +// Resource::EgressNetwork(egress_no_config), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_with_consecutive_failure_accrual_defaults_max_fails() { +// with_temp_ns(|client, ns| 
async move { +// // Create a egress network configured to do consecutive failure accrual with +// // max number of failures and with default backoff +// let egress_max_fails = create_annotated_egress_network( +// &client, +// &ns, +// "no-backoff-egress", +// BTreeMap::from([ +// ( +// "balancer.linkerd.io/failure-accrual".to_string(), +// "consecutive".to_string(), +// ), +// ( +// "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), +// "8".to_string(), +// ), +// ]), +// ) +// .await; +// let status = await_egress_net_status(&client, &ns, "no-backoff-egress").await; +// assert_status_accepted(status.conditions); + +// parent_with_consecutive_failure_accrual_defaults_max_fails( +// Resource::EgressNetwork(egress_max_fails), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_with_consecutive_failure_accrual_defaults_jitter() { +// with_temp_ns(|client, ns| async move { +// // Create an egress net configured to do consecutive failure accrual with +// // only the jitter ratio configured in the backoff +// let egress_jitter = create_annotated_egress_network( +// &client, +// &ns, +// "only-jitter-egress", +// BTreeMap::from([ +// ( +// "balancer.linkerd.io/failure-accrual".to_string(), +// "consecutive".to_string(), +// ), +// ( +// "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), +// "1.0".to_string(), +// ), +// ]), +// ) +// .await; +// let status = await_egress_net_status(&client, &ns, "only-jitter-egress").await; +// assert_status_accepted(status.conditions); + +// parent_with_consecutive_failure_accrual_defaults_max_jitter( +// Resource::EgressNetwork(egress_jitter), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_with_default_failure_accrual() { +// with_temp_ns(|client, ns| async move { +// // Default config for Service, no failure accrual 
+// let svc_default = create_service(&client, &ns, "default-failure-accrual", 80).await; + +// // Create Service with consecutive failure accrual config for +// // max_failures but no mode +// let svc_max_fails = create_annotated_service( +// &client, +// &ns, +// "default-max-failure-svc", +// 80, +// BTreeMap::from([( +// "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), +// "8".to_string(), +// )]), +// ) +// .await; + +// parent_with_default_failure_accrual( +// Resource::Service(svc_default), +// Resource::Service(svc_max_fails), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_with_default_failure_accrual() { +// with_temp_ns(|client, ns| async move { +// // Default config for EgressNetwork, no failure accrual +// let egress_default = create_egress_network(&client, &ns, "default-failure-accrual").await; +// let status = await_egress_net_status(&client, &ns, "default-failure-accrual").await; +// assert_status_accepted(status.conditions); + +// // Create EgressNetwork with consecutive failure accrual config for +// // max_failures but no mode +// let egress_max_fails = create_annotated_egress_network( +// &client, +// &ns, +// "default-max-failure-egress", +// BTreeMap::from([( +// "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), +// "8".to_string(), +// )]), +// ) +// .await; +// let status = await_egress_net_status(&client, &ns, "default-max-failure-egress").await; +// assert_status_accepted(status.conditions); + +// parent_with_default_failure_accrual( +// Resource::EgressNetwork(egress_default), +// Resource::EgressNetwork(egress_max_fails), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn opaque_service() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_opaque_service(&client, &ns, "my-svc", 
4191).await; +// opaque_parent(Resource::Service(svc), &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn opaque_egress_net() { +// with_temp_ns(|client, ns| async move { +// // Create an egress network +// let egress = create_opaque_egress_network(&client, &ns, "my-svc", 4191).await; +// opaque_parent(Resource::EgressNetwork(egress), &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn route_with_filters_service() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// let backend = mk_service(&ns, "backend", 4191); + +// route_with_filters( +// Resource::Service(svc), +// Resource::Service(backend), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn route_with_filters_egress_net() { +// with_temp_ns(|client, ns| async move { +// // Create an egress net +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// route_with_filters( +// Resource::EgressNetwork(egress.clone()), +// Resource::EgressNetwork(egress), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn backend_with_filters_service() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// let backend_svc = create_service(&client, &ns, "backend", 8888).await; +// backend_with_filters( +// Resource::Service(svc), +// Resource::Service(backend_svc), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn backend_with_filters_egress_net() { +// with_temp_ns(|client, ns| async move { 
+// // Create an egress net +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// backend_with_filters( +// Resource::EgressNetwork(egress.clone()), +// Resource::EgressNetwork(egress), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn http_route_with_no_port() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + +// let mut rx_4191 = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; +// let config_4191 = rx_4191 +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config_4191); + +// let mut rx_9999 = retry_watch_outbound_policy(&client, &ns, &svc, 9999).await; +// let config_9999 = rx_9999 +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config_9999); + +// // There should be a default route. +// detect_http_routes(&config_4191, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &svc, 4191); +// }); +// detect_http_routes(&config_9999, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &svc, 9999); +// }); + +// let _route = create(&client, mk_http_route(&ns, "foo-route", &svc, None).build()).await; +// await_route_status(&client, &ns, "foo-route").await; + +// let config_4191 = rx_4191 +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config_4191); + +// // The route should apply to the service. 
+// detect_http_routes(&config_4191, |routes| { +// let route = assert_singleton(routes); +// assert_route_name_eq(route, "foo-route"); +// }); + +// let config_9999 = rx_9999 +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config_9999); + +// // The route should apply to other ports too. +// detect_http_routes(&config_9999, |routes| { +// let route = assert_singleton(routes); +// assert_route_name_eq(route, "foo-route"); +// }); +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn producer_route() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + +// let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; +// let producer_config = producer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?producer_config); + +// let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; +// let consumer_config = consumer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?consumer_config); + +// // There should be a default route. +// detect_http_routes(&producer_config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &svc, 4191); +// }); +// detect_http_routes(&consumer_config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &svc, 4191); +// }); + +// // A route created in the same namespace as its parent service is called +// // a producer route. It should be returned in outbound policy requests +// // for that service from ALL namespaces. 
+// let _route = create( +// &client, +// mk_http_route(&ns, "foo-route", &svc, Some(4191)).build(), +// ) +// .await; +// await_route_status(&client, &ns, "foo-route").await; + +// let producer_config = producer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?producer_config); +// let consumer_config = consumer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?consumer_config); + +// // The route should be returned in queries from the producer namespace. +// detect_http_routes(&producer_config, |routes| { +// let route = assert_singleton(routes); +// assert_route_name_eq(route, "foo-route"); +// }); + +// // The route should be returned in queries from a consumer namespace. +// detect_http_routes(&consumer_config, |routes| { +// let route = assert_singleton(routes); +// assert_route_name_eq(route, "foo-route"); +// }); +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn pre_existing_producer_route() { +// // We test the scenario where outbound policy watches are initiated after +// // a produce route already exists. +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + +// // A route created in the same namespace as its parent service is called +// // a producer route. It should be returned in outbound policy requests +// // for that service from ALL namespaces. 
+// let _route = create( +// &client, +// mk_http_route(&ns, "foo-route", &svc, Some(4191)).build(), +// ) +// .await; +// await_route_status(&client, &ns, "foo-route").await; + +// let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; +// let producer_config = producer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?producer_config); + +// let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; +// let consumer_config = consumer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?consumer_config); + +// // The route should be returned in queries from the producer namespace. +// detect_http_routes(&producer_config, |routes| { +// let route = assert_singleton(routes); +// assert_route_name_eq(route, "foo-route"); +// }); + +// // The route should be returned in queries from a consumer namespace. 
+// detect_http_routes(&consumer_config, |routes| { +// let route = assert_singleton(routes); +// assert_route_name_eq(route, "foo-route"); +// }); +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn consumer_route() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + +// let consumer_ns_name = format!("{}-consumer", ns); +// let consumer_ns = create_cluster_scoped( +// &client, +// k8s::Namespace { +// metadata: k8s::ObjectMeta { +// name: Some(consumer_ns_name.clone()), +// labels: Some(convert_args!(btreemap!( +// "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), +// ))), +// ..Default::default() +// }, +// ..Default::default() +// }, +// ) +// .await; + +// let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; +// let producer_config = producer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?producer_config); + +// let mut consumer_rx = +// retry_watch_outbound_policy(&client, &consumer_ns_name, &svc, 4191).await; +// let consumer_config = consumer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?consumer_config); + +// let mut other_rx = retry_watch_outbound_policy(&client, "other_ns", &svc, 4191).await; +// let other_config = other_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?other_config); + +// // There should be a default route. 
+// detect_http_routes(&producer_config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &svc, 4191); +// }); +// detect_http_routes(&consumer_config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &svc, 4191); +// }); +// detect_http_routes(&other_config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &svc, 4191); +// }); + +// // A route created in a different namespace as its parent service is +// // called a consumer route. It should be returned in outbound policy +// // requests for that service ONLY when the request comes from the +// // consumer namespace. +// let _route = create( +// &client, +// mk_http_route(&consumer_ns_name, "foo-route", &svc, Some(4191)).build(), +// ) +// .await; +// await_route_status(&client, &consumer_ns_name, "foo-route").await; + +// // The route should NOT be returned in queries from the producer namespace. +// // There should be a default route. +// assert!(producer_rx.next().now_or_never().is_none()); + +// // The route should be returned in queries from the same consumer +// // namespace. +// let consumer_config = consumer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?consumer_config); + +// detect_http_routes(&consumer_config, |routes| { +// let route = assert_singleton(routes); +// assert_route_name_eq(route, "foo-route"); +// }); + +// // The route should NOT be returned in queries from a different consumer +// // namespace. 
+// assert!(other_rx.next().now_or_never().is_none()); + +// delete_cluster_scoped(&client, consumer_ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn http_route_retries_and_timeouts_service() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// http_route_retries_and_timeouts(Resource::Service(svc), &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn http_route_retries_and_timeouts_egress_net() { +// with_temp_ns(|client, ns| async move { +// // Create an egress network +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// http_route_retries_and_timeouts(Resource::EgressNetwork(egress), &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_retries_and_timeouts() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let mut svc = mk_service(&ns, "my-svc", 4191); +// svc.annotations_mut() +// .insert("retry.linkerd.io/http".to_string(), "5xx".to_string()); +// svc.annotations_mut() +// .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); +// let svc = Resource::Service(create(&client, svc).await); + +// retries_and_timeouts(svc, &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_retries_and_timeouts() { +// with_temp_ns(|client, ns| async move { +// // Create a egress net +// let mut egress = mk_egress_net(&ns, "my-egress"); +// egress +// .annotations_mut() +// .insert("retry.linkerd.io/http".to_string(), "5xx".to_string()); +// egress +// .annotations_mut() +// .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); +// let egress = 
Resource::EgressNetwork(create(&client, egress).await); +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// retries_and_timeouts(egress, &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_http_route_reattachment() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// http_route_reattachment(Resource::Service(svc), &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_http_route_reattachment() { +// with_temp_ns(|client, ns| async move { +// // Create a egress net +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// http_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await; +// }) +// .await; +// } + +// /* Helpers */ +// struct HttpRouteBuilder(k8s::policy::HttpRoute); + +// fn mk_http_route(ns: &str, name: &str, parent: &Resource, port: Option) -> HttpRouteBuilder { +// use k8s::policy::httproute as api; + +// HttpRouteBuilder(api::HttpRoute { +// metadata: kube::api::ObjectMeta { +// namespace: Some(ns.to_string()), +// name: Some(name.to_string()), +// ..Default::default() +// }, +// spec: api::HttpRouteSpec { +// inner: api::CommonRouteSpec { +// parent_refs: Some(vec![api::ParentReference { +// group: Some(parent.group()), +// kind: Some(parent.kind()), +// namespace: Some(parent.namespace()), +// name: parent.name(), +// section_name: None, +// port, +// }]), +// }, +// hostnames: None, +// rules: Some(vec![api::HttpRouteRule { +// matches: Some(vec![api::HttpRouteMatch { +// path: Some(api::HttpPathMatch::Exact { +// value: "/foo".to_string(), +// }), +// headers: None, +// query_params: None, +// method: 
Some("GET".to_string()), +// }]), +// filters: None, +// backend_refs: None, +// timeouts: None, +// }]), +// }, +// status: None, +// }) +// } + +// impl HttpRouteBuilder { +// fn with_backends( +// self, +// backends: Option<&[Resource]>, +// backends_ns: Option, +// backend_filters: Option>, +// ) -> Self { +// let mut route = self.0; +// let backend_refs = backends.map(|backends| { +// backends +// .iter() +// .map(|backend| k8s::policy::httproute::HttpBackendRef { +// backend_ref: Some(k8s_gateway_api::BackendRef { +// weight: None, +// inner: k8s_gateway_api::BackendObjectReference { +// name: backend.name(), +// port: Some(8888), +// group: Some(backend.group()), +// kind: Some(backend.kind()), +// namespace: backends_ns.clone(), +// }, +// }), +// filters: backend_filters.clone(), +// }) +// .collect() +// }); +// route.spec.rules.iter_mut().flatten().for_each(|rule| { +// rule.backend_refs = backend_refs.clone(); +// }); +// Self(route) +// } + +// fn with_filters(self, filters: Option>) -> Self { +// let mut route = self.0; +// route +// .spec +// .rules +// .iter_mut() +// .flatten() +// .for_each(|rule| rule.filters = filters.clone()); +// Self(route) +// } + +// fn with_annotations(self, annotations: BTreeMap) -> Self { +// let mut route = self.0; +// route.metadata.annotations = Some(annotations); +// Self(route) +// } + +// fn build(self) -> k8s::policy::HttpRoute { +// self.0 +// } +// } + +// fn mk_empty_http_route( +// ns: &str, +// name: &str, +// parent: &Resource, +// port: u16, +// ) -> k8s::policy::HttpRoute { +// use k8s::policy::httproute as api; +// api::HttpRoute { +// metadata: kube::api::ObjectMeta { +// namespace: Some(ns.to_string()), +// name: Some(name.to_string()), +// ..Default::default() +// }, +// spec: api::HttpRouteSpec { +// inner: api::CommonRouteSpec { +// parent_refs: Some(vec![api::ParentReference { +// group: Some(parent.group()), +// kind: Some(parent.kind()), +// namespace: Some(parent.namespace()), +// name: 
parent.name(), +// section_name: None, +// port: Some(port), +// }]), +// }, +// hostnames: None, +// rules: Some(vec![]), +// }, +// status: None, +// } +// } + +// async fn parent_with_no_http_routes(parent: Resource, client: &kube::Client, ns: &str) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // There should be a default route. +// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &parent, 4191); +// }); +// } + +// async fn parent_with_http_route_without_rules(parent: Resource, client: &kube::Client, ns: &str) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // There should be a default route. +// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &parent, 4191); +// }); + +// let _route = create(client, mk_empty_http_route(ns, "foo-route", &parent, 4191)).await; +// await_route_status(client, ns, "foo-route").await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // There should be a route with no rules. 
+// detect_http_routes(&config, |routes| { +// let route = assert_route_attached(routes, &parent); +// assert_eq!(route.rules.len(), 0); +// }); +// } + +// async fn parent_with_http_routes_without_backends( +// parent: Resource, +// client: &kube::Client, +// ns: &str, +// ) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // There should be a default route. +// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &parent, 4191); +// }); + +// let _route = create( +// client, +// mk_http_route(ns, "foo-route", &parent, Some(4191)).build(), +// ) +// .await; +// await_route_status(client, ns, "foo-route").await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // There should be a route with the logical backend. +// detect_http_routes(&config, |routes| { +// let route = assert_route_attached(routes, &parent); +// let backends = route_backends_first_available(route); +// let backend = assert_singleton(backends); +// assert_backend_matches_parent(backend, &parent, 4191); +// }); +// } + +// async fn parent_with_http_routes_with_backend( +// parent: Resource, +// rule_backend: Resource, +// client: &kube::Client, +// ns: &str, +// ) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // There should be a default route. 
+// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &parent, 4191); +// }); + +// let backends = [rule_backend.clone()]; +// let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( +// Some(&backends), +// None, +// None, +// ); +// let _route = create(client, route.build()).await; +// await_route_status(client, ns, "foo-route").await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // There should be a route with a backend with no filters. +// detect_http_routes(&config, |routes| { +// let route = assert_route_attached(routes, &parent); +// let backends = route_backends_random_available(route); +// let backend = assert_singleton(backends); +// assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &rule_backend, 8888); +// let filters = &backend.backend.as_ref().unwrap().filters; +// assert_eq!(filters.len(), 0); +// }); +// } + +// async fn parent_with_http_routes_with_invalid_backend( +// parent: Resource, +// backend: Resource, +// client: &kube::Client, +// ns: &str, +// ) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // There should be a default route. 
+// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &parent, 4191); +// }); + +// let backends = [backend]; +// let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( +// Some(&backends), +// None, +// None, +// ); +// let _route = create(client, route.build()).await; +// await_route_status(client, ns, "foo-route").await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // There should be a route with a backend. +// detect_http_routes(&config, |routes| { +// let route = assert_route_attached(routes, &parent); +// let backends = route_backends_random_available(route); +// let backend = assert_singleton(backends); +// assert_backend_has_failure_filter(backend); +// }); +// } + +// async fn parent_with_multiple_http_routes(parent: Resource, client: &kube::Client, ns: &str) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // There should be a default route. +// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &parent, 4191); +// }); + +// // Routes should be returned in sorted order by creation timestamp then +// // name. To ensure that this test isn't timing dependant, routes should +// // be created in alphabetical order. +// let _a_route = create( +// client, +// mk_http_route(ns, "a-route", &parent, Some(4191)).build(), +// ) +// .await; +// await_route_status(client, ns, "a-route").await; + +// // First route update. 
+// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// let _b_route = create( +// client, +// mk_http_route(ns, "b-route", &parent, Some(4191)).build(), +// ) +// .await; +// await_route_status(client, ns, "b-route").await; + +// // Second route update. +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// detect_http_routes(&config, |routes| { +// let num_routes = match parent { +// Resource::EgressNetwork(_) => 3, // three routes for egress net 2 configured + 1 default +// Resource::Service(_) => 2, // two routes for service +// }; +// assert_eq!(routes.len(), num_routes); +// assert_eq!(route_name(&routes[0]), "a-route"); +// assert_eq!(route_name(&routes[1]), "b-route"); +// }); +// } + +// async fn parent_with_consecutive_failure_accrual( +// parent: Resource, +// client: &kube::Client, +// ns: &str, +// ) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// detect_failure_accrual(&config, |accrual| { +// let consecutive = failure_accrual_consecutive(accrual); +// assert_eq!(8, consecutive.max_failures); +// assert_eq!( +// &grpc::outbound::ExponentialBackoff { +// min_backoff: Some(Duration::from_secs(10).try_into().unwrap()), +// max_backoff: Some(Duration::from_secs(600).try_into().unwrap()), +// jitter_ratio: 1.0_f32, +// }, +// consecutive +// .backoff +// .as_ref() +// .expect("backoff must be configured") +// ); +// }); +// } + +// async fn parent_with_consecutive_failure_accrual_defaults_no_config( +// parent: Resource, +// 
client: &kube::Client, +// ns: &str, +// ) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// // Expect default max_failures and default backoff +// detect_failure_accrual(&config, |accrual| { +// let consecutive = failure_accrual_consecutive(accrual); +// assert_eq!(7, consecutive.max_failures); +// assert_default_accrual_backoff!(consecutive +// .backoff +// .as_ref() +// .expect("backoff must be configured")); +// }); +// } + +// async fn parent_with_consecutive_failure_accrual_defaults_max_fails( +// parent: Resource, +// client: &kube::Client, +// ns: &str, +// ) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// // Expect default backoff and overridden max_failures +// detect_failure_accrual(&config, |accrual| { +// let consecutive = failure_accrual_consecutive(accrual); +// assert_eq!(8, consecutive.max_failures); +// assert_default_accrual_backoff!(consecutive +// .backoff +// .as_ref() +// .expect("backoff must be configured")); +// }); +// } + +// async fn parent_with_consecutive_failure_accrual_defaults_max_jitter( +// parent: Resource, +// client: &kube::Client, +// ns: &str, +// ) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// // Expect defaults for everything except for the jitter ratio +// detect_failure_accrual(&config, |accrual| { +// let consecutive = failure_accrual_consecutive(accrual); +// assert_eq!(7, consecutive.max_failures); +// assert_eq!( +// 
&grpc::outbound::ExponentialBackoff { +// min_backoff: Some(Duration::from_secs(1).try_into().unwrap()), +// max_backoff: Some(Duration::from_secs(60).try_into().unwrap()), +// jitter_ratio: 1.0_f32, +// }, +// consecutive +// .backoff +// .as_ref() +// .expect("backoff must be configured") +// ); +// }); +// } + +// async fn parent_with_default_failure_accrual( +// parent_default_config: Resource, +// parent_max_failures: Resource, +// client: &kube::Client, +// ns: &str, +// ) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent_default_config, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// // Expect failure accrual config to be default (no failure accrual) +// detect_failure_accrual(&config, |accrual| { +// assert!( +// accrual.is_none(), +// "consecutive failure accrual should not be configured for service" +// ); +// }); + +// let mut rx = retry_watch_outbound_policy(client, ns, &parent_max_failures, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// // Expect failure accrual config to be default (no failure accrual) +// detect_failure_accrual(&config, |accrual| { +// assert!( +// accrual.is_none(), +// "consecutive failure accrual should not be configured for service" +// ) +// }); +// } + +// async fn opaque_parent(parent: Resource, client: &kube::Client, ns: &str) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// // Proxy protocol should be opaque. 
+// match config.protocol.unwrap().kind.unwrap() { +// grpc::outbound::proxy_protocol::Kind::Opaque(_) => {} +// _ => panic!("proxy protocol must be Opaque"), +// }; +// } + +// async fn route_with_filters(parent: Resource, backend: Resource, client: &kube::Client, ns: &str) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// // There should be a default route. +// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &parent, 4191); +// }); + +// let backends = [backend.clone()]; +// let route = mk_http_route(ns, "foo-route", &parent, Some(4191)) +// .with_backends(Some(&backends), None, None) +// .with_filters(Some(vec![ +// k8s::policy::httproute::HttpRouteFilter::RequestHeaderModifier { +// request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { +// set: Some(vec![k8s_gateway_api::HttpHeader { +// name: "set".to_string(), +// value: "set-value".to_string(), +// }]), +// add: Some(vec![k8s_gateway_api::HttpHeader { +// name: "add".to_string(), +// value: "add-value".to_string(), +// }]), +// remove: Some(vec!["remove".to_string()]), +// }, +// }, +// k8s::policy::httproute::HttpRouteFilter::RequestRedirect { +// request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { +// scheme: Some("http".to_string()), +// hostname: Some("host".to_string()), +// path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { +// replace_prefix_match: "/path".to_string(), +// }), +// port: Some(5555), +// status_code: Some(302), +// }, +// }, +// ])); +// let _route = create(client, route.build()).await; +// await_route_status(client, ns, "foo-route").await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// 
tracing::trace!(?config); + +// // There should be a route with filters. +// detect_http_routes(&config, |routes| { +// let route = assert_route_attached(routes, &parent); +// let rule = assert_singleton(&route.rules); +// let filters = &rule.filters; +// assert_eq!( +// *filters, +// vec![ +// grpc::outbound::http_route::Filter { +// kind: Some( +// grpc::outbound::http_route::filter::Kind::RequestHeaderModifier( +// grpc::http_route::RequestHeaderModifier { +// add: Some(grpc::http_types::Headers { +// headers: vec![grpc::http_types::headers::Header { +// name: "add".to_string(), +// value: "add-value".into(), +// }] +// }), +// set: Some(grpc::http_types::Headers { +// headers: vec![grpc::http_types::headers::Header { +// name: "set".to_string(), +// value: "set-value".into(), +// }] +// }), +// remove: vec!["remove".to_string()], +// } +// ) +// ) +// }, +// grpc::outbound::http_route::Filter { +// kind: Some(grpc::outbound::http_route::filter::Kind::Redirect( +// grpc::http_route::RequestRedirect { +// scheme: Some(grpc::http_types::Scheme { +// r#type: Some(grpc::http_types::scheme::Type::Registered( +// grpc::http_types::scheme::Registered::Http.into(), +// )) +// }), +// host: "host".to_string(), +// path: Some(linkerd2_proxy_api::http_route::PathModifier { +// replace: Some( +// linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( +// "/path".to_string() +// ) +// ) +// }), +// port: 5555, +// status: 302, +// } +// )) +// } +// ] +// ); +// }); +// } + +// async fn backend_with_filters( +// parent: Resource, +// backend_for_parent: Resource, +// client: &kube::Client, +// ns: &str, +// ) { +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// // There should be a default route. 
+// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &parent, 4191); +// }); + +// let backends = [backend_for_parent.clone()]; +// let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( +// Some(&backends), +// None, +// Some(vec![ +// k8s_gateway_api::HttpRouteFilter::RequestHeaderModifier { +// request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { +// set: Some(vec![k8s_gateway_api::HttpHeader { +// name: "set".to_string(), +// value: "set-value".to_string(), +// }]), +// add: Some(vec![k8s_gateway_api::HttpHeader { +// name: "add".to_string(), +// value: "add-value".to_string(), +// }]), +// remove: Some(vec!["remove".to_string()]), +// }, +// }, +// k8s_gateway_api::HttpRouteFilter::RequestRedirect { +// request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { +// scheme: Some("http".to_string()), +// hostname: Some("host".to_string()), +// path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { +// replace_prefix_match: "/path".to_string(), +// }), +// port: Some(5555), +// status_code: Some(302), +// }, +// }, +// ]), +// ); +// let _route = create(client, route.build()).await; +// await_route_status(client, ns, "foo-route").await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// // There should be a route without rule filters. 
+// detect_http_routes(&config, |routes| { +// let route = assert_route_attached(routes, &parent); +// let rule = assert_singleton(&route.rules); +// assert_eq!(rule.filters.len(), 0); +// let backends = route_backends_random_available(route); +// let backend = assert_singleton(backends); +// assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_for_parent, 8888); +// let filters = &backend.backend.as_ref().unwrap().filters; +// assert_eq!( +// *filters, +// vec![ +// grpc::outbound::http_route::Filter { +// kind: Some( +// grpc::outbound::http_route::filter::Kind::RequestHeaderModifier( +// grpc::http_route::RequestHeaderModifier { +// add: Some(grpc::http_types::Headers { +// headers: vec![grpc::http_types::headers::Header { +// name: "add".to_string(), +// value: "add-value".into(), +// }] +// }), +// set: Some(grpc::http_types::Headers { +// headers: vec![grpc::http_types::headers::Header { +// name: "set".to_string(), +// value: "set-value".into(), +// }] +// }), +// remove: vec!["remove".to_string()], +// } +// ) +// ) +// }, +// grpc::outbound::http_route::Filter { +// kind: Some(grpc::outbound::http_route::filter::Kind::Redirect( +// grpc::http_route::RequestRedirect { +// scheme: Some(grpc::http_types::Scheme { +// r#type: Some(grpc::http_types::scheme::Type::Registered( +// grpc::http_types::scheme::Registered::Http.into(), +// )) +// }), +// host: "host".to_string(), +// path: Some(linkerd2_proxy_api::http_route::PathModifier { +// replace: Some( +// linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( +// "/path".to_string() +// ) +// ) +// }), +// port: 5555, +// status: 302, +// } +// )) +// } +// ] +// ); +// }); +// } + +// async fn http_route_retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { +// let _route = create( +// client, +// mk_http_route(ns, "foo-route", &parent, Some(4191)) +// .with_annotations( +// vec![ +// ("retry.linkerd.io/http".to_string(), "5xx".to_string()), +// 
("timeout.linkerd.io/response".to_string(), "10s".to_string()), +// ] +// .into_iter() +// .collect(), +// ) +// .build(), +// ) +// .await; + +// await_route_status(client, ns, "foo-route").await; + +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// detect_http_routes(&config, |routes| { +// let route = assert_route_attached(routes, &parent); +// let rule = assert_singleton(&route.rules); +// let conditions = rule +// .retry +// .as_ref() +// .expect("retry config expected") +// .conditions +// .as_ref() +// .expect("retry conditions expected"); +// let status_range = assert_singleton(&conditions.status_ranges); +// assert_eq!(status_range.start, 500); +// assert_eq!(status_range.end, 599); +// let timeout = rule +// .timeouts +// .as_ref() +// .expect("timeouts expected") +// .response +// .as_ref() +// .expect("response timeout expected"); +// assert_eq!(timeout.seconds, 10); +// }); +// } + +// async fn retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { +// let _route = create( +// client, +// mk_http_route(ns, "foo-route", &parent, Some(4191)) +// .with_annotations( +// vec![ +// // Route annotations override the timeout config specified +// // on the service. 
+// ("timeout.linkerd.io/request".to_string(), "5s".to_string()), +// ] +// .into_iter() +// .collect(), +// ) +// .build(), +// ) +// .await; +// await_route_status(client, ns, "foo-route").await; + +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// detect_http_routes(&config, |routes| { +// let route = assert_route_attached(routes, &parent); +// let rule = assert_singleton(&route.rules); +// let conditions = rule +// .retry +// .as_ref() +// .expect("retry config expected") +// .conditions +// .as_ref() +// .expect("retry conditions expected"); +// let status_range = assert_singleton(&conditions.status_ranges); +// // Retry config inherited from the service. +// assert_eq!(status_range.start, 500); +// assert_eq!(status_range.end, 599); +// let timeouts = rule.timeouts.as_ref().expect("timeouts expected"); +// // Service timeout config overridden by route timeout config. +// assert_eq!(timeouts.response, None); +// let request_timeout = timeouts.request.as_ref().expect("request timeout expected"); +// assert_eq!(request_timeout.seconds, 5); +// }); +// } + +// async fn http_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { +// let mut route = create(client, mk_empty_http_route(ns, "foo-route", &parent, 4191)).await; +// await_route_status(client, ns, "foo-route").await; + +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // The route should be attached. 
+// detect_http_routes(&config, |routes| { +// let route: &grpc::outbound::HttpRoute = assert_route_attached(routes, &parent); +// assert_route_name_eq(route, "foo-route"); +// }); + +// route +// .spec +// .inner +// .parent_refs +// .as_mut() +// .unwrap() +// .first_mut() +// .unwrap() +// .name = "other".to_string(); +// update(client, route.clone()).await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // The route should be unattached and the default route should be present. +// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &parent, 4191); +// }); + +// route +// .spec +// .inner +// .parent_refs +// .as_mut() +// .unwrap() +// .first_mut() +// .unwrap() +// .name = parent.name(); +// update(client, route).await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // The route should be attached again. 
+// detect_http_routes(&config, |routes| { +// let route = assert_route_attached(routes, &parent); +// assert_route_name_eq(route, "foo-route"); +// }); +// } diff --git a/policy-test/tests/outbound_api_tcp.rs b/policy-test/tests/outbound_api_tcp.rs index 7749a6fa92fe0..ad62dc86c8ede 100644 --- a/policy-test/tests/outbound_api_tcp.rs +++ b/policy-test/tests/outbound_api_tcp.rs @@ -1,644 +1,643 @@ -use futures::prelude::*; -use linkerd_policy_controller_k8s_api as k8s; -use linkerd_policy_test::{ - assert_resource_meta, assert_status_accepted, await_egress_net_status, await_tcp_route_status, - create, create_cluster_scoped, create_egress_network, create_service, delete_cluster_scoped, - mk_egress_net, mk_service, outbound_api::*, update, with_temp_ns, Resource, -}; -use maplit::{btreemap, convert_args}; - -#[tokio::test(flavor = "current_thread")] -async fn service_with_tcp_routes_with_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - let backend_svc = create_service(&client, &ns, "backend", 8888).await; - parent_with_tcp_routes_with_backend( - Resource::Service(svc), - Resource::Service(backend_svc), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_tcp_routes_with_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_tcp_routes_with_backend( - Resource::EgressNetwork(egress.clone()), - Resource::EgressNetwork(egress), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_tcp_routes_with_cross_namespace_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = 
Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let backend_ns_name = format!("{}-backend", ns); - let backend_ns = create_cluster_scoped( - &client, - k8s::Namespace { - metadata: k8s::ObjectMeta { - name: Some(backend_ns_name.clone()), - labels: Some(convert_args!(btreemap!( - "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), - ))), - ..Default::default() - }, - ..Default::default() - }, - ) - .await; - let backend_name = "backend"; - let backend_svc = - Resource::Service(create_service(&client, &backend_ns_name, backend_name, 8888).await); - let backends = [backend_svc.clone()]; - let route = mk_tcp_route(&ns, "foo-route", &svc, Some(4191)).with_backends(&backends); - let _route = create(&client, route.build()).await; - await_tcp_route_status(&client, &ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &svc, 4191); - - let routes = tcp_routes(&config); - let route = assert_singleton(routes); - let backends = tcp_route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_tcp_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_svc, 8888); - - delete_cluster_scoped(&client, backend_ns).await - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_tcp_routes_with_invalid_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - let backend = mk_service(&ns, "invalid", 4191); - - parent_with_tcp_routes_with_invalid_backend( - Resource::Service(svc), - Resource::Service(backend), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn 
egress_net_with_tcp_routes_with_invalid_backend() { - with_temp_ns(|client, ns| async move { - // Create an egress network - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - let backend = mk_egress_net(&ns, "invalid"); - - parent_with_tcp_routes_with_invalid_backend( - Resource::EgressNetwork(egress), - Resource::EgressNetwork(backend), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_multiple_tcp_routes() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - parent_with_multiple_tcp_routes(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_multiple_tcp_routes() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_multiple_tcp_routes(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn tcp_route_with_no_port() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let _route = create( - &client, - mk_tcp_route(&ns, "foo-route", &svc, None) - .with_backends(&[svc.clone()]) - .build(), - ) - .await; - await_tcp_route_status(&client, &ns, "foo-route").await; - - let mut rx_4191 = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let mut rx_9999 = retry_watch_outbound_policy(&client, &ns, &svc, 9999).await; - - let config_4191 = rx_4191 - .next() - .await - .expect("watch must not fail") - .expect("watch 
must return an updated config"); - tracing::trace!(?config_4191); - - let routes = tcp_routes(&config_4191); - let route = assert_singleton(routes); - assert_tcp_route_name_eq(route, "foo-route"); - - let config_9999 = rx_9999 - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config_9999); - - let routes = tcp_routes(&config_9999); - let route = assert_singleton(routes); - assert_tcp_route_name_eq(route, "foo-route"); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn producer_route() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - // A route created in the same namespace as its parent service is called - // a producer route. It should be returned in outbound policy requests - // for that service from ALL namespaces. - let _route = create( - &client, - mk_tcp_route(&ns, "foo-route", &svc, Some(4191)) - .with_backends(&[svc.clone()]) - .build(), - ) - .await; - await_tcp_route_status(&client, &ns, "foo-route").await; - - let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; - let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?producer_config); - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - let routes = tcp_routes(&producer_config); - let route = assert_singleton(routes); - assert_tcp_route_name_eq(route, "foo-route"); - - let routes = tcp_routes(&consumer_config); - let route = assert_singleton(routes); - assert_tcp_route_name_eq(route, "foo-route"); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] 
-async fn pre_existing_producer_route() { - // We test the scenario where outbound policy watches are initiated after - // a produce route already exists. - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - // A route created in the same namespace as its parent service is called - // a producer route. It should be returned in outbound policy requests - // for that service from ALL namespaces. - let _route = create( - &client, - mk_tcp_route(&ns, "foo-route", &svc, Some(4191)) - .with_backends(&[svc.clone()]) - .build(), - ) - .await; - await_tcp_route_status(&client, &ns, "foo-route").await; - - let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?producer_config); - - let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - // The route should be returned in queries from the producer namespace. - let routes = tcp_routes(&producer_config); - let route = assert_singleton(routes); - assert_tcp_route_name_eq(route, "foo-route"); - - // The route should be returned in queries from a consumer namespace. 
- let routes = tcp_routes(&consumer_config); - let route = assert_singleton(routes); - assert_tcp_route_name_eq(route, "foo-route"); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn consumer_route() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let consumer_ns_name = format!("{}-consumer", ns); - let consumer_ns = create_cluster_scoped( - &client, - k8s::Namespace { - metadata: k8s::ObjectMeta { - name: Some(consumer_ns_name.clone()), - labels: Some(convert_args!(btreemap!( - "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), - ))), - ..Default::default() - }, - ..Default::default() - }, - ) - .await; - - let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?producer_config); - - let mut consumer_rx = - retry_watch_outbound_policy(&client, &consumer_ns_name, &svc, 4191).await; - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - let mut other_rx = retry_watch_outbound_policy(&client, "other_ns", &svc, 4191).await; - let other_config = other_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?other_config); - - // A route created in a different namespace as its parent service is - // called a consumer route. It should be returned in outbound policy - // requests for that service ONLY when the request comes from the - // consumer namespace. 
- let _route = create( - &client, - mk_tcp_route(&consumer_ns_name, "foo-route", &svc, Some(4191)) - .with_backends(&[svc]) - .build(), - ) - .await; - await_tcp_route_status(&client, &consumer_ns_name, "foo-route").await; - - // The route should NOT be returned in queries from the producer namespace. - // There should be a default route. - assert!(producer_rx.next().now_or_never().is_none()); - - // The route should be returned in queries from the same consumer - // namespace. - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - let routes = tcp_routes(&consumer_config); - let route = assert_singleton(routes); - assert_tcp_route_name_eq(route, "foo-route"); - - // The route should NOT be returned in queries from a different consumer - // namespace. - assert!(other_rx.next().now_or_never().is_none()); - - delete_cluster_scoped(&client, consumer_ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_tcp_route_reattachment() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - tcp_route_reattachment(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_tcp_route_reattachment() { - with_temp_ns(|client, ns| async move { - // Create a egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - tcp_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -/* Helpers */ - -struct TcpRouteBuilder(k8s_gateway_api::TcpRoute); - -fn mk_tcp_route(ns: &str, name: &str, parent: &Resource, port: Option) -> TcpRouteBuilder { - use k8s_gateway_api as api; - - TcpRouteBuilder(api::TcpRoute { - 
metadata: kube::api::ObjectMeta { - namespace: Some(ns.to_string()), - name: Some(name.to_string()), - ..Default::default() - }, - spec: api::TcpRouteSpec { - inner: api::CommonRouteSpec { - parent_refs: Some(vec![api::ParentReference { - group: Some(parent.group()), - kind: Some(parent.kind()), - namespace: Some(parent.namespace()), - name: parent.name(), - section_name: None, - port, - }]), - }, - rules: vec![api::TcpRouteRule { - backend_refs: Vec::default(), - }], - }, - status: None, - }) -} - -impl TcpRouteBuilder { - fn with_backends(self, backends: &[Resource]) -> Self { - let mut route = self.0; - let backend_refs: Vec<_> = backends - .iter() - .map(|backend| k8s_gateway_api::BackendRef { - weight: None, - inner: k8s_gateway_api::BackendObjectReference { - name: backend.name(), - port: Some(8888), - group: Some(backend.group()), - kind: Some(backend.kind()), - namespace: Some(backend.namespace()), - }, - }) - .collect(); - route.spec.rules.iter_mut().for_each(|rule| { - rule.backend_refs = backend_refs.clone(); - }); - Self(route) - } - - fn build(self) -> k8s_gateway_api::TcpRoute { - self.0 - } -} - -async fn parent_with_tcp_routes_with_backend( - parent: Resource, - rule_backend: Resource, - client: &kube::Client, - ns: &str, -) { - let backends = [rule_backend.clone()]; - let route = mk_tcp_route(ns, "foo-route", &parent, Some(4191)).with_backends(&backends); - let _route = create(client, route.build()).await; - await_tcp_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - let routes = tcp_routes(&config); - let route = assert_singleton(routes); - let backends = tcp_route_backends_random_available(route); - let backend = assert_singleton(backends); - 
assert_tcp_backend_matches_parent(backend.backend.as_ref().unwrap(), &rule_backend, 8888); -} - -async fn parent_with_tcp_routes_with_invalid_backend( - parent: Resource, - backend: Resource, - client: &kube::Client, - ns: &str, -) { - let backends = [backend]; - let route = mk_tcp_route(ns, "foo-route", &parent, Some(4191)).with_backends(&backends); - let _route = create(client, route.build()).await; - await_tcp_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - let routes = tcp_routes(&config); - let route = assert_singleton(routes); - let backends = tcp_route_backends_random_available(route); - assert_singleton(backends); -} - -async fn parent_with_multiple_tcp_routes(parent: Resource, client: &kube::Client, ns: &str) { - // Routes should be returned in sorted order by creation timestamp then - // name. To ensure that this test isn't timing dependant, routes should - // be created in alphabetical order. - let _a_route = create( - client, - mk_tcp_route(ns, "a-route", &parent, Some(4191)) - .with_backends(&[parent.clone()]) - .build(), - ) - .await; - await_tcp_route_status(client, ns, "a-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - - // First route update. - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - let _b_route = create( - client, - mk_tcp_route(ns, "b-route", &parent, Some(4191)) - .with_backends(&[parent.clone()]) - .build(), - ) - .await; - await_tcp_route_status(client, ns, "b-route").await; - - // Second route update. 
- let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - let routes = tcp_routes(&config); - assert_eq!(routes.len(), 1); - assert_eq!(tcp_route_name(&routes[0]), "a-route"); -} - -async fn tcp_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { - let mut route = create( - client, - mk_tcp_route(ns, "foo-route", &parent, Some(4191)) - .with_backends(&[parent.clone()]) - .build(), - ) - .await; - await_tcp_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be attached. - let routes = tcp_routes(&config); - let tcp_route = assert_singleton(routes); - assert_tcp_route_name_eq(tcp_route, "foo-route"); - - route - .spec - .inner - .parent_refs - .as_mut() - .unwrap() - .first_mut() - .unwrap() - .name = "other".to_string(); - update(client, route.clone()).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be unattached and the default route should be present. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - route - .spec - .inner - .parent_refs - .as_mut() - .unwrap() - .first_mut() - .unwrap() - .name = parent.name(); - update(client, route).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be attached again. - // The route should be attached. - let routes = tcp_routes(&config); - let tcp_route = assert_singleton(routes); - assert_tcp_route_name_eq(tcp_route, "foo-route"); -} +// use futures::prelude::*; +// use linkerd_policy_controller_k8s_api as k8s; +// use linkerd_policy_test::{ +// assert_resource_meta, assert_status_accepted, await_egress_net_status, await_tcp_route_status, +// create, create_cluster_scoped, create_egress_network, create_service, delete_cluster_scoped, +// mk_egress_net, mk_service, outbound_api::*, update, with_temp_ns, Resource, +// }; +// use maplit::{btreemap, convert_args}; + +// #[tokio::test(flavor = "current_thread")] +// async fn service_with_tcp_routes_with_backend() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// let backend_svc = create_service(&client, &ns, "backend", 8888).await; +// parent_with_tcp_routes_with_backend( +// Resource::Service(svc), +// Resource::Service(backend_svc), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_with_tcp_routes_with_backend() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// 
parent_with_tcp_routes_with_backend( +// Resource::EgressNetwork(egress.clone()), +// Resource::EgressNetwork(egress), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_with_tcp_routes_with_cross_namespace_backend() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + +// let backend_ns_name = format!("{}-backend", ns); +// let backend_ns = create_cluster_scoped( +// &client, +// k8s::Namespace { +// metadata: k8s::ObjectMeta { +// name: Some(backend_ns_name.clone()), +// labels: Some(convert_args!(btreemap!( +// "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), +// ))), +// ..Default::default() +// }, +// ..Default::default() +// }, +// ) +// .await; +// let backend_name = "backend"; +// let backend_svc = +// Resource::Service(create_service(&client, &backend_ns_name, backend_name, 8888).await); +// let backends = [backend_svc.clone()]; +// let route = mk_tcp_route(&ns, "foo-route", &svc, Some(4191)).with_backends(&backends); +// let _route = create(&client, route.build()).await; +// await_tcp_route_status(&client, &ns, "foo-route").await; + +// let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &svc, 4191); + +// let routes = tcp_routes(&config); +// let route = assert_singleton(routes); +// let backends = tcp_route_backends_random_available(route); +// let backend = assert_singleton(backends); +// assert_tcp_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_svc, 8888); + +// delete_cluster_scoped(&client, backend_ns).await +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn 
service_with_tcp_routes_with_invalid_backend() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// let backend = mk_service(&ns, "invalid", 4191); + +// parent_with_tcp_routes_with_invalid_backend( +// Resource::Service(svc), +// Resource::Service(backend), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_with_tcp_routes_with_invalid_backend() { +// with_temp_ns(|client, ns| async move { +// // Create an egress network +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// let backend = mk_egress_net(&ns, "invalid"); + +// parent_with_tcp_routes_with_invalid_backend( +// Resource::EgressNetwork(egress), +// Resource::EgressNetwork(backend), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_with_multiple_tcp_routes() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// parent_with_multiple_tcp_routes(Resource::Service(svc), &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_with_multiple_tcp_routes() { +// with_temp_ns(|client, ns| async move { +// // Create an egress net +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// parent_with_multiple_tcp_routes(Resource::EgressNetwork(egress), &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn tcp_route_with_no_port() { +// with_temp_ns(|client, ns| async move { +// // 
Create a service +// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + +// let _route = create( +// &client, +// mk_tcp_route(&ns, "foo-route", &svc, None) +// .with_backends(&[svc.clone()]) +// .build(), +// ) +// .await; +// await_tcp_route_status(&client, &ns, "foo-route").await; + +// let mut rx_4191 = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; +// let mut rx_9999 = retry_watch_outbound_policy(&client, &ns, &svc, 9999).await; + +// let config_4191 = rx_4191 +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config_4191); + +// let routes = tcp_routes(&config_4191); +// let route = assert_singleton(routes); +// assert_tcp_route_name_eq(route, "foo-route"); + +// let config_9999 = rx_9999 +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config_9999); + +// let routes = tcp_routes(&config_9999); +// let route = assert_singleton(routes); +// assert_tcp_route_name_eq(route, "foo-route"); +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn producer_route() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + +// // A route created in the same namespace as its parent service is called +// // a producer route. It should be returned in outbound policy requests +// // for that service from ALL namespaces. 
+// let _route = create( +// &client, +// mk_tcp_route(&ns, "foo-route", &svc, Some(4191)) +// .with_backends(&[svc.clone()]) +// .build(), +// ) +// .await; +// await_tcp_route_status(&client, &ns, "foo-route").await; + +// let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; +// let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; + +// let producer_config = producer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?producer_config); +// let consumer_config = consumer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?consumer_config); + +// let routes = tcp_routes(&producer_config); +// let route = assert_singleton(routes); +// assert_tcp_route_name_eq(route, "foo-route"); + +// let routes = tcp_routes(&consumer_config); +// let route = assert_singleton(routes); +// assert_tcp_route_name_eq(route, "foo-route"); +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn pre_existing_producer_route() { +// // We test the scenario where outbound policy watches are initiated after +// // a produce route already exists. +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + +// // A route created in the same namespace as its parent service is called +// // a producer route. It should be returned in outbound policy requests +// // for that service from ALL namespaces. 
+// let _route = create( +// &client, +// mk_tcp_route(&ns, "foo-route", &svc, Some(4191)) +// .with_backends(&[svc.clone()]) +// .build(), +// ) +// .await; +// await_tcp_route_status(&client, &ns, "foo-route").await; + +// let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; +// let producer_config = producer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?producer_config); + +// let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; +// let consumer_config = consumer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?consumer_config); + +// // The route should be returned in queries from the producer namespace. +// let routes = tcp_routes(&producer_config); +// let route = assert_singleton(routes); +// assert_tcp_route_name_eq(route, "foo-route"); + +// // The route should be returned in queries from a consumer namespace. 
+// let routes = tcp_routes(&consumer_config); +// let route = assert_singleton(routes); +// assert_tcp_route_name_eq(route, "foo-route"); +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn consumer_route() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + +// let consumer_ns_name = format!("{}-consumer", ns); +// let consumer_ns = create_cluster_scoped( +// &client, +// k8s::Namespace { +// metadata: k8s::ObjectMeta { +// name: Some(consumer_ns_name.clone()), +// labels: Some(convert_args!(btreemap!( +// "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), +// ))), +// ..Default::default() +// }, +// ..Default::default() +// }, +// ) +// .await; + +// let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; +// let producer_config = producer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?producer_config); + +// let mut consumer_rx = +// retry_watch_outbound_policy(&client, &consumer_ns_name, &svc, 4191).await; +// let consumer_config = consumer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?consumer_config); + +// let mut other_rx = retry_watch_outbound_policy(&client, "other_ns", &svc, 4191).await; +// let other_config = other_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?other_config); + +// // A route created in a different namespace as its parent service is +// // called a consumer route. It should be returned in outbound policy +// // requests for that service ONLY when the request comes from the +// // consumer namespace. 
+// let _route = create( +// &client, +// mk_tcp_route(&consumer_ns_name, "foo-route", &svc, Some(4191)) +// .with_backends(&[svc]) +// .build(), +// ) +// .await; +// await_tcp_route_status(&client, &consumer_ns_name, "foo-route").await; + +// // The route should NOT be returned in queries from the producer namespace. +// // There should be a default route. +// assert!(producer_rx.next().now_or_never().is_none()); + +// // The route should be returned in queries from the same consumer +// // namespace. +// let consumer_config = consumer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?consumer_config); + +// let routes = tcp_routes(&consumer_config); +// let route = assert_singleton(routes); +// assert_tcp_route_name_eq(route, "foo-route"); + +// // The route should NOT be returned in queries from a different consumer +// // namespace. +// assert!(other_rx.next().now_or_never().is_none()); + +// delete_cluster_scoped(&client, consumer_ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_tcp_route_reattachment() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// tcp_route_reattachment(Resource::Service(svc), &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_tcp_route_reattachment() { +// with_temp_ns(|client, ns| async move { +// // Create a egress net +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// tcp_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await; +// }) +// .await; +// } + +// /* Helpers */ +// struct TcpRouteBuilder(k8s_gateway_api::TcpRoute); + +// fn mk_tcp_route(ns: &str, name: &str, parent: 
&Resource, port: Option) -> TcpRouteBuilder { +// use k8s_gateway_api as api; + +// TcpRouteBuilder(api::TcpRoute { +// metadata: kube::api::ObjectMeta { +// namespace: Some(ns.to_string()), +// name: Some(name.to_string()), +// ..Default::default() +// }, +// spec: api::TcpRouteSpec { +// inner: api::CommonRouteSpec { +// parent_refs: Some(vec![api::ParentReference { +// group: Some(parent.group()), +// kind: Some(parent.kind()), +// namespace: Some(parent.namespace()), +// name: parent.name(), +// section_name: None, +// port, +// }]), +// }, +// rules: vec![api::TcpRouteRule { +// backend_refs: Vec::default(), +// }], +// }, +// status: None, +// }) +// } + +// impl TcpRouteBuilder { +// fn with_backends(self, backends: &[Resource]) -> Self { +// let mut route = self.0; +// let backend_refs: Vec<_> = backends +// .iter() +// .map(|backend| k8s_gateway_api::BackendRef { +// weight: None, +// inner: k8s_gateway_api::BackendObjectReference { +// name: backend.name(), +// port: Some(8888), +// group: Some(backend.group()), +// kind: Some(backend.kind()), +// namespace: Some(backend.namespace()), +// }, +// }) +// .collect(); +// route.spec.rules.iter_mut().for_each(|rule| { +// rule.backend_refs = backend_refs.clone(); +// }); +// Self(route) +// } + +// fn build(self) -> k8s_gateway_api::TcpRoute { +// self.0 +// } +// } + +// async fn parent_with_tcp_routes_with_backend( +// parent: Resource, +// rule_backend: Resource, +// client: &kube::Client, +// ns: &str, +// ) { +// let backends = [rule_backend.clone()]; +// let route = mk_tcp_route(ns, "foo-route", &parent, Some(4191)).with_backends(&backends); +// let _route = create(client, route.build()).await; +// await_tcp_route_status(client, ns, "foo-route").await; + +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// 
assert_resource_meta(&config.metadata, &parent, 4191); + +// let routes = tcp_routes(&config); +// let route = assert_singleton(routes); +// let backends = tcp_route_backends_random_available(route); +// let backend = assert_singleton(backends); +// assert_tcp_backend_matches_parent(backend.backend.as_ref().unwrap(), &rule_backend, 8888); +// } + +// async fn parent_with_tcp_routes_with_invalid_backend( +// parent: Resource, +// backend: Resource, +// client: &kube::Client, +// ns: &str, +// ) { +// let backends = [backend]; +// let route = mk_tcp_route(ns, "foo-route", &parent, Some(4191)).with_backends(&backends); +// let _route = create(client, route.build()).await; +// await_tcp_route_status(client, ns, "foo-route").await; + +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// let routes = tcp_routes(&config); +// let route = assert_singleton(routes); +// let backends = tcp_route_backends_random_available(route); +// assert_singleton(backends); +// } + +// async fn parent_with_multiple_tcp_routes(parent: Resource, client: &kube::Client, ns: &str) { +// // Routes should be returned in sorted order by creation timestamp then +// // name. To ensure that this test isn't timing dependant, routes should +// // be created in alphabetical order. +// let _a_route = create( +// client, +// mk_tcp_route(ns, "a-route", &parent, Some(4191)) +// .with_backends(&[parent.clone()]) +// .build(), +// ) +// .await; +// await_tcp_route_status(client, ns, "a-route").await; + +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + +// // First route update. 
+// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// let _b_route = create( +// client, +// mk_tcp_route(ns, "b-route", &parent, Some(4191)) +// .with_backends(&[parent.clone()]) +// .build(), +// ) +// .await; +// await_tcp_route_status(client, ns, "b-route").await; + +// // Second route update. +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// let routes = tcp_routes(&config); +// assert_eq!(routes.len(), 1); +// assert_eq!(tcp_route_name(&routes[0]), "a-route"); +// } + +// async fn tcp_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { +// let mut route = create( +// client, +// mk_tcp_route(ns, "foo-route", &parent, Some(4191)) +// .with_backends(&[parent.clone()]) +// .build(), +// ) +// .await; +// await_tcp_route_status(client, ns, "foo-route").await; + +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // The route should be attached. 
+// let routes = tcp_routes(&config); +// let tcp_route = assert_singleton(routes); +// assert_tcp_route_name_eq(tcp_route, "foo-route"); + +// route +// .spec +// .inner +// .parent_refs +// .as_mut() +// .unwrap() +// .first_mut() +// .unwrap() +// .name = "other".to_string(); +// update(client, route.clone()).await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // The route should be unattached and the default route should be present. +// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &parent, 4191); +// }); + +// route +// .spec +// .inner +// .parent_refs +// .as_mut() +// .unwrap() +// .first_mut() +// .unwrap() +// .name = parent.name(); +// update(client, route).await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // The route should be attached again. +// // The route should be attached. 
+// let routes = tcp_routes(&config); +// let tcp_route = assert_singleton(routes); +// assert_tcp_route_name_eq(tcp_route, "foo-route"); +// } diff --git a/policy-test/tests/outbound_api_tls.rs b/policy-test/tests/outbound_api_tls.rs index cf7569dd15040..06ab43f5b52b5 100644 --- a/policy-test/tests/outbound_api_tls.rs +++ b/policy-test/tests/outbound_api_tls.rs @@ -1,650 +1,649 @@ -use futures::prelude::*; -use linkerd_policy_controller_k8s_api as k8s; -use linkerd_policy_test::{ - assert_resource_meta, assert_status_accepted, await_egress_net_status, await_tls_route_status, - create, create_cluster_scoped, create_egress_network, create_service, delete_cluster_scoped, - grpc, mk_egress_net, mk_service, outbound_api::*, update, with_temp_ns, Resource, -}; -use maplit::{btreemap, convert_args}; - -#[tokio::test(flavor = "current_thread")] -async fn service_with_tls_routes_with_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - let backend_svc = create_service(&client, &ns, "backend", 8888).await; - parent_with_tls_routes_with_backend( - Resource::Service(svc), - Resource::Service(backend_svc), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_tls_routes_with_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_tls_routes_with_backend( - Resource::EgressNetwork(egress.clone()), - Resource::EgressNetwork(egress), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_tls_routes_with_cross_namespace_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, 
&ns, "my-svc", 4191).await); - - let backend_ns_name = format!("{}-backend", ns); - let backend_ns = create_cluster_scoped( - &client, - k8s::Namespace { - metadata: k8s::ObjectMeta { - name: Some(backend_ns_name.clone()), - labels: Some(convert_args!(btreemap!( - "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), - ))), - ..Default::default() - }, - ..Default::default() - }, - ) - .await; - let backend_name = "backend"; - let backend_svc = - Resource::Service(create_service(&client, &backend_ns_name, backend_name, 8888).await); - let backends = [backend_svc.clone()]; - let route = mk_tls_route(&ns, "foo-route", &svc, Some(4191)).with_backends(&backends); - let _route = create(&client, route.build()).await; - await_tls_route_status(&client, &ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &svc, 4191); - - let routes = tls_routes(&config); - let route = assert_singleton(routes); - let backends = tls_route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_tls_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_svc, 8888); - - delete_cluster_scoped(&client, backend_ns).await - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_tls_routes_with_invalid_backend() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - let backend = mk_service(&ns, "invalid", 4191); - - parent_with_tls_routes_with_invalid_backend( - Resource::Service(svc), - Resource::Service(backend), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_tls_routes_with_invalid_backend() { - with_temp_ns(|client, ns| async 
move { - // Create an egress network - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - let backend = mk_egress_net(&ns, "invalid"); - - parent_with_tls_routes_with_invalid_backend( - Resource::EgressNetwork(egress), - Resource::EgressNetwork(backend), - &client, - &ns, - ) - .await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_with_multiple_tls_routes() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - parent_with_multiple_tls_routes(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_with_multiple_http_routes() { - with_temp_ns(|client, ns| async move { - // Create an egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - parent_with_multiple_tls_routes(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn tls_route_with_no_port() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let _route = create( - &client, - mk_tls_route(&ns, "foo-route", &svc, None) - .with_backends(&[svc.clone()]) - .build(), - ) - .await; - await_tls_route_status(&client, &ns, "foo-route").await; - - let mut rx_4191 = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let mut rx_9999 = retry_watch_outbound_policy(&client, &ns, &svc, 9999).await; - - let config_4191 = rx_4191 - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config_4191); - - let routes = 
tls_routes(&config_4191); - let route = assert_singleton(routes); - assert_tls_route_name_eq(route, "foo-route"); - - let config_9999 = rx_9999 - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config_9999); - - let routes = tls_routes(&config_9999); - let route = assert_singleton(routes); - assert_tls_route_name_eq(route, "foo-route"); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn producer_route() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - // A route created in the same namespace as its parent service is called - // a producer route. It should be returned in outbound policy requests - // for that service from ALL namespaces. - let _route = create( - &client, - mk_tls_route(&ns, "foo-route", &svc, Some(4191)) - .with_backends(&[svc.clone()]) - .build(), - ) - .await; - await_tls_route_status(&client, &ns, "foo-route").await; - - let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; - let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?producer_config); - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - let routes = tls_routes(&producer_config); - let route = assert_singleton(routes); - assert_tls_route_name_eq(route, "foo-route"); - - let routes = tls_routes(&consumer_config); - let route = assert_singleton(routes); - assert_tls_route_name_eq(route, "foo-route"); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn pre_existing_producer_route() { - // We test the scenario where outbound 
policy watches are initiated after - // a produce route already exists. - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - // A route created in the same namespace as its parent service is called - // a producer route. It should be returned in outbound policy requests - // for that service from ALL namespaces. - let _route = create( - &client, - mk_tls_route(&ns, "foo-route", &svc, Some(4191)) - .with_backends(&[svc.clone()]) - .build(), - ) - .await; - await_tls_route_status(&client, &ns, "foo-route").await; - - let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?producer_config); - - let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - // The route should be returned in queries from the producer namespace. - let routes = tls_routes(&producer_config); - let route = assert_singleton(routes); - assert_tls_route_name_eq(route, "foo-route"); - - // The route should be returned in queries from a consumer namespace. 
- let routes = tls_routes(&consumer_config); - let route = assert_singleton(routes); - assert_tls_route_name_eq(route, "foo-route"); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn consumer_route() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - - let consumer_ns_name = format!("{}-consumer", ns); - let consumer_ns = create_cluster_scoped( - &client, - k8s::Namespace { - metadata: k8s::ObjectMeta { - name: Some(consumer_ns_name.clone()), - labels: Some(convert_args!(btreemap!( - "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), - ))), - ..Default::default() - }, - ..Default::default() - }, - ) - .await; - - let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let producer_config = producer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?producer_config); - - let mut consumer_rx = - retry_watch_outbound_policy(&client, &consumer_ns_name, &svc, 4191).await; - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - let mut other_rx = retry_watch_outbound_policy(&client, "other_ns", &svc, 4191).await; - let other_config = other_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?other_config); - - // A route created in a different namespace as its parent service is - // called a consumer route. It should be returned in outbound policy - // requests for that service ONLY when the request comes from the - // consumer namespace. 
- let _route = create( - &client, - mk_tls_route(&consumer_ns_name, "foo-route", &svc, Some(4191)) - .with_backends(&[svc]) - .build(), - ) - .await; - await_tls_route_status(&client, &consumer_ns_name, "foo-route").await; - - // The route should NOT be returned in queries from the producer namespace. - // There should be a default route. - assert!(producer_rx.next().now_or_never().is_none()); - - // The route should be returned in queries from the same consumer - // namespace. - let consumer_config = consumer_rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?consumer_config); - - let routes = tls_routes(&consumer_config); - let route = assert_singleton(routes); - assert_tls_route_name_eq(route, "foo-route"); - - // The route should NOT be returned in queries from a different consumer - // namespace. - assert!(other_rx.next().now_or_never().is_none()); - - delete_cluster_scoped(&client, consumer_ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn service_tls_route_reattachment() { - with_temp_ns(|client, ns| async move { - // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - tls_route_reattachment(Resource::Service(svc), &client, &ns).await; - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn egress_net_tls_route_reattachment() { - with_temp_ns(|client, ns| async move { - // Create a egress net - let egress = create_egress_network(&client, &ns, "my-egress").await; - let status = await_egress_net_status(&client, &ns, "my-egress").await; - assert_status_accepted(status.conditions); - - tls_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await; - }) - .await; -} - -/* Helpers */ - -struct TlsRouteBuilder(k8s_gateway_api::TlsRoute); - -fn mk_tls_route(ns: &str, name: &str, parent: &Resource, port: Option) -> TlsRouteBuilder { - use k8s_gateway_api as api; - - TlsRouteBuilder(api::TlsRoute { - 
metadata: kube::api::ObjectMeta { - namespace: Some(ns.to_string()), - name: Some(name.to_string()), - ..Default::default() - }, - spec: api::TlsRouteSpec { - inner: api::CommonRouteSpec { - parent_refs: Some(vec![api::ParentReference { - group: Some(parent.group()), - kind: Some(parent.kind()), - namespace: Some(parent.namespace()), - name: parent.name(), - section_name: None, - port, - }]), - }, - hostnames: None, - rules: vec![api::TlsRouteRule { - backend_refs: Vec::default(), - }], - }, - status: None, - }) -} - -impl TlsRouteBuilder { - fn with_backends(self, backends: &[Resource]) -> Self { - let mut route = self.0; - let backend_refs: Vec<_> = backends - .iter() - .map(|backend| k8s_gateway_api::BackendRef { - weight: None, - inner: k8s_gateway_api::BackendObjectReference { - name: backend.name(), - port: Some(8888), - group: Some(backend.group()), - kind: Some(backend.kind()), - namespace: Some(backend.namespace()), - }, - }) - .collect(); - route.spec.rules.iter_mut().for_each(|rule| { - rule.backend_refs = backend_refs.clone(); - }); - Self(route) - } - - fn build(self) -> k8s_gateway_api::TlsRoute { - self.0 - } -} - -async fn parent_with_tls_routes_with_backend( - parent: Resource, - rule_backend: Resource, - client: &kube::Client, - ns: &str, -) { - let backends = [rule_backend.clone()]; - let route = mk_tls_route(ns, "foo-route", &parent, Some(4191)).with_backends(&backends); - let _route = create(client, route.build()).await; - await_tls_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - let routes = tls_routes(&config); - let route = assert_route_attached(routes, &parent); - let backends = tls_route_backends_random_available(route); - let backend = 
assert_singleton(backends); - assert_tls_backend_matches_parent(backend.backend.as_ref().unwrap(), &rule_backend, 8888); -} - -async fn parent_with_tls_routes_with_invalid_backend( - parent: Resource, - backend: Resource, - client: &kube::Client, - ns: &str, -) { - let backends = [backend]; - let route = mk_tls_route(ns, "foo-route", &parent, Some(4191)).with_backends(&backends); - let _route = create(client, route.build()).await; - await_tls_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - let routes = tls_routes(&config); - let route = assert_route_attached(routes, &parent); - let backends = tls_route_backends_random_available(route); - assert_singleton(backends); -} - -async fn parent_with_multiple_tls_routes(parent: Resource, client: &kube::Client, ns: &str) { - // Routes should be returned in sorted order by creation timestamp then - // name. To ensure that this test isn't timing dependant, routes should - // be created in alphabetical order. - let _a_route = create( - client, - mk_tls_route(ns, "a-route", &parent, Some(4191)) - .with_backends(&[parent.clone()]) - .build(), - ) - .await; - await_tls_route_status(client, ns, "a-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - - // First route update. - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - let _b_route = create( - client, - mk_tls_route(ns, "b-route", &parent, Some(4191)) - .with_backends(&[parent.clone()]) - .build(), - ) - .await; - await_tls_route_status(client, ns, "b-route").await; - - // Second route update. 
- let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - let routes = tls_routes(&config); - let num_routes = match parent { - Resource::EgressNetwork(_) => 3, // three routes for egress net 2 configured + 1 default - Resource::Service(_) => 2, // two routes for service - }; - assert_eq!(routes.len(), num_routes); - assert_eq!(tls_route_name(&routes[0]), "a-route"); - assert_eq!(tls_route_name(&routes[1]), "b-route"); -} - -async fn tls_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { - let mut route = create( - client, - mk_tls_route(ns, "foo-route", &parent, Some(4191)) - .with_backends(&[parent.clone()]) - .build(), - ) - .await; - await_tls_route_status(client, ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be attached. - let routes = tls_routes(&config); - let tls_route: &grpc::outbound::TlsRoute = assert_route_attached(routes, &parent); - assert_tls_route_name_eq(tls_route, "foo-route"); - - route - .spec - .inner - .parent_refs - .as_mut() - .unwrap() - .first_mut() - .unwrap() - .name = "other".to_string(); - update(client, route.clone()).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be unattached and the default route should be present. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &parent, 4191); - }); - - route - .spec - .inner - .parent_refs - .as_mut() - .unwrap() - .first_mut() - .unwrap() - .name = parent.name(); - update(client, route).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, &parent, 4191); - - // The route should be attached again. - // The route should be attached. - let routes = tls_routes(&config); - let tls_route: &grpc::outbound::TlsRoute = assert_route_attached(routes, &parent); - assert_tls_route_name_eq(tls_route, "foo-route"); -} +// use futures::prelude::*; +// use linkerd_policy_controller_k8s_api as k8s; +// use linkerd_policy_test::{ +// assert_resource_meta, assert_status_accepted, await_egress_net_status, await_tls_route_status, +// create, create_cluster_scoped, create_egress_network, create_service, delete_cluster_scoped, +// grpc, mk_egress_net, mk_service, outbound_api::*, update, with_temp_ns, Resource, +// }; +// use maplit::{btreemap, convert_args}; + +// #[tokio::test(flavor = "current_thread")] +// async fn service_with_tls_routes_with_backend() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// let backend_svc = create_service(&client, &ns, "backend", 8888).await; +// parent_with_tls_routes_with_backend( +// Resource::Service(svc), +// Resource::Service(backend_svc), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_with_tls_routes_with_backend() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// 
assert_status_accepted(status.conditions); + +// parent_with_tls_routes_with_backend( +// Resource::EgressNetwork(egress.clone()), +// Resource::EgressNetwork(egress), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_with_tls_routes_with_cross_namespace_backend() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + +// let backend_ns_name = format!("{}-backend", ns); +// let backend_ns = create_cluster_scoped( +// &client, +// k8s::Namespace { +// metadata: k8s::ObjectMeta { +// name: Some(backend_ns_name.clone()), +// labels: Some(convert_args!(btreemap!( +// "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), +// ))), +// ..Default::default() +// }, +// ..Default::default() +// }, +// ) +// .await; +// let backend_name = "backend"; +// let backend_svc = +// Resource::Service(create_service(&client, &backend_ns_name, backend_name, 8888).await); +// let backends = [backend_svc.clone()]; +// let route = mk_tls_route(&ns, "foo-route", &svc, Some(4191)).with_backends(&backends); +// let _route = create(&client, route.build()).await; +// await_tls_route_status(&client, &ns, "foo-route").await; + +// let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &svc, 4191); + +// let routes = tls_routes(&config); +// let route = assert_singleton(routes); +// let backends = tls_route_backends_random_available(route); +// let backend = assert_singleton(backends); +// assert_tls_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_svc, 8888); + +// delete_cluster_scoped(&client, backend_ns).await +// }) +// .await; +// } + +// 
#[tokio::test(flavor = "current_thread")] +// async fn service_with_tls_routes_with_invalid_backend() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// let backend = mk_service(&ns, "invalid", 4191); + +// parent_with_tls_routes_with_invalid_backend( +// Resource::Service(svc), +// Resource::Service(backend), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_with_tls_routes_with_invalid_backend() { +// with_temp_ns(|client, ns| async move { +// // Create an egress network +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// let backend = mk_egress_net(&ns, "invalid"); + +// parent_with_tls_routes_with_invalid_backend( +// Resource::EgressNetwork(egress), +// Resource::EgressNetwork(backend), +// &client, +// &ns, +// ) +// .await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_with_multiple_tls_routes() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// parent_with_multiple_tls_routes(Resource::Service(svc), &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_with_multiple_http_routes() { +// with_temp_ns(|client, ns| async move { +// // Create an egress net +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// parent_with_multiple_tls_routes(Resource::EgressNetwork(egress), &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn tls_route_with_no_port() { 
+// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + +// let _route = create( +// &client, +// mk_tls_route(&ns, "foo-route", &svc, None) +// .with_backends(&[svc.clone()]) +// .build(), +// ) +// .await; +// await_tls_route_status(&client, &ns, "foo-route").await; + +// let mut rx_4191 = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; +// let mut rx_9999 = retry_watch_outbound_policy(&client, &ns, &svc, 9999).await; + +// let config_4191 = rx_4191 +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config_4191); + +// let routes = tls_routes(&config_4191); +// let route = assert_singleton(routes); +// assert_tls_route_name_eq(route, "foo-route"); + +// let config_9999 = rx_9999 +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config_9999); + +// let routes = tls_routes(&config_9999); +// let route = assert_singleton(routes); +// assert_tls_route_name_eq(route, "foo-route"); +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn producer_route() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + +// // A route created in the same namespace as its parent service is called +// // a producer route. It should be returned in outbound policy requests +// // for that service from ALL namespaces. 
+// let _route = create( +// &client, +// mk_tls_route(&ns, "foo-route", &svc, Some(4191)) +// .with_backends(&[svc.clone()]) +// .build(), +// ) +// .await; +// await_tls_route_status(&client, &ns, "foo-route").await; + +// let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; +// let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; + +// let producer_config = producer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?producer_config); +// let consumer_config = consumer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?consumer_config); + +// let routes = tls_routes(&producer_config); +// let route = assert_singleton(routes); +// assert_tls_route_name_eq(route, "foo-route"); + +// let routes = tls_routes(&consumer_config); +// let route = assert_singleton(routes); +// assert_tls_route_name_eq(route, "foo-route"); +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn pre_existing_producer_route() { +// // We test the scenario where outbound policy watches are initiated after +// // a produce route already exists. +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + +// // A route created in the same namespace as its parent service is called +// // a producer route. It should be returned in outbound policy requests +// // for that service from ALL namespaces. 
+// let _route = create( +// &client, +// mk_tls_route(&ns, "foo-route", &svc, Some(4191)) +// .with_backends(&[svc.clone()]) +// .build(), +// ) +// .await; +// await_tls_route_status(&client, &ns, "foo-route").await; + +// let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; +// let producer_config = producer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?producer_config); + +// let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; +// let consumer_config = consumer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?consumer_config); + +// // The route should be returned in queries from the producer namespace. +// let routes = tls_routes(&producer_config); +// let route = assert_singleton(routes); +// assert_tls_route_name_eq(route, "foo-route"); + +// // The route should be returned in queries from a consumer namespace. 
+// let routes = tls_routes(&consumer_config); +// let route = assert_singleton(routes); +// assert_tls_route_name_eq(route, "foo-route"); +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn consumer_route() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + +// let consumer_ns_name = format!("{}-consumer", ns); +// let consumer_ns = create_cluster_scoped( +// &client, +// k8s::Namespace { +// metadata: k8s::ObjectMeta { +// name: Some(consumer_ns_name.clone()), +// labels: Some(convert_args!(btreemap!( +// "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), +// ))), +// ..Default::default() +// }, +// ..Default::default() +// }, +// ) +// .await; + +// let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; +// let producer_config = producer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?producer_config); + +// let mut consumer_rx = +// retry_watch_outbound_policy(&client, &consumer_ns_name, &svc, 4191).await; +// let consumer_config = consumer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?consumer_config); + +// let mut other_rx = retry_watch_outbound_policy(&client, "other_ns", &svc, 4191).await; +// let other_config = other_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?other_config); + +// // A route created in a different namespace as its parent service is +// // called a consumer route. It should be returned in outbound policy +// // requests for that service ONLY when the request comes from the +// // consumer namespace. 
+// let _route = create( +// &client, +// mk_tls_route(&consumer_ns_name, "foo-route", &svc, Some(4191)) +// .with_backends(&[svc]) +// .build(), +// ) +// .await; +// await_tls_route_status(&client, &consumer_ns_name, "foo-route").await; + +// // The route should NOT be returned in queries from the producer namespace. +// // There should be a default route. +// assert!(producer_rx.next().now_or_never().is_none()); + +// // The route should be returned in queries from the same consumer +// // namespace. +// let consumer_config = consumer_rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?consumer_config); + +// let routes = tls_routes(&consumer_config); +// let route = assert_singleton(routes); +// assert_tls_route_name_eq(route, "foo-route"); + +// // The route should NOT be returned in queries from a different consumer +// // namespace. +// assert!(other_rx.next().now_or_never().is_none()); + +// delete_cluster_scoped(&client, consumer_ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn service_tls_route_reattachment() { +// with_temp_ns(|client, ns| async move { +// // Create a service +// let svc = create_service(&client, &ns, "my-svc", 4191).await; +// tls_route_reattachment(Resource::Service(svc), &client, &ns).await; +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn egress_net_tls_route_reattachment() { +// with_temp_ns(|client, ns| async move { +// // Create a egress net +// let egress = create_egress_network(&client, &ns, "my-egress").await; +// let status = await_egress_net_status(&client, &ns, "my-egress").await; +// assert_status_accepted(status.conditions); + +// tls_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await; +// }) +// .await; +// } + +// /* Helpers */ +// struct TlsRouteBuilder(k8s_gateway_api::TlsRoute); + +// fn mk_tls_route(ns: &str, name: &str, parent: 
&Resource, port: Option) -> TlsRouteBuilder { +// use k8s_gateway_api as api; + +// TlsRouteBuilder(api::TlsRoute { +// metadata: kube::api::ObjectMeta { +// namespace: Some(ns.to_string()), +// name: Some(name.to_string()), +// ..Default::default() +// }, +// spec: api::TlsRouteSpec { +// inner: api::CommonRouteSpec { +// parent_refs: Some(vec![api::ParentReference { +// group: Some(parent.group()), +// kind: Some(parent.kind()), +// namespace: Some(parent.namespace()), +// name: parent.name(), +// section_name: None, +// port, +// }]), +// }, +// hostnames: None, +// rules: vec![api::TlsRouteRule { +// backend_refs: Vec::default(), +// }], +// }, +// status: None, +// }) +// } + +// impl TlsRouteBuilder { +// fn with_backends(self, backends: &[Resource]) -> Self { +// let mut route = self.0; +// let backend_refs: Vec<_> = backends +// .iter() +// .map(|backend| k8s_gateway_api::BackendRef { +// weight: None, +// inner: k8s_gateway_api::BackendObjectReference { +// name: backend.name(), +// port: Some(8888), +// group: Some(backend.group()), +// kind: Some(backend.kind()), +// namespace: Some(backend.namespace()), +// }, +// }) +// .collect(); +// route.spec.rules.iter_mut().for_each(|rule| { +// rule.backend_refs = backend_refs.clone(); +// }); +// Self(route) +// } + +// fn build(self) -> k8s_gateway_api::TlsRoute { +// self.0 +// } +// } + +// async fn parent_with_tls_routes_with_backend( +// parent: Resource, +// rule_backend: Resource, +// client: &kube::Client, +// ns: &str, +// ) { +// let backends = [rule_backend.clone()]; +// let route = mk_tls_route(ns, "foo-route", &parent, Some(4191)).with_backends(&backends); +// let _route = create(client, route.build()).await; +// await_tls_route_status(client, ns, "foo-route").await; + +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// 
tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// let routes = tls_routes(&config); +// let route = assert_route_attached(routes, &parent); +// let backends = tls_route_backends_random_available(route); +// let backend = assert_singleton(backends); +// assert_tls_backend_matches_parent(backend.backend.as_ref().unwrap(), &rule_backend, 8888); +// } + +// async fn parent_with_tls_routes_with_invalid_backend( +// parent: Resource, +// backend: Resource, +// client: &kube::Client, +// ns: &str, +// ) { +// let backends = [backend]; +// let route = mk_tls_route(ns, "foo-route", &parent, Some(4191)).with_backends(&backends); +// let _route = create(client, route.build()).await; +// await_tls_route_status(client, ns, "foo-route").await; + +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// let routes = tls_routes(&config); +// let route = assert_route_attached(routes, &parent); +// let backends = tls_route_backends_random_available(route); +// assert_singleton(backends); +// } + +// async fn parent_with_multiple_tls_routes(parent: Resource, client: &kube::Client, ns: &str) { +// // Routes should be returned in sorted order by creation timestamp then +// // name. To ensure that this test isn't timing dependant, routes should +// // be created in alphabetical order. +// let _a_route = create( +// client, +// mk_tls_route(ns, "a-route", &parent, Some(4191)) +// .with_backends(&[parent.clone()]) +// .build(), +// ) +// .await; +// await_tls_route_status(client, ns, "a-route").await; + +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + +// // First route update. 
+// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// let _b_route = create( +// client, +// mk_tls_route(ns, "b-route", &parent, Some(4191)) +// .with_backends(&[parent.clone()]) +// .build(), +// ) +// .await; +// await_tls_route_status(client, ns, "b-route").await; + +// // Second route update. +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// let routes = tls_routes(&config); +// let num_routes = match parent { +// Resource::EgressNetwork(_) => 3, // three routes for egress net 2 configured + 1 default +// Resource::Service(_) => 2, // two routes for service +// }; +// assert_eq!(routes.len(), num_routes); +// assert_eq!(tls_route_name(&routes[0]), "a-route"); +// assert_eq!(tls_route_name(&routes[1]), "b-route"); +// } + +// async fn tls_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { +// let mut route = create( +// client, +// mk_tls_route(ns, "foo-route", &parent, Some(4191)) +// .with_backends(&[parent.clone()]) +// .build(), +// ) +// .await; +// await_tls_route_status(client, ns, "foo-route").await; + +// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an initial config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // The route should be attached. 
+// let routes = tls_routes(&config); +// let tls_route: &grpc::outbound::TlsRoute = assert_route_attached(routes, &parent); +// assert_tls_route_name_eq(tls_route, "foo-route"); + +// route +// .spec +// .inner +// .parent_refs +// .as_mut() +// .unwrap() +// .first_mut() +// .unwrap() +// .name = "other".to_string(); +// update(client, route.clone()).await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // The route should be unattached and the default route should be present. +// detect_http_routes(&config, |routes| { +// let route = assert_singleton(routes); +// assert_route_is_default(route, &parent, 4191); +// }); + +// route +// .spec +// .inner +// .parent_refs +// .as_mut() +// .unwrap() +// .first_mut() +// .unwrap() +// .name = parent.name(); +// update(client, route).await; + +// let config = rx +// .next() +// .await +// .expect("watch must not fail") +// .expect("watch must return an updated config"); +// tracing::trace!(?config); + +// assert_resource_meta(&config.metadata, &parent, 4191); + +// // The route should be attached again. +// // The route should be attached. 
+// let routes = tls_routes(&config); +// let tls_route: &grpc::outbound::TlsRoute = assert_route_attached(routes, &parent); +// assert_tls_route_name_eq(tls_route, "foo-route"); +// } diff --git a/policy-test/tests/outbound_http_route_status.rs b/policy-test/tests/outbound_http_route_status.rs index 430760a0ae825..916320b979fd1 100644 --- a/policy-test/tests/outbound_http_route_status.rs +++ b/policy-test/tests/outbound_http_route_status.rs @@ -1,250 +1,250 @@ -use k8s::Condition; -use k8s_gateway_api::{ParentReference, RouteParentStatus, RouteStatus}; -use k8s_openapi::chrono::Utc; -use kube::ResourceExt; -use linkerd_policy_controller_core::POLICY_CONTROLLER_NAME; -use linkerd_policy_controller_k8s_api as k8s; -use linkerd_policy_test::{ - await_condition, await_route_status, create, find_route_condition, mk_route, with_temp_ns, -}; - -#[tokio::test(flavor = "current_thread")] -async fn accepted_parent() { - with_temp_ns(|client, ns| async move { - // Create a parent Service - let svc_name = "test-service"; - let svc = k8s::Service { - metadata: k8s::ObjectMeta { - namespace: Some(ns.clone()), - name: Some(svc_name.to_string()), - ..Default::default() - }, - spec: Some(k8s::ServiceSpec { - type_: Some("ClusterIP".to_string()), - ports: Some(vec![k8s::ServicePort { - port: 80, - ..Default::default() - }]), - ..Default::default() - }), - ..k8s::Service::default() - }; - let svc = create(&client, svc).await; - let svc_ref = vec![k8s::policy::httproute::ParentReference { - group: Some("core".to_string()), - kind: Some("Service".to_string()), - namespace: svc.namespace(), - name: svc.name_unchecked(), - section_name: None, - port: Some(80), - }]; - - // Create a route that references the Service resource. 
- let _route = create(&client, mk_route(&ns, "test-route", Some(svc_ref))).await; - // Wait until route is updated with a status - let statuses = await_route_status(&client, &ns, "test-route").await.parents; - - let route_status = statuses - .clone() - .into_iter() - .find(|route_status| route_status.parent_ref.name == svc_name) - .expect("must have at least one parent status"); - - // Check status references to parent we have created - assert_eq!(route_status.parent_ref.group.as_deref(), Some("core")); - assert_eq!(route_status.parent_ref.kind.as_deref(), Some("Service")); - - // Check status is accepted with a status of 'True' - let cond = find_route_condition(&statuses, svc_name) - .expect("must have at least one 'Accepted' condition for accepted servuce"); - assert_eq!(cond.status, "True"); - assert_eq!(cond.reason, "Accepted") - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn no_cluster_ip() { - with_temp_ns(|client, ns| async move { - // Create a parent Service - let svc = k8s::Service { - metadata: k8s::ObjectMeta { - namespace: Some(ns.clone()), - name: Some("test-service".to_string()), - ..Default::default() - }, - spec: Some(k8s::ServiceSpec { - cluster_ip: Some("None".to_string()), - type_: Some("ClusterIP".to_string()), - ports: Some(vec![k8s::ServicePort { - port: 80, - ..Default::default() - }]), - ..Default::default() - }), - ..k8s::Service::default() - }; - let svc = create(&client, svc).await; - let svc_ref = vec![k8s::policy::httproute::ParentReference { - group: Some("core".to_string()), - kind: Some("Service".to_string()), - namespace: svc.namespace(), - name: svc.name_unchecked(), - section_name: None, - port: Some(80), - }]; - - // Create a route that references the Service resource. 
- let _route = create(&client, mk_route(&ns, "test-route", Some(svc_ref))).await; - // Wait until route is updated with a status - let status = await_route_status(&client, &ns, "test-route").await; - let cond = find_route_condition(&status.parents, "test-service") - .expect("must have at least one 'Accepted' condition set for parent"); - // Parent with no ClusterIP should not match. - assert_eq!(cond.status, "False"); - assert_eq!(cond.reason, "NoMatchingParent"); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn external_name() { - with_temp_ns(|client, ns| async move { - // Create a parent Service - let svc = k8s::Service { - metadata: k8s::ObjectMeta { - namespace: Some(ns.clone()), - name: Some("test-service".to_string()), - ..Default::default() - }, - spec: Some(k8s::ServiceSpec { - type_: Some("ExternalName".to_string()), - external_name: Some("linkerd.io".to_string()), - ports: Some(vec![k8s::ServicePort { - port: 80, - ..Default::default() - }]), - ..Default::default() - }), - ..k8s::Service::default() - }; - let svc = create(&client, svc).await; - let svc_ref = vec![k8s::policy::httproute::ParentReference { - group: Some("core".to_string()), - kind: Some("Service".to_string()), - namespace: svc.namespace(), - name: svc.name_unchecked(), - section_name: None, - port: Some(80), - }]; - - // Create a route that references the Service resource. - let _route = create(&client, mk_route(&ns, "test-route", Some(svc_ref))).await; - // Wait until route is updated with a status - let status = await_route_status(&client, &ns, "test-route").await; - let cond = find_route_condition(&status.parents, "test-service") - .expect("must have at least one 'Accepted' condition set for parent"); - // Parent with ExternalName should not match. 
- assert_eq!(cond.status, "False"); - assert_eq!(cond.reason, "NoMatchingParent"); - }) - .await; -} - -#[tokio::test(flavor = "current_thread")] -async fn multiple_statuses() { - with_temp_ns(|client, ns| async move { - // Create a parent Service - let svc_name = "test-service"; - let svc = k8s::Service { - metadata: k8s::ObjectMeta { - namespace: Some(ns.clone()), - name: Some(svc_name.to_string()), - ..Default::default() - }, - spec: Some(k8s::ServiceSpec { - type_: Some("ClusterIP".to_string()), - ports: Some(vec![k8s::ServicePort { - port: 80, - ..Default::default() - }]), - ..Default::default() - }), - ..k8s::Service::default() - }; - let svc = create(&client, svc).await; - let svc_ref = vec![k8s::policy::httproute::ParentReference { - group: Some("core".to_string()), - kind: Some("Service".to_string()), - namespace: svc.namespace(), - name: svc.name_unchecked(), - section_name: None, - port: Some(80), - }]; - - // Create a route that references the Service resource. - let _route = create(&client, mk_route(&ns, "test-route", Some(svc_ref))).await; - - // Patch a status onto the HttpRoute. 
- let value = serde_json::json!({ - "apiVersion": "policy.linkerd.io", - "kind": "HTTPRoute", - "name": "test-route", - "status": k8s::policy::httproute::HttpRouteStatus { - inner: RouteStatus { - parents: vec![RouteParentStatus { - conditions: vec![Condition { - last_transition_time: k8s::Time(Utc::now()), - message: "".to_string(), - observed_generation: None, - reason: "Accepted".to_string(), - status: "True".to_string(), - type_: "Accepted".to_string(), - }], - controller_name: "someone/else".to_string(), - parent_ref: ParentReference { - group: Some("gateway.networking.k8s.io".to_string()), - name: "foo".to_string(), - kind: Some("Gateway".to_string()), - namespace: Some("bar".to_string()), - port: None, - section_name: None, - }, - }], - }, - }, - }); - let patch = k8s::Patch::Merge(value); - let patch_params = k8s::PatchParams::apply("someone/else"); - let api = k8s::Api::::namespaced(client.clone(), &ns); - api.patch_status("test-route", &patch_params, &patch) - .await - .expect("failed to patch status"); - - await_condition( - &client, - &ns, - "test-route", - |obj: Option<&k8s::policy::HttpRoute>| -> bool { - obj.and_then(|route| route.status.as_ref()) - .map(|status| { - let statuses = &status.inner.parents; - - let other_status_found = statuses - .iter() - .any(|route_status| route_status.controller_name == "someone/else"); - - let linkerd_status_found = statuses.iter().any(|route_status| { - route_status.controller_name == POLICY_CONTROLLER_NAME - }); - - other_status_found && linkerd_status_found - }) - .unwrap_or(false) - }, - ) - .await - .expect("must have both statuses"); - }) - .await; -} +// use k8s::Condition; +// use k8s_gateway_api::{ParentReference, RouteParentStatus, RouteStatus}; +// use k8s_openapi::chrono::Utc; +// use kube::ResourceExt; +// use linkerd_policy_controller_core::POLICY_CONTROLLER_NAME; +// use linkerd_policy_controller_k8s_api as k8s; +// use linkerd_policy_test::{ +// await_condition, await_route_status, create, 
find_route_condition, mk_route, with_temp_ns, +// }; + +// #[tokio::test(flavor = "current_thread")] +// async fn accepted_parent() { +// with_temp_ns(|client, ns| async move { +// // Create a parent Service +// let svc_name = "test-service"; +// let svc = k8s::Service { +// metadata: k8s::ObjectMeta { +// namespace: Some(ns.clone()), +// name: Some(svc_name.to_string()), +// ..Default::default() +// }, +// spec: Some(k8s::ServiceSpec { +// type_: Some("ClusterIP".to_string()), +// ports: Some(vec![k8s::ServicePort { +// port: 80, +// ..Default::default() +// }]), +// ..Default::default() +// }), +// ..k8s::Service::default() +// }; +// let svc = create(&client, svc).await; +// let svc_ref = vec![k8s::policy::httproute::ParentReference { +// group: Some("core".to_string()), +// kind: Some("Service".to_string()), +// namespace: svc.namespace(), +// name: svc.name_unchecked(), +// section_name: None, +// port: Some(80), +// }]; + +// // Create a route that references the Service resource. 
+// let _route = create(&client, mk_route(&ns, "test-route", Some(svc_ref))).await; +// // Wait until route is updated with a status +// let statuses = await_route_status(&client, &ns, "test-route").await.parents; + +// let route_status = statuses +// .clone() +// .into_iter() +// .find(|route_status| route_status.parent_ref.name == svc_name) +// .expect("must have at least one parent status"); + +// // Check status references to parent we have created +// assert_eq!(route_status.parent_ref.group.as_deref(), Some("core")); +// assert_eq!(route_status.parent_ref.kind.as_deref(), Some("Service")); + +// // Check status is accepted with a status of 'True' +// let cond = find_route_condition(&statuses, svc_name) +// .expect("must have at least one 'Accepted' condition for accepted servuce"); +// assert_eq!(cond.status, "True"); +// assert_eq!(cond.reason, "Accepted") +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn no_cluster_ip() { +// with_temp_ns(|client, ns| async move { +// // Create a parent Service +// let svc = k8s::Service { +// metadata: k8s::ObjectMeta { +// namespace: Some(ns.clone()), +// name: Some("test-service".to_string()), +// ..Default::default() +// }, +// spec: Some(k8s::ServiceSpec { +// cluster_ip: Some("None".to_string()), +// type_: Some("ClusterIP".to_string()), +// ports: Some(vec![k8s::ServicePort { +// port: 80, +// ..Default::default() +// }]), +// ..Default::default() +// }), +// ..k8s::Service::default() +// }; +// let svc = create(&client, svc).await; +// let svc_ref = vec![k8s::policy::httproute::ParentReference { +// group: Some("core".to_string()), +// kind: Some("Service".to_string()), +// namespace: svc.namespace(), +// name: svc.name_unchecked(), +// section_name: None, +// port: Some(80), +// }]; + +// // Create a route that references the Service resource. 
+// let _route = create(&client, mk_route(&ns, "test-route", Some(svc_ref))).await; +// // Wait until route is updated with a status +// let status = await_route_status(&client, &ns, "test-route").await; +// let cond = find_route_condition(&status.parents, "test-service") +// .expect("must have at least one 'Accepted' condition set for parent"); +// // Parent with no ClusterIP should not match. +// assert_eq!(cond.status, "False"); +// assert_eq!(cond.reason, "NoMatchingParent"); +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn external_name() { +// with_temp_ns(|client, ns| async move { +// // Create a parent Service +// let svc = k8s::Service { +// metadata: k8s::ObjectMeta { +// namespace: Some(ns.clone()), +// name: Some("test-service".to_string()), +// ..Default::default() +// }, +// spec: Some(k8s::ServiceSpec { +// type_: Some("ExternalName".to_string()), +// external_name: Some("linkerd.io".to_string()), +// ports: Some(vec![k8s::ServicePort { +// port: 80, +// ..Default::default() +// }]), +// ..Default::default() +// }), +// ..k8s::Service::default() +// }; +// let svc = create(&client, svc).await; +// let svc_ref = vec![k8s::policy::httproute::ParentReference { +// group: Some("core".to_string()), +// kind: Some("Service".to_string()), +// namespace: svc.namespace(), +// name: svc.name_unchecked(), +// section_name: None, +// port: Some(80), +// }]; + +// // Create a route that references the Service resource. +// let _route = create(&client, mk_route(&ns, "test-route", Some(svc_ref))).await; +// // Wait until route is updated with a status +// let status = await_route_status(&client, &ns, "test-route").await; +// let cond = find_route_condition(&status.parents, "test-service") +// .expect("must have at least one 'Accepted' condition set for parent"); +// // Parent with ExternalName should not match. 
+// assert_eq!(cond.status, "False"); +// assert_eq!(cond.reason, "NoMatchingParent"); +// }) +// .await; +// } + +// #[tokio::test(flavor = "current_thread")] +// async fn multiple_statuses() { +// with_temp_ns(|client, ns| async move { +// // Create a parent Service +// let svc_name = "test-service"; +// let svc = k8s::Service { +// metadata: k8s::ObjectMeta { +// namespace: Some(ns.clone()), +// name: Some(svc_name.to_string()), +// ..Default::default() +// }, +// spec: Some(k8s::ServiceSpec { +// type_: Some("ClusterIP".to_string()), +// ports: Some(vec![k8s::ServicePort { +// port: 80, +// ..Default::default() +// }]), +// ..Default::default() +// }), +// ..k8s::Service::default() +// }; +// let svc = create(&client, svc).await; +// let svc_ref = vec![k8s::policy::httproute::ParentReference { +// group: Some("core".to_string()), +// kind: Some("Service".to_string()), +// namespace: svc.namespace(), +// name: svc.name_unchecked(), +// section_name: None, +// port: Some(80), +// }]; + +// // Create a route that references the Service resource. +// let _route = create(&client, mk_route(&ns, "test-route", Some(svc_ref))).await; + +// // Patch a status onto the HttpRoute. 
+// let value = serde_json::json!({ +// "apiVersion": "policy.linkerd.io", +// "kind": "HTTPRoute", +// "name": "test-route", +// "status": k8s::policy::httproute::HttpRouteStatus { +// inner: RouteStatus { +// parents: vec![RouteParentStatus { +// conditions: vec![Condition { +// last_transition_time: k8s::Time(Utc::now()), +// message: "".to_string(), +// observed_generation: None, +// reason: "Accepted".to_string(), +// status: "True".to_string(), +// type_: "Accepted".to_string(), +// }], +// controller_name: "someone/else".to_string(), +// parent_ref: ParentReference { +// group: Some("gateway.networking.k8s.io".to_string()), +// name: "foo".to_string(), +// kind: Some("Gateway".to_string()), +// namespace: Some("bar".to_string()), +// port: None, +// section_name: None, +// }, +// }], +// }, +// }, +// }); +// let patch = k8s::Patch::Merge(value); +// let patch_params = k8s::PatchParams::apply("someone/else"); +// let api = k8s::Api::::namespaced(client.clone(), &ns); +// api.patch_status("test-route", &patch_params, &patch) +// .await +// .expect("failed to patch status"); + +// await_condition( +// &client, +// &ns, +// "test-route", +// |obj: Option<&k8s::policy::HttpRoute>| -> bool { +// obj.and_then(|route| route.status.as_ref()) +// .map(|status| { +// let statuses = &status.inner.parents; + +// let other_status_found = statuses +// .iter() +// .any(|route_status| route_status.controller_name == "someone/else"); + +// let linkerd_status_found = statuses.iter().any(|route_status| { +// route_status.controller_name == POLICY_CONTROLLER_NAME +// }); + +// other_status_found && linkerd_status_found +// }) +// .unwrap_or(false) +// }, +// ) +// .await +// .expect("must have both statuses"); +// }) +// .await; +// } From 4c041a7dd9bb666ae6d06798a342acc623b49032 Mon Sep 17 00:00:00 2001 From: Alex Leong Date: Tue, 17 Dec 2024 06:57:25 +0000 Subject: [PATCH 3/9] checkpoint. 
tests pass so far Signed-off-by: Alex Leong --- policy-test/src/test_route.rs | 115 +++++++++--------------------- policy-test/tests/outbound_api.rs | 75 +++++++++++++------ 2 files changed, 89 insertions(+), 101 deletions(-) diff --git a/policy-test/src/test_route.rs b/policy-test/src/test_route.rs index a74f1deb3c782..8254f207c1cb9 100644 --- a/policy-test/src/test_route.rs +++ b/policy-test/src/test_route.rs @@ -1,17 +1,11 @@ -use std::future::Future; - use k8s_gateway_api::{self as gateway, BackendRef, ParentReference}; use k8s_openapi::Resource; -use kube::Client; use linkerd2_proxy_api::{meta, meta::Metadata, outbound}; use linkerd_policy_controller_k8s_api::{ self as k8s, policy, Condition, Resource as _, ResourceExt, }; -use crate::{ - create, - outbound_api::{detect_http_routes, grpc_routes, tcp_routes, tls_routes}, -}; +use crate::outbound_api::{detect_http_routes, grpc_routes, tcp_routes, tls_routes}; pub trait TestRoute: kube::Resource @@ -27,12 +21,11 @@ pub trait TestRoute: type Backend; type Filter; - fn create_route( - client: &Client, + fn make_route( ns: impl ToString, parents: Vec, rules: Vec>, - ) -> impl Future; + ) -> Self; fn routes(config: &outbound::OutboundPolicy, f: F) where F: Fn(&[Self::Route]); @@ -66,9 +59,8 @@ pub trait TestParent: + Send + Sync { - async fn create_parent(client: &Client, ns: impl ToString) -> Self; fn make_parent(ns: impl ToString) -> Self; - async fn create_backend(client: &Client, ns: impl ToString) -> Self; + fn make_backend(ns: impl ToString) -> Option; fn conditions(&self) -> Vec<&Condition>; fn obj_ref(&self) -> ParentReference; fn ip(&self) -> &str; @@ -79,8 +71,7 @@ impl TestRoute for gateway::HttpRoute { type Backend = outbound::http_route::RouteBackend; type Filter = outbound::http_route::Filter; - async fn create_route( - client: &Client, + fn make_route( ns: impl ToString, parents: Vec, rules: Vec>, @@ -102,7 +93,7 @@ impl TestRoute for gateway::HttpRoute { } }) .collect(); - let route = 
gateway::HttpRoute { + gateway::HttpRoute { metadata: k8s::ObjectMeta { namespace: Some(ns.to_string()), name: Some("foo-route".to_string()), @@ -116,8 +107,7 @@ impl TestRoute for gateway::HttpRoute { rules: Some(rules), }, status: None, - }; - create(client, route).await + } } fn routes(config: &outbound::OutboundPolicy, f: F) @@ -197,8 +187,7 @@ impl TestRoute for policy::HttpRoute { type Backend = outbound::http_route::RouteBackend; type Filter = outbound::http_route::Filter; - async fn create_route( - client: &Client, + fn make_route( ns: impl ToString, parents: Vec, rules: Vec>, @@ -221,7 +210,7 @@ impl TestRoute for policy::HttpRoute { } }) .collect(); - let route = policy::HttpRoute { + policy::HttpRoute { metadata: k8s::ObjectMeta { namespace: Some(ns.to_string()), name: Some("foo-route".to_string()), @@ -235,8 +224,7 @@ impl TestRoute for policy::HttpRoute { rules: Some(rules), }, status: None, - }; - create(client, route).await + } } fn routes(config: &outbound::OutboundPolicy, f: F) @@ -316,8 +304,7 @@ impl TestRoute for gateway::GrpcRoute { type Backend = outbound::grpc_route::RouteBackend; type Filter = outbound::grpc_route::Filter; - async fn create_route( - client: &Client, + fn make_route( ns: impl ToString, parents: Vec, rules: Vec>, @@ -340,7 +327,7 @@ impl TestRoute for gateway::GrpcRoute { } }) .collect(); - let route = gateway::GrpcRoute { + gateway::GrpcRoute { metadata: k8s::ObjectMeta { namespace: Some(ns.to_string()), name: Some("foo-route".to_string()), @@ -354,8 +341,7 @@ impl TestRoute for gateway::GrpcRoute { rules: Some(rules), }, status: None, - }; - create(client, route).await + } } fn routes(config: &outbound::OutboundPolicy, f: F) @@ -435,8 +421,7 @@ impl TestRoute for gateway::TlsRoute { type Backend = outbound::tls_route::RouteBackend; type Filter = outbound::tls_route::Filter; - async fn create_route( - client: &Client, + fn make_route( ns: impl ToString, parents: Vec, rules: Vec>, @@ -447,7 +432,7 @@ impl TestRoute for 
gateway::TlsRoute { backend_refs: backends, }) .collect(); - let route = gateway::TlsRoute { + gateway::TlsRoute { metadata: k8s::ObjectMeta { namespace: Some(ns.to_string()), name: Some("foo-route".to_string()), @@ -461,8 +446,7 @@ impl TestRoute for gateway::TlsRoute { rules, }, status: None, - }; - create(client, route).await + } } fn routes(config: &outbound::OutboundPolicy, f: F) @@ -542,8 +526,7 @@ impl TestRoute for gateway::TcpRoute { type Backend = outbound::opaque_route::RouteBackend; type Filter = outbound::opaque_route::Filter; - async fn create_route( - client: &Client, + fn make_route( ns: impl ToString, parents: Vec, rules: Vec>, @@ -554,7 +537,7 @@ impl TestRoute for gateway::TcpRoute { backend_refs: backends, }) .collect(); - let route = gateway::TcpRoute { + gateway::TcpRoute { metadata: k8s::ObjectMeta { namespace: Some(ns.to_string()), name: Some("foo-route".to_string()), @@ -567,8 +550,7 @@ impl TestRoute for gateway::TcpRoute { rules, }, status: None, - }; - create(client, route).await + } } fn routes(config: &outbound::OutboundPolicy, f: F) @@ -644,25 +626,6 @@ impl TestRoute for gateway::TcpRoute { } impl TestParent for k8s::Service { - async fn create_parent(client: &Client, ns: impl ToString) -> Self { - let service = k8s::Service { - metadata: k8s::ObjectMeta { - namespace: Some(ns.to_string()), - name: Some("my-svc".to_string()), - ..Default::default() - }, - spec: Some(k8s::ServiceSpec { - ports: Some(vec![k8s::ServicePort { - port: 4191, - ..Default::default() - }]), - ..Default::default() - }), - ..k8s::Service::default() - }; - create(client, service).await - } - fn make_parent(ns: impl ToString) -> Self { k8s::Service { metadata: k8s::ObjectMeta { @@ -681,7 +644,7 @@ impl TestParent for k8s::Service { } } - async fn create_backend(client: &Client, ns: impl ToString) -> Self { + fn make_backend(ns: impl ToString) -> Option { let service = k8s::Service { metadata: k8s::ObjectMeta { namespace: Some(ns.to_string()), @@ -697,7 +660,7 @@ 
impl TestParent for k8s::Service { }), ..k8s::Service::default() }; - create(client, service).await + Some(service) } fn conditions(&self) -> Vec<&Condition> { @@ -727,32 +690,24 @@ impl TestParent for k8s::Service { } } -fn make_egress(ns: impl ToString) -> policy::EgressNetwork { - policy::EgressNetwork { - metadata: k8s::ObjectMeta { - namespace: Some(ns.to_string()), - name: Some("my-egress".to_string()), - ..Default::default() - }, - spec: policy::EgressNetworkSpec { - networks: None, - traffic_policy: policy::egress_network::TrafficPolicy::Allow, - }, - status: None, - } -} - impl TestParent for policy::EgressNetwork { - async fn create_parent(client: &Client, ns: impl ToString) -> Self { - create(client, make_egress(ns)).await - } - fn make_parent(ns: impl ToString) -> Self { - make_egress(ns) + policy::EgressNetwork { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some("my-egress".to_string()), + ..Default::default() + }, + spec: policy::EgressNetworkSpec { + networks: None, + traffic_policy: policy::egress_network::TrafficPolicy::Allow, + }, + status: None, + } } - async fn create_backend(_client: &Client, ns: impl ToString) -> Self { - make_egress(ns) + fn make_backend(_ns: impl ToString) -> Option { + None } fn conditions(&self) -> Vec<&Condition> { diff --git a/policy-test/tests/outbound_api.rs b/policy-test/tests/outbound_api.rs index 8a1caa880795f..637ae013d6108 100644 --- a/policy-test/tests/outbound_api.rs +++ b/policy-test/tests/outbound_api.rs @@ -10,10 +10,14 @@ use linkerd_policy_test::{ test_route::{TestParent, TestRoute}, with_temp_ns, }; +use tracing::debug_span; #[tokio::test(flavor = "current_thread")] async fn parent_does_not_exist() { async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); with_temp_ns(|client, ns| async move { let port = 4191; // Some IP address in the cluster networks which we assume is not @@ -37,6 +41,10 @@ async fn parent_does_not_exist() { 
#[tokio::test(flavor = "current_thread")] async fn parent_with_no_routes() { async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()) + ); with_temp_ns(|client, ns| async move { let port = 4191; // Create a parent with no routes. @@ -69,6 +77,10 @@ async fn parent_with_no_routes() { #[tokio::test(flavor = "current_thread")] async fn http_route_with_no_rules() { async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()) + ); with_temp_ns(|client, ns| async move { let port = 4191; let parent = create(&client, P::make_parent(&ns)).await; @@ -89,7 +101,11 @@ async fn http_route_with_no_rules() { assert_route_is_default::(route, &parent.obj_ref(), port); }); - let route = R::create_route(&client, ns.clone(), vec![parent.obj_ref()], vec![]).await; + let route = create( + &client, + R::make_route(ns.clone(), vec![parent.obj_ref()], vec![]), + ) + .await; let status = await_route_status(&client, &route).await; assert_status_accepted(status); @@ -104,7 +120,7 @@ async fn http_route_with_no_rules() { // There should be a route with no rules. R::routes(&config, |routes| { - let outbound_route = assert_singleton(routes); + let outbound_route = routes.first().expect("route must exist"); let rules = &R::rules_first_available(outbound_route); assert!(route.meta_eq(R::extract_meta(outbound_route))); assert!(rules.is_empty()); @@ -122,6 +138,12 @@ async fn http_route_with_no_rules() { #[tokio::test(flavor = "current_thread")] async fn http_routes_without_backends() { async fn test() { + let _span = debug_span!( + "test", + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()) + ) + .entered(); with_temp_ns(|client, ns| async move { // Create a parent let port = 4191; @@ -144,8 +166,11 @@ async fn http_routes_without_backends() { }); // Create a route with one rule with no backends. 
- let route = - R::create_route(&client, ns.clone(), vec![parent.obj_ref()], vec![vec![]]).await; + let route = create( + &client, + R::make_route(ns.clone(), vec![parent.obj_ref()], vec![vec![]]), + ) + .await; let status = await_route_status(&client, &route).await; assert_status_accepted(status); @@ -180,6 +205,10 @@ async fn http_routes_without_backends() { #[tokio::test(flavor = "current_thread")] async fn routes_with_backend() { async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()) + ); with_temp_ns(|client, ns| async move { // Create a parent let port = 4191; @@ -187,8 +216,10 @@ async fn routes_with_backend() { // Create a backend let backend_port = 8888; - let backend = P::make_backend(&ns); - create(&client, backend.clone()).await; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; let config = rx @@ -207,22 +238,24 @@ async fn routes_with_backend() { }); let dt = Default::default(); - let route = R::create_route( + let route = create( &client, - ns, - vec![parent.obj_ref()], - vec![vec![gateway::BackendRef { - weight: None, - inner: gateway::BackendObjectReference { - group: Some(P::group(&dt).to_string()), - kind: Some(P::kind(&dt).to_string()), - name: backend.name_unchecked(), - namespace: backend.namespace(), - port: Some(backend_port), - }, - }]], - ); - create(&client, route.clone()).await; + R::make_route( + ns, + vec![parent.obj_ref()], + vec![vec![gateway::BackendRef { + weight: None, + inner: gateway::BackendObjectReference { + group: Some(P::group(&dt).to_string()), + kind: Some(P::kind(&dt).to_string()), + name: backend.name_unchecked(), + namespace: backend.namespace(), + port: Some(backend_port), + }, + }]], + ), + ) + .await; let status = await_route_status(&client, &route).await; assert_status_accepted(status); From 
6116a5ae5b812ab1bfe8a5901089777584fc770a Mon Sep 17 00:00:00 2001 From: Alex Leong Date: Tue, 17 Dec 2024 07:58:01 +0000 Subject: [PATCH 4/9] checkpoint, more tests Signed-off-by: Alex Leong --- policy-test/src/test_route.rs | 49 +++++ policy-test/tests/outbound_api.rs | 208 ++++++++++++++++++++-- policy-test/tests/outbound_api_gateway.rs | 156 ---------------- policy-test/tests/outbound_api_linkerd.rs | 109 ------------ policy-test/tests/outbound_api_tcp.rs | 86 --------- 5 files changed, 244 insertions(+), 364 deletions(-) diff --git a/policy-test/src/test_route.rs b/policy-test/src/test_route.rs index 8254f207c1cb9..4a537dec7ffa2 100644 --- a/policy-test/src/test_route.rs +++ b/policy-test/src/test_route.rs @@ -35,6 +35,7 @@ pub trait TestRoute: fn rules_random_available(route: &Self::Route) -> Vec>; fn backend(backend: &Self::Backend) -> &outbound::Backend; fn conditions(&self) -> Option>; + fn is_failure_filter(filter: &Self::Filter) -> bool; fn meta_eq(&self, meta: &Metadata) -> bool { let meta = match &meta.kind { @@ -63,6 +64,19 @@ pub trait TestParent: fn make_backend(ns: impl ToString) -> Option; fn conditions(&self) -> Vec<&Condition>; fn obj_ref(&self) -> ParentReference; + fn backend_ref(&self, port: u16) -> gateway::BackendRef { + let dt = Default::default(); + gateway::BackendRef { + weight: None, + inner: gateway::BackendObjectReference { + group: Some(Self::group(&dt).to_string()), + kind: Some(Self::kind(&dt).to_string()), + name: self.name_unchecked(), + namespace: self.namespace(), + port: Some(port), + }, + } + } fn ip(&self) -> &str; } @@ -180,6 +194,13 @@ impl TestRoute for gateway::HttpRoute { .collect() }) } + + fn is_failure_filter(filter: &outbound::http_route::Filter) -> bool { + match filter.kind.as_ref().unwrap() { + outbound::http_route::filter::Kind::FailureInjector(_) => true, + _ => false, + } + } } impl TestRoute for policy::HttpRoute { @@ -297,6 +318,13 @@ impl TestRoute for policy::HttpRoute { .collect() }) } + + fn 
is_failure_filter(filter: &outbound::http_route::Filter) -> bool { + match filter.kind.as_ref().unwrap() { + outbound::http_route::filter::Kind::FailureInjector(_) => true, + _ => false, + } + } } impl TestRoute for gateway::GrpcRoute { @@ -414,6 +442,13 @@ impl TestRoute for gateway::GrpcRoute { .collect() }) } + + fn is_failure_filter(filter: &outbound::grpc_route::Filter) -> bool { + match filter.kind.as_ref().unwrap() { + outbound::grpc_route::filter::Kind::FailureInjector(_) => true, + _ => false, + } + } } impl TestRoute for gateway::TlsRoute { @@ -519,6 +554,13 @@ impl TestRoute for gateway::TlsRoute { .collect() }) } + + fn is_failure_filter(filter: &outbound::tls_route::Filter) -> bool { + match filter.kind.as_ref().unwrap() { + outbound::tls_route::filter::Kind::Invalid(_) => true, + _ => false, + } + } } impl TestRoute for gateway::TcpRoute { @@ -623,6 +665,13 @@ impl TestRoute for gateway::TcpRoute { .collect() }) } + + fn is_failure_filter(filter: &outbound::opaque_route::Filter) -> bool { + match filter.kind.as_ref().unwrap() { + outbound::opaque_route::filter::Kind::Invalid(_) => true, + _ => false, + } + } } impl TestParent for k8s::Service { diff --git a/policy-test/tests/outbound_api.rs b/policy-test/tests/outbound_api.rs index 637ae013d6108..47e6dc72590b5 100644 --- a/policy-test/tests/outbound_api.rs +++ b/policy-test/tests/outbound_api.rs @@ -1,8 +1,9 @@ use futures::StreamExt; use k8s_gateway_api::{self as gateway}; -use linkerd_policy_controller_k8s_api::{self as k8s, policy, ResourceExt}; +use linkerd_policy_controller_k8s_api::{self as k8s, policy}; use linkerd_policy_test::{ - assert_resource_meta, assert_status_accepted, await_route_status, create, grpc, + assert_resource_meta, assert_status_accepted, await_route_status, create, + create_cluster_scoped, delete_cluster_scoped, grpc, outbound_api::{ assert_backend_matches_reference, assert_route_is_default, assert_singleton, retry_watch_outbound_policy, @@ -10,6 +11,7 @@ use 
linkerd_policy_test::{ test_route::{TestParent, TestRoute}, with_temp_ns, }; +use maplit::{btreemap, convert_args}; use tracing::debug_span; #[tokio::test(flavor = "current_thread")] @@ -237,22 +239,12 @@ async fn routes_with_backend() { assert_route_is_default::(route, &parent.obj_ref(), port); }); - let dt = Default::default(); let route = create( &client, R::make_route( ns, vec![parent.obj_ref()], - vec![vec![gateway::BackendRef { - weight: None, - inner: gateway::BackendObjectReference { - group: Some(P::group(&dt).to_string()), - kind: Some(P::kind(&dt).to_string()), - name: backend.name_unchecked(), - namespace: backend.namespace(), - port: Some(backend_port), - }, - }]], + vec![vec![backend.backend_ref(backend_port)]], ), ) .await; @@ -300,3 +292,193 @@ async fn routes_with_backend() { test::().await; test::().await; } + +#[tokio::test(flavor = "current_thread")] +async fn service_with_routes_with_cross_namespace_backend() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()) + ); + with_temp_ns(|client, ns| async move { + // Create a parent + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. 
+ gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + let backend_ns_name = format!("{}-backend", ns); + let backend_ns = create_cluster_scoped( + &client, + k8s::Namespace { + metadata: k8s::ObjectMeta { + name: Some(backend_ns_name.clone()), + labels: Some(convert_args!(btreemap!( + "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), + ))), + ..Default::default() + }, + ..Default::default() + }, + ) + .await; + + // Create a cross namespace backend + let backend_port = 8888; + let backend = match P::make_backend(&backend_ns_name) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + let route = create( + &client, + R::make_route( + ns, + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ), + ) + .await; + let status = await_route_status(&client, &route).await; + assert_status_accepted(status); + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a route with a backend with no filters. 
+ R::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + let rules = &R::rules_random_available(outbound_route); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + let backends = assert_singleton(rules); + + let filters = R::backend_filters(*assert_singleton(backends)); + assert!(filters.is_empty()); + + let outbound_backend = R::backend(*assert_singleton(backends)); + assert_backend_matches_reference( + outbound_backend, + &backend.obj_ref(), + backend_port, + ); + }); + + delete_cluster_scoped(&client, backend_ns).await + }) + .await + } + + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn routes_with_invalid_backend() { + async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()) + ); + with_temp_ns(|client, ns| async move { + // Create a parent + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. 
+ gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + let backend_port = 8888; + let mut backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + backend.meta_mut().name = Some("invalid".to_string()); + let route = create( + &client, + R::make_route( + ns, + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ), + ) + .await; + let status = await_route_status(&client, &route).await; + assert_status_accepted(status); + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a route with a backend with a failure filter. + R::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + let rules = &R::rules_random_available(outbound_route); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + let backends = assert_singleton(rules); + + let filters = R::backend_filters(*assert_singleton(backends)); + let filter = assert_singleton(&filters); + assert!(R::is_failure_filter(filter)); + + let outbound_backend = R::backend(*assert_singleton(backends)); + assert_backend_matches_reference( + outbound_backend, + &backend.obj_ref(), + backend_port, + ); + }); + }) + .await + } + + test::().await; + test::().await; + test::().await; + //test::().await; // TODO: No filters returned? + // test::().await; // TODO: No filters returned? + test::().await; + test::().await; + test::().await; + // test::().await; // TODO: No filters returned? + // test::().await; // TODO: No filters returned? 
+} diff --git a/policy-test/tests/outbound_api_gateway.rs b/policy-test/tests/outbound_api_gateway.rs index 1c56d2bd25562..714ada52ff236 100644 --- a/policy-test/tests/outbound_api_gateway.rs +++ b/policy-test/tests/outbound_api_gateway.rs @@ -16,162 +16,6 @@ // // These two files should be kept in sync to ensure that Linkerd can read and // // function correctly with both types of resources. -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_http_route_without_rules() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, "my-svc", 4191).await; -// parent_with_http_route_without_rules(Resource::Service(svc), &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_with_http_route_without_rules() { -// with_temp_ns(|client, ns| async move { -// // Create an egress net -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// parent_with_http_route_without_rules(Resource::EgressNetwork(egress), &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_http_routes_without_backends() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, "my-svc", 4191).await; -// parent_with_http_routes_without_backends(Resource::Service(svc), &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_with_http_routes_without_backends() { -// with_temp_ns(|client, ns| async move { -// // Create an egress net -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// 
parent_with_http_routes_without_backends(Resource::EgressNetwork(egress), &client, &ns) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_http_routes_with_backend() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, "my-svc", 4191).await; -// let backend_svc = create_service(&client, &ns, "backend", 8888).await; -// parent_with_http_routes_with_backend( -// Resource::Service(svc), -// Resource::Service(backend_svc), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_with_http_routes_with_backend() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// parent_with_http_routes_with_backend( -// Resource::EgressNetwork(egress.clone()), -// Resource::EgressNetwork(egress), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_http_routes_with_cross_namespace_backend() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - -// let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &svc, 4191); - -// // There should be a default route. 
-// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &svc, 4191); -// }); - -// let backend_ns_name = format!("{}-backend", ns); -// let backend_ns = create_cluster_scoped( -// &client, -// k8s::Namespace { -// metadata: k8s::ObjectMeta { -// name: Some(backend_ns_name.clone()), -// labels: Some(convert_args!(btreemap!( -// "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), -// ))), -// ..Default::default() -// }, -// ..Default::default() -// }, -// ) -// .await; -// let backend_name = "backend"; -// let backend_svc = -// Resource::Service(create_service(&client, &backend_ns_name, backend_name, 8888).await); -// let backends = [backend_svc.clone()]; -// let route = mk_http_route(&ns, "foo-route", &svc, Some(4191)).with_backends( -// Some(&backends), -// Some(backend_ns_name), -// None, -// ); -// let _route = create(&client, route.build()).await; -// await_gateway_route_status(&client, &ns, "foo-route").await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &svc, 4191); - -// // There should be a route with a backend with no filters. -// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// let backends = route_backends_random_available(route); -// let backend = assert_singleton(backends); -// assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_svc, 8888); -// let filters = &backend.backend.as_ref().unwrap().filters; -// assert_eq!(filters.len(), 0); -// }); - -// delete_cluster_scoped(&client, backend_ns).await -// }) -// .await; -// } - // // TODO: Test fails until handling of invalid backends is implemented. 
// #[tokio::test(flavor = "current_thread")] // async fn service_with_http_routes_with_invalid_backend() { diff --git a/policy-test/tests/outbound_api_linkerd.rs b/policy-test/tests/outbound_api_linkerd.rs index 99eb9e3157566..6172f65693665 100644 --- a/policy-test/tests/outbound_api_linkerd.rs +++ b/policy-test/tests/outbound_api_linkerd.rs @@ -12,115 +12,6 @@ // }; // use maplit::{btreemap, convert_args}; -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_http_routes_with_backend() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, "my-svc", 4191).await; -// let backend_svc = create_service(&client, &ns, "backend", 8888).await; -// parent_with_http_routes_with_backend( -// Resource::Service(svc), -// Resource::Service(backend_svc), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_with_http_routes_with_backend() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// parent_with_http_routes_with_backend( -// Resource::EgressNetwork(egress.clone()), -// Resource::EgressNetwork(egress), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_http_routes_with_cross_namespace_backend() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - -// let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// 
assert_resource_meta(&config.metadata, &svc, 4191); - -// // There should be a default route. -// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &svc, 4191); -// }); - -// let backend_ns_name = format!("{}-backend", ns); -// let backend_ns = create_cluster_scoped( -// &client, -// k8s::Namespace { -// metadata: k8s::ObjectMeta { -// name: Some(backend_ns_name.clone()), -// labels: Some(convert_args!(btreemap!( -// "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), -// ))), -// ..Default::default() -// }, -// ..Default::default() -// }, -// ) -// .await; -// let backend_name = "backend"; -// let backend_svc = -// Resource::Service(create_service(&client, &backend_ns_name, backend_name, 8888).await); -// let backends = [backend_svc.clone()]; -// let route = mk_http_route(&ns, "foo-route", &svc, Some(4191)).with_backends( -// Some(&backends), -// Some(backend_ns_name), -// None, -// ); -// let _route = create(&client, route.build()).await; -// await_route_status(&client, &ns, "foo-route").await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &svc, 4191); - -// // There should be a route with a backend with no filters. -// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// let backends = route_backends_random_available(route); -// let backend = assert_singleton(backends); -// assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_svc, 8888); -// let filters = &backend.backend.as_ref().unwrap().filters; -// assert_eq!(filters.len(), 0); -// }); - -// delete_cluster_scoped(&client, backend_ns).await -// }) -// .await; -// } - // // TODO: Test fails until handling of invalid backends is implemented. 
// #[tokio::test(flavor = "current_thread")] // async fn service_with_http_routes_with_invalid_backend() { diff --git a/policy-test/tests/outbound_api_tcp.rs b/policy-test/tests/outbound_api_tcp.rs index ad62dc86c8ede..277dc52493c27 100644 --- a/policy-test/tests/outbound_api_tcp.rs +++ b/policy-test/tests/outbound_api_tcp.rs @@ -7,92 +7,6 @@ // }; // use maplit::{btreemap, convert_args}; -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_tcp_routes_with_backend() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, "my-svc", 4191).await; -// let backend_svc = create_service(&client, &ns, "backend", 8888).await; -// parent_with_tcp_routes_with_backend( -// Resource::Service(svc), -// Resource::Service(backend_svc), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_with_tcp_routes_with_backend() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// parent_with_tcp_routes_with_backend( -// Resource::EgressNetwork(egress.clone()), -// Resource::EgressNetwork(egress), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_tcp_routes_with_cross_namespace_backend() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - -// let backend_ns_name = format!("{}-backend", ns); -// let backend_ns = create_cluster_scoped( -// &client, -// k8s::Namespace { -// metadata: k8s::ObjectMeta { -// name: Some(backend_ns_name.clone()), -// labels: Some(convert_args!(btreemap!( -// "linkerd-policy-test" => 
std::thread::current().name().unwrap_or(""), -// ))), -// ..Default::default() -// }, -// ..Default::default() -// }, -// ) -// .await; -// let backend_name = "backend"; -// let backend_svc = -// Resource::Service(create_service(&client, &backend_ns_name, backend_name, 8888).await); -// let backends = [backend_svc.clone()]; -// let route = mk_tcp_route(&ns, "foo-route", &svc, Some(4191)).with_backends(&backends); -// let _route = create(&client, route.build()).await; -// await_tcp_route_status(&client, &ns, "foo-route").await; - -// let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &svc, 4191); - -// let routes = tcp_routes(&config); -// let route = assert_singleton(routes); -// let backends = tcp_route_backends_random_available(route); -// let backend = assert_singleton(backends); -// assert_tcp_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_svc, 8888); - -// delete_cluster_scoped(&client, backend_ns).await -// }) -// .await; -// } - // #[tokio::test(flavor = "current_thread")] // async fn service_with_tcp_routes_with_invalid_backend() { // with_temp_ns(|client, ns| async move { From 0fccc9f9ba63b05f06de08e03cb54f42e737b0d4 Mon Sep 17 00:00:00 2001 From: Alex Leong Date: Wed, 18 Dec 2024 00:17:55 +0000 Subject: [PATCH 5/9] checkpoint; more tests Signed-off-by: Alex Leong --- policy-test/src/lib.rs | 28 +-- policy-test/tests/outbound_api.rs | 268 ++++++++++++++++++++-- policy-test/tests/outbound_api_gateway.rs | 145 ------------ policy-test/tests/outbound_api_linkerd.rs | 145 ------------ policy-test/tests/outbound_api_tcp.rs | 62 ----- policy-test/tests/outbound_api_tls.rs | 148 ------------ 6 files changed, 265 insertions(+), 531 deletions(-) diff --git a/policy-test/src/lib.rs b/policy-test/src/lib.rs index 
73020517c1302..7b16609bdebfc 100644 --- a/policy-test/src/lib.rs +++ b/policy-test/src/lib.rs @@ -208,25 +208,25 @@ pub async fn await_pod_ip(client: &kube::Client, ns: &str, name: &str) -> std::n .expect("pod IP must be valid") } -// Waits until an HttpRoute with the given namespace and name has a status set -// on it, then returns the generic route status representation. -pub async fn await_route_status( - client: &kube::Client, - route: &R, -) -> Vec { +// Waits until an HttpRoute with the given namespace and name has been accepted. +pub async fn await_route_accepted(client: &kube::Client, route: &R) { await_condition( client, &route.namespace().unwrap(), &route.name_unchecked(), - |obj: Option<&R>| -> bool { obj.and_then(|route| route.conditions()).is_some() }, + |obj: Option<&R>| -> bool { + obj.map_or(false, |route| { + let conditions = route + .conditions() + .unwrap_or_default() + .into_iter() + .map(|c| c.clone()) + .collect::>(); + is_status_accepted(&conditions) + }) + }, ) - .await - .expect("must fetch route") - .conditions() - .expect("route must contain a status representation") - .into_iter() - .map(|c| c.clone()) - .collect() + .await; } // Waits until an HttpRoute with the given namespace and name has a status set diff --git a/policy-test/tests/outbound_api.rs b/policy-test/tests/outbound_api.rs index 47e6dc72590b5..cbde33888fccf 100644 --- a/policy-test/tests/outbound_api.rs +++ b/policy-test/tests/outbound_api.rs @@ -1,12 +1,14 @@ +use std::time::Duration; + use futures::StreamExt; use k8s_gateway_api::{self as gateway}; use linkerd_policy_controller_k8s_api::{self as k8s, policy}; use linkerd_policy_test::{ - assert_resource_meta, assert_status_accepted, await_route_status, create, - create_cluster_scoped, delete_cluster_scoped, grpc, + assert_resource_meta, await_route_accepted, create, create_cluster_scoped, + delete_cluster_scoped, grpc, outbound_api::{ assert_backend_matches_reference, assert_route_is_default, assert_singleton, - 
retry_watch_outbound_policy, + detect_failure_accrual, failure_accrual_consecutive, retry_watch_outbound_policy, }, test_route::{TestParent, TestRoute}, with_temp_ns, @@ -108,8 +110,7 @@ async fn http_route_with_no_rules() { R::make_route(ns.clone(), vec![parent.obj_ref()], vec![]), ) .await; - let status = await_route_status(&client, &route).await; - assert_status_accepted(status); + await_route_accepted(&client, &route).await; let config = rx .next() @@ -173,8 +174,7 @@ async fn http_routes_without_backends() { R::make_route(ns.clone(), vec![parent.obj_ref()], vec![vec![]]), ) .await; - let status = await_route_status(&client, &route).await; - assert_status_accepted(status); + await_route_accepted(&client, &route).await; let config = rx .next() @@ -248,8 +248,7 @@ async fn routes_with_backend() { ), ) .await; - let status = await_route_status(&client, &route).await; - assert_status_accepted(status); + await_route_accepted(&client, &route).await; let config = rx .next() @@ -352,8 +351,7 @@ async fn service_with_routes_with_cross_namespace_backend() { ), ) .await; - let status = await_route_status(&client, &route).await; - assert_status_accepted(status); + await_route_accepted(&client, &route).await; let config = rx .next() @@ -437,8 +435,7 @@ async fn routes_with_invalid_backend() { ), ) .await; - let status = await_route_status(&client, &route).await; - assert_status_accepted(status); + await_route_accepted(&client, &route).await; let config = rx .next() @@ -474,11 +471,248 @@ async fn routes_with_invalid_backend() { test::().await; test::().await; test::().await; - //test::().await; // TODO: No filters returned? - // test::().await; // TODO: No filters returned? 
+ test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn multiple_routes() { + async fn test() { + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()), + ); + // Create a parent + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. + gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + // Routes should be returned in sorted order by creation timestamp then + // name. To ensure that this test isn't timing dependant, routes should + // be created in alphabetical order. + let mut route_a = R::make_route( + ns.clone(), + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + route_a.meta_mut().name = Some("a-route".to_string()); + let route_a = create(&client, route_a).await; + await_route_accepted(&client, &route_a).await; + + // First route update. 
+ let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + let mut route_b = R::make_route( + ns.clone(), + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + route_b.meta_mut().name = Some("b-route".to_string()); + let route_b = create(&client, route_b).await; + await_route_accepted(&client, &route_b).await; + + // Second route update. + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + R::routes(&config, |routes| { + assert!(route_a.meta_eq(R::extract_meta(&routes[0]))); + assert!(route_b.meta_eq(R::extract_meta(&routes[1]))); + }); + }) + .await + } + + test::().await; + test::().await; + test::().await; + test::().await; + //test::().await; test::().await; test::().await; test::().await; - // test::().await; // TODO: No filters returned? - // test::().await; // TODO: No filters returned? 
+ test::().await; + //test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn multiple_tcp_routes() { + async fn test() { + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()), + ); + // Create a parent + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. + gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + // Routes should be returned in sorted order by creation timestamp then + // name. To ensure that this test isn't timing dependant, routes should + // be created in alphabetical order. + let mut route_a = R::make_route( + ns.clone(), + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + route_a.meta_mut().name = Some("a-route".to_string()); + let route_a = create(&client, route_a).await; + await_route_accepted(&client, &route_a).await; + + // First route update. 
+ let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + let mut route_b = R::make_route( + ns.clone(), + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + route_b.meta_mut().name = Some("b-route".to_string()); + let route_b = create(&client, route_b).await; + await_route_accepted(&client, &route_b).await; + + // Second route update. + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + R::routes(&config, |routes| { + // Only the first TCPRoute should be returned in the config. + assert!(route_a.meta_eq(R::extract_meta(&routes[0]))); + assert_eq!(routes.len(), 1); + }); + }) + .await + } + + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn consecutive_failure_accrual() { + async fn test() { + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + // Create a parent + let port = 4191; + let mut parent = P::make_parent(&ns); + parent.meta_mut().annotations = Some(btreemap! 
{ + "balancer.linkerd.io/failure-accrual".to_string() => "consecutive".to_string(), + "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string() => "8".to_string(), + "balancer.linkerd.io/failure-accrual-consecutive-min-penalty".to_string() => "10s".to_string(), + "balancer.linkerd.io/failure-accrual-consecutive-max-penalty".to_string() => "10m".to_string(), + "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string() => "1.0".to_string(), + }); + let parent = create(&client, parent).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + detect_failure_accrual(&config, |accrual| { + let consecutive = failure_accrual_consecutive(accrual); + assert_eq!(8, consecutive.max_failures); + assert_eq!( + &grpc::outbound::ExponentialBackoff { + min_backoff: Some(Duration::from_secs(10).try_into().unwrap()), + max_backoff: Some(Duration::from_secs(600).try_into().unwrap()), + jitter_ratio: 1.0_f32, + }, + consecutive + .backoff + .as_ref() + .expect("backoff must be configured") + ); + }); + }) + .await; + } + + test::().await; + test::().await; } diff --git a/policy-test/tests/outbound_api_gateway.rs b/policy-test/tests/outbound_api_gateway.rs index 714ada52ff236..8a93be2e92a36 100644 --- a/policy-test/tests/outbound_api_gateway.rs +++ b/policy-test/tests/outbound_api_gateway.rs @@ -16,151 +16,6 @@ // // These two files should be kept in sync to ensure that Linkerd can read and // // function correctly with both types of resources. -// // TODO: Test fails until handling of invalid backends is implemented. 
-// #[tokio::test(flavor = "current_thread")] -// async fn service_with_http_routes_with_invalid_backend() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, "my-svc", 4191).await; -// let backend = mk_service(&ns, "invalid", 4191); - -// parent_with_http_routes_with_invalid_backend( -// Resource::Service(svc), -// Resource::Service(backend), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// // TODO: Test fails until handling of invalid backends is implemented. -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_with_http_routes_with_invalid_backend() { -// with_temp_ns(|client, ns| async move { -// // Create an egress network -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// let backend = mk_egress_net(&ns, "invalid"); - -// parent_with_http_routes_with_invalid_backend( -// Resource::EgressNetwork(egress), -// Resource::EgressNetwork(backend), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// // TODO: Investigate why the policy controller is only returning one route in this -// // case instead of two. -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_multiple_http_routes() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, "my-svc", 4191).await; -// parent_with_multiple_http_routes(Resource::Service(svc), &client, &ns).await; -// }) -// .await; -// } - -// // TODO: Investigate why the policy controller is only returning one route in this -// // case instead of two. 
-// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_with_multiple_http_routes() { -// with_temp_ns(|client, ns| async move { -// // Create an egress net -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// parent_with_multiple_http_routes(Resource::EgressNetwork(egress), &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_consecutive_failure_accrual() { -// with_temp_ns(|client, ns| async move { -// let svc = create_annotated_service( -// &client, -// &ns, -// "consecutive-accrual-svc", -// 80, -// BTreeMap::from([ -// ( -// "balancer.linkerd.io/failure-accrual".to_string(), -// "consecutive".to_string(), -// ), -// ( -// "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), -// "8".to_string(), -// ), -// ( -// "balancer.linkerd.io/failure-accrual-consecutive-min-penalty".to_string(), -// "10s".to_string(), -// ), -// ( -// "balancer.linkerd.io/failure-accrual-consecutive-max-penalty".to_string(), -// "10m".to_string(), -// ), -// ( -// "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), -// "1.0".to_string(), -// ), -// ]), -// ) -// .await; -// parent_with_consecutive_failure_accrual(Resource::Service(svc), &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_with_consecutive_failure_accrual() { -// with_temp_ns(|client, ns| async move { -// let egress = create_annotated_egress_network( -// &client, -// &ns, -// "consecutive-accrual-egress", -// BTreeMap::from([ -// ( -// "balancer.linkerd.io/failure-accrual".to_string(), -// "consecutive".to_string(), -// ), -// ( -// "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), -// "8".to_string(), -// ), -// ( -// 
"balancer.linkerd.io/failure-accrual-consecutive-min-penalty".to_string(), -// "10s".to_string(), -// ), -// ( -// "balancer.linkerd.io/failure-accrual-consecutive-max-penalty".to_string(), -// "10m".to_string(), -// ), -// ( -// "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), -// "1.0".to_string(), -// ), -// ]), -// ) -// .await; -// let status = await_egress_net_status(&client, &ns, "consecutive-accrual-egress").await; -// assert_status_accepted(status.conditions); - -// parent_with_consecutive_failure_accrual(Resource::EgressNetwork(egress), &client, &ns) -// .await; -// }) -// .await; -// } - // #[tokio::test(flavor = "current_thread")] // async fn service_with_consecutive_failure_accrual_defaults_no_config() { // with_temp_ns(|client, ns| async move { diff --git a/policy-test/tests/outbound_api_linkerd.rs b/policy-test/tests/outbound_api_linkerd.rs index 6172f65693665..0f77987d37856 100644 --- a/policy-test/tests/outbound_api_linkerd.rs +++ b/policy-test/tests/outbound_api_linkerd.rs @@ -12,151 +12,6 @@ // }; // use maplit::{btreemap, convert_args}; -// // TODO: Test fails until handling of invalid backends is implemented. -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_http_routes_with_invalid_backend() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, "my-svc", 4191).await; -// let backend = mk_service(&ns, "invalid", 4191); - -// parent_with_http_routes_with_invalid_backend( -// Resource::Service(svc), -// Resource::Service(backend), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// // TODO: Test fails until handling of invalid backends is implemented. 
-// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_with_http_routes_with_invalid_backend() { -// with_temp_ns(|client, ns| async move { -// // Create an egress network -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// let backend = mk_egress_net(&ns, "invalid"); - -// parent_with_http_routes_with_invalid_backend( -// Resource::EgressNetwork(egress), -// Resource::EgressNetwork(backend), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// // TODO: Investigate why the policy controller is only returning one route in this -// // case instead of two. -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_multiple_http_routes() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, "my-svc", 4191).await; -// parent_with_multiple_http_routes(Resource::Service(svc), &client, &ns).await; -// }) -// .await; -// } - -// // TODO: Investigate why the policy controller is only returning one route in this -// // case instead of two. 
-// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_with_multiple_http_routes() { -// with_temp_ns(|client, ns| async move { -// // Create an egress net -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// parent_with_multiple_http_routes(Resource::EgressNetwork(egress), &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_consecutive_failure_accrual() { -// with_temp_ns(|client, ns| async move { -// let svc = create_annotated_service( -// &client, -// &ns, -// "consecutive-accrual-svc", -// 80, -// BTreeMap::from([ -// ( -// "balancer.linkerd.io/failure-accrual".to_string(), -// "consecutive".to_string(), -// ), -// ( -// "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), -// "8".to_string(), -// ), -// ( -// "balancer.linkerd.io/failure-accrual-consecutive-min-penalty".to_string(), -// "10s".to_string(), -// ), -// ( -// "balancer.linkerd.io/failure-accrual-consecutive-max-penalty".to_string(), -// "10m".to_string(), -// ), -// ( -// "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), -// "1.0".to_string(), -// ), -// ]), -// ) -// .await; -// parent_with_consecutive_failure_accrual(Resource::Service(svc), &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_with_consecutive_failure_accrual() { -// with_temp_ns(|client, ns| async move { -// let egress = create_annotated_egress_network( -// &client, -// &ns, -// "consecutive-accrual-egress", -// BTreeMap::from([ -// ( -// "balancer.linkerd.io/failure-accrual".to_string(), -// "consecutive".to_string(), -// ), -// ( -// "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), -// "8".to_string(), -// ), -// ( -// 
"balancer.linkerd.io/failure-accrual-consecutive-min-penalty".to_string(), -// "10s".to_string(), -// ), -// ( -// "balancer.linkerd.io/failure-accrual-consecutive-max-penalty".to_string(), -// "10m".to_string(), -// ), -// ( -// "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), -// "1.0".to_string(), -// ), -// ]), -// ) -// .await; -// let status = await_egress_net_status(&client, &ns, "consecutive-accrual-egress").await; -// assert_status_accepted(status.conditions); - -// parent_with_consecutive_failure_accrual(Resource::EgressNetwork(egress), &client, &ns) -// .await; -// }) -// .await; -// } - // #[tokio::test(flavor = "current_thread")] // async fn service_with_consecutive_failure_accrual_defaults_no_config() { // with_temp_ns(|client, ns| async move { diff --git a/policy-test/tests/outbound_api_tcp.rs b/policy-test/tests/outbound_api_tcp.rs index 277dc52493c27..7aca0840f5e5a 100644 --- a/policy-test/tests/outbound_api_tcp.rs +++ b/policy-test/tests/outbound_api_tcp.rs @@ -7,68 +7,6 @@ // }; // use maplit::{btreemap, convert_args}; -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_tcp_routes_with_invalid_backend() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, "my-svc", 4191).await; -// let backend = mk_service(&ns, "invalid", 4191); - -// parent_with_tcp_routes_with_invalid_backend( -// Resource::Service(svc), -// Resource::Service(backend), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_with_tcp_routes_with_invalid_backend() { -// with_temp_ns(|client, ns| async move { -// // Create an egress network -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// let backend = mk_egress_net(&ns, "invalid"); - -// 
parent_with_tcp_routes_with_invalid_backend( -// Resource::EgressNetwork(egress), -// Resource::EgressNetwork(backend), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_multiple_tcp_routes() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, "my-svc", 4191).await; -// parent_with_multiple_tcp_routes(Resource::Service(svc), &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_with_multiple_tcp_routes() { -// with_temp_ns(|client, ns| async move { -// // Create an egress net -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// parent_with_multiple_tcp_routes(Resource::EgressNetwork(egress), &client, &ns).await; -// }) -// .await; -// } - // #[tokio::test(flavor = "current_thread")] // async fn tcp_route_with_no_port() { // with_temp_ns(|client, ns| async move { diff --git a/policy-test/tests/outbound_api_tls.rs b/policy-test/tests/outbound_api_tls.rs index 06ab43f5b52b5..4f69549f86d9e 100644 --- a/policy-test/tests/outbound_api_tls.rs +++ b/policy-test/tests/outbound_api_tls.rs @@ -7,154 +7,6 @@ // }; // use maplit::{btreemap, convert_args}; -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_tls_routes_with_backend() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, "my-svc", 4191).await; -// let backend_svc = create_service(&client, &ns, "backend", 8888).await; -// parent_with_tls_routes_with_backend( -// Resource::Service(svc), -// Resource::Service(backend_svc), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn 
egress_net_with_tls_routes_with_backend() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// parent_with_tls_routes_with_backend( -// Resource::EgressNetwork(egress.clone()), -// Resource::EgressNetwork(egress), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_tls_routes_with_cross_namespace_backend() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - -// let backend_ns_name = format!("{}-backend", ns); -// let backend_ns = create_cluster_scoped( -// &client, -// k8s::Namespace { -// metadata: k8s::ObjectMeta { -// name: Some(backend_ns_name.clone()), -// labels: Some(convert_args!(btreemap!( -// "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), -// ))), -// ..Default::default() -// }, -// ..Default::default() -// }, -// ) -// .await; -// let backend_name = "backend"; -// let backend_svc = -// Resource::Service(create_service(&client, &backend_ns_name, backend_name, 8888).await); -// let backends = [backend_svc.clone()]; -// let route = mk_tls_route(&ns, "foo-route", &svc, Some(4191)).with_backends(&backends); -// let _route = create(&client, route.build()).await; -// await_tls_route_status(&client, &ns, "foo-route").await; - -// let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &svc, 4191); - -// let routes = tls_routes(&config); -// let route = assert_singleton(routes); -// let backends = 
tls_route_backends_random_available(route); -// let backend = assert_singleton(backends); -// assert_tls_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_svc, 8888); - -// delete_cluster_scoped(&client, backend_ns).await -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_tls_routes_with_invalid_backend() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, "my-svc", 4191).await; -// let backend = mk_service(&ns, "invalid", 4191); - -// parent_with_tls_routes_with_invalid_backend( -// Resource::Service(svc), -// Resource::Service(backend), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_with_tls_routes_with_invalid_backend() { -// with_temp_ns(|client, ns| async move { -// // Create an egress network -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// let backend = mk_egress_net(&ns, "invalid"); - -// parent_with_tls_routes_with_invalid_backend( -// Resource::EgressNetwork(egress), -// Resource::EgressNetwork(backend), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_multiple_tls_routes() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, "my-svc", 4191).await; -// parent_with_multiple_tls_routes(Resource::Service(svc), &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_with_multiple_http_routes() { -// with_temp_ns(|client, ns| async move { -// // Create an egress net -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, 
"my-egress").await; -// assert_status_accepted(status.conditions); - -// parent_with_multiple_tls_routes(Resource::EgressNetwork(egress), &client, &ns).await; -// }) -// .await; -// } - // #[tokio::test(flavor = "current_thread")] // async fn tls_route_with_no_port() { // with_temp_ns(|client, ns| async move { From dd90c9a60e9ffa8b70b76c82c44034cd6c174c15 Mon Sep 17 00:00:00 2001 From: Alex Leong Date: Thu, 19 Dec 2024 00:56:57 +0000 Subject: [PATCH 6/9] checkpoint. more tests Signed-off-by: Alex Leong --- policy-test/tests/outbound_api.rs | 154 +---- .../tests/outbound_api_failure_accrual.rs | 244 ++++++++ policy-test/tests/outbound_api_gateway.rs | 347 ----------- policy-test/tests/outbound_api_grpc.rs | 224 ++++++- policy-test/tests/outbound_api_http.rs | 575 ++++++++++++++++++ policy-test/tests/outbound_api_linkerd.rs | 347 ----------- policy-test/tests/outbound_api_tcp.rs | 98 +++ 7 files changed, 1158 insertions(+), 831 deletions(-) create mode 100644 policy-test/tests/outbound_api_failure_accrual.rs create mode 100644 policy-test/tests/outbound_api_http.rs diff --git a/policy-test/tests/outbound_api.rs b/policy-test/tests/outbound_api.rs index cbde33888fccf..e0a62a6c84308 100644 --- a/policy-test/tests/outbound_api.rs +++ b/policy-test/tests/outbound_api.rs @@ -4,8 +4,8 @@ use futures::StreamExt; use k8s_gateway_api::{self as gateway}; use linkerd_policy_controller_k8s_api::{self as k8s, policy}; use linkerd_policy_test::{ - assert_resource_meta, await_route_accepted, create, create_cluster_scoped, - delete_cluster_scoped, grpc, + assert_default_accrual_backoff, assert_resource_meta, await_route_accepted, create, + create_cluster_scoped, delete_cluster_scoped, grpc, outbound_api::{ assert_backend_matches_reference, assert_route_is_default, assert_singleton, detect_failure_accrual, failure_accrual_consecutive, retry_watch_outbound_policy, @@ -14,7 +14,6 @@ use linkerd_policy_test::{ with_temp_ns, }; use maplit::{btreemap, convert_args}; -use 
tracing::debug_span; #[tokio::test(flavor = "current_thread")] async fn parent_does_not_exist() { @@ -79,13 +78,13 @@ async fn parent_with_no_routes() { } #[tokio::test(flavor = "current_thread")] -async fn http_route_with_no_rules() { +async fn route_with_no_rules() { async fn test() { - tracing::debug!( - parent = %P::kind(&P::DynamicType::default()), - route = %R::kind(&R::DynamicType::default()) - ); with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()), + ); let port = 4191; let parent = create(&client, P::make_parent(&ns)).await; @@ -133,20 +132,20 @@ async fn http_route_with_no_rules() { } test::().await; - test::().await; test::().await; + test::().await; test::().await; + test::().await; + test::().await; } #[tokio::test(flavor = "current_thread")] -async fn http_routes_without_backends() { +async fn routes_without_backends() { async fn test() { - let _span = debug_span!( - "test", + tracing::debug!( parent = %P::kind(&P::DynamicType::default()), - route = %R::kind(&R::DynamicType::default()) - ) - .entered(); + route = %R::kind(&R::DynamicType::default()), + ); with_temp_ns(|client, ns| async move { // Create a parent let port = 4191; @@ -199,9 +198,11 @@ async fn http_routes_without_backends() { } test::().await; - test::().await; test::().await; + test::().await; test::().await; + test::().await; + test::().await; } #[tokio::test(flavor = "current_thread")] @@ -568,106 +569,15 @@ async fn multiple_routes() { test::().await; test::().await; test::().await; - //test::().await; test::().await; test::().await; test::().await; test::().await; - //test::().await; -} - -#[tokio::test(flavor = "current_thread")] -async fn multiple_tcp_routes() { - async fn test() { - with_temp_ns(|client, ns| async move { - tracing::debug!( - parent = %P::kind(&P::DynamicType::default()), - route = %R::kind(&R::DynamicType::default()), - ); - // Create a parent - let port = 4191; 
- let parent = create(&client, P::make_parent(&ns)).await; - - // Create a backend - let backend_port = 8888; - let backend = match P::make_backend(&ns) { - Some(b) => create(&client, b).await, - None => parent.clone(), - }; - - let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, parent.obj_ref(), port); - - // There should be a default route. - gateway::HttpRoute::routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default::(route, &parent.obj_ref(), port); - }); - - // Routes should be returned in sorted order by creation timestamp then - // name. To ensure that this test isn't timing dependant, routes should - // be created in alphabetical order. - let mut route_a = R::make_route( - ns.clone(), - vec![parent.obj_ref()], - vec![vec![backend.backend_ref(backend_port)]], - ); - route_a.meta_mut().name = Some("a-route".to_string()); - let route_a = create(&client, route_a).await; - await_route_accepted(&client, &route_a).await; - - // First route update. - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, parent.obj_ref(), port); - - let mut route_b = R::make_route( - ns.clone(), - vec![parent.obj_ref()], - vec![vec![backend.backend_ref(backend_port)]], - ); - route_b.meta_mut().name = Some("b-route".to_string()); - let route_b = create(&client, route_b).await; - await_route_accepted(&client, &route_b).await; - - // Second route update. 
- let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_resource_meta(&config.metadata, parent.obj_ref(), port); - - R::routes(&config, |routes| { - // Only the first TCPRoute should be returned in the config. - assert!(route_a.meta_eq(R::extract_meta(&routes[0]))); - assert_eq!(routes.len(), 1); - }); - }) - .await - } - - test::().await; - test::().await; } #[tokio::test(flavor = "current_thread")] -async fn consecutive_failure_accrual() { - async fn test() { +async fn opaque_service() { + async fn test() { with_temp_ns(|client, ns| async move { tracing::debug!( parent = %P::kind(&P::DynamicType::default()), @@ -676,11 +586,7 @@ async fn consecutive_failure_accrual() { let port = 4191; let mut parent = P::make_parent(&ns); parent.meta_mut().annotations = Some(btreemap! { - "balancer.linkerd.io/failure-accrual".to_string() => "consecutive".to_string(), - "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string() => "8".to_string(), - "balancer.linkerd.io/failure-accrual-consecutive-min-penalty".to_string() => "10s".to_string(), - "balancer.linkerd.io/failure-accrual-consecutive-max-penalty".to_string() => "10m".to_string(), - "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string() => "1.0".to_string(), + "config.linkerd.io/opaque-ports".to_string() => port.to_string(), }); let parent = create(&client, parent).await; @@ -694,21 +600,11 @@ async fn consecutive_failure_accrual() { assert_resource_meta(&config.metadata, parent.obj_ref(), port); - detect_failure_accrual(&config, |accrual| { - let consecutive = failure_accrual_consecutive(accrual); - assert_eq!(8, consecutive.max_failures); - assert_eq!( - &grpc::outbound::ExponentialBackoff { - min_backoff: Some(Duration::from_secs(10).try_into().unwrap()), - max_backoff: Some(Duration::from_secs(600).try_into().unwrap()), - jitter_ratio: 1.0_f32, - }, - consecutive - .backoff - 
.as_ref() - .expect("backoff must be configured") - ); - }); + // Proxy protocol should be opaque. + match config.protocol.unwrap().kind.unwrap() { + grpc::outbound::proxy_protocol::Kind::Opaque(_) => {} + _ => panic!("proxy protocol must be Opaque"), + }; }) .await; } diff --git a/policy-test/tests/outbound_api_failure_accrual.rs b/policy-test/tests/outbound_api_failure_accrual.rs new file mode 100644 index 0000000000000..0016a7ba07758 --- /dev/null +++ b/policy-test/tests/outbound_api_failure_accrual.rs @@ -0,0 +1,244 @@ +use std::time::Duration; + +use futures::StreamExt; +use linkerd_policy_controller_k8s_api::{self as k8s, policy}; +use linkerd_policy_test::{ + assert_default_accrual_backoff, assert_resource_meta, create, grpc, + outbound_api::{ + detect_failure_accrual, failure_accrual_consecutive, retry_watch_outbound_policy, + }, + test_route::TestParent, + with_temp_ns, +}; +use maplit::btreemap; + +#[tokio::test(flavor = "current_thread")] +async fn consecutive_failure_accrual() { + async fn test() { + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + // Create a parent + let port = 4191; + let mut parent = P::make_parent(&ns); + parent.meta_mut().annotations = Some(btreemap! 
{ + "balancer.linkerd.io/failure-accrual".to_string() => "consecutive".to_string(), + "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string() => "8".to_string(), + "balancer.linkerd.io/failure-accrual-consecutive-min-penalty".to_string() => "10s".to_string(), + "balancer.linkerd.io/failure-accrual-consecutive-max-penalty".to_string() => "10m".to_string(), + "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string() => "1.0".to_string(), + }); + let parent = create(&client, parent).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + detect_failure_accrual(&config, |accrual| { + let consecutive = failure_accrual_consecutive(accrual); + assert_eq!(8, consecutive.max_failures); + assert_eq!( + &grpc::outbound::ExponentialBackoff { + min_backoff: Some(Duration::from_secs(10).try_into().unwrap()), + max_backoff: Some(Duration::from_secs(600).try_into().unwrap()), + jitter_ratio: 1.0_f32, + }, + consecutive + .backoff + .as_ref() + .expect("backoff must be configured") + ); + }); + }) + .await; + } + + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn consecutive_failure_accrual_defaults_no_config() { + async fn test() { + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + // Create a service configured to do consecutive failure accrual, but + // with no additional configuration + let port = 4191; + let mut parent = P::make_parent(&ns); + parent.meta_mut().annotations = Some(btreemap! 
{ + "balancer.linkerd.io/failure-accrual".to_string() => "consecutive".to_string(), + }); + let parent = create(&client, parent).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // Expect default max_failures and default backoff + detect_failure_accrual(&config, |accrual| { + let consecutive = failure_accrual_consecutive(accrual); + assert_eq!(7, consecutive.max_failures); + assert_default_accrual_backoff!(consecutive + .backoff + .as_ref() + .expect("backoff must be configured")); + }); + }) + .await + } + + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn consecutive_failure_accrual_defaults_max_fails() { + async fn test() { + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + // Create a service configured to do consecutive failure accrual with + // max number of failures and with default backoff + let port = 4191; + let mut parent = P::make_parent(&ns); + parent.meta_mut().annotations = Some(btreemap! 
{ + "balancer.linkerd.io/failure-accrual".to_string() => "consecutive".to_string(), + "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string() => "8".to_string(), + }); + let parent = create(&client, parent).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // Expect default backoff and overridden max_failures + detect_failure_accrual(&config, |accrual| { + let consecutive = failure_accrual_consecutive(accrual); + assert_eq!(8, consecutive.max_failures); + assert_default_accrual_backoff!(consecutive + .backoff + .as_ref() + .expect("backoff must be configured")); + }); + }) + .await; + } + + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn consecutive_failure_accrual_defaults_jitter() { + async fn test() { + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + // Create a service configured to do consecutive failure accrual with + // max number of failures and with default backoff + let port = 4191; + let mut parent = P::make_parent(&ns); + parent.meta_mut().annotations = Some(btreemap! 
{ + "balancer.linkerd.io/failure-accrual".to_string() => "consecutive".to_string(), + "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string() => "1.0".to_string(), + }); + let parent = create(&client, parent).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // Expect defaults for everything except for the jitter ratio + detect_failure_accrual(&config, |accrual| { + let consecutive = failure_accrual_consecutive(accrual); + assert_eq!(7, consecutive.max_failures); + assert_eq!( + &grpc::outbound::ExponentialBackoff { + min_backoff: Some(Duration::from_secs(1).try_into().unwrap()), + max_backoff: Some(Duration::from_secs(60).try_into().unwrap()), + jitter_ratio: 1.0_f32, + }, + consecutive + .backoff + .as_ref() + .expect("backoff must be configured") + ); + }); + }) + .await; + } + + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn default_failure_accrual() { + async fn test() { + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + // Create Service with consecutive failure accrual config for + // max_failures but no mode + let port = 4191; + let mut parent = P::make_parent(&ns); + parent.meta_mut().annotations = Some(btreemap! 
{ + "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string() => "8".to_string(), + }); + let parent = create(&client, parent).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // Expect failure accrual config to be default (no failure accrual) + detect_failure_accrual(&config, |accrual| { + assert!( + accrual.is_none(), + "consecutive failure accrual should not be configured for service" + ); + }); + }) + .await; + } + + test::().await; + test::().await; +} diff --git a/policy-test/tests/outbound_api_gateway.rs b/policy-test/tests/outbound_api_gateway.rs index 8a93be2e92a36..f085b15f4bd6a 100644 --- a/policy-test/tests/outbound_api_gateway.rs +++ b/policy-test/tests/outbound_api_gateway.rs @@ -16,353 +16,6 @@ // // These two files should be kept in sync to ensure that Linkerd can read and // // function correctly with both types of resources. 
-// #[tokio::test(flavor = "current_thread")] -// async fn service_with_consecutive_failure_accrual_defaults_no_config() { -// with_temp_ns(|client, ns| async move { -// // Create a service configured to do consecutive failure accrual, but -// // with no additional configuration -// let svc_no_config = create_annotated_service( -// &client, -// &ns, -// "default-accrual-svc", -// 80, -// BTreeMap::from([( -// "balancer.linkerd.io/failure-accrual".to_string(), -// "consecutive".to_string(), -// )]), -// ) -// .await; - -// parent_with_consecutive_failure_accrual_defaults_no_config( -// Resource::Service(svc_no_config), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_consecutive_failure_accrual_defaults_max_fails() { -// with_temp_ns(|client, ns| async move { -// // Create a service configured to do consecutive failure accrual with -// // max number of failures and with default backoff -// let svc_max_fails = create_annotated_service( -// &client, -// &ns, -// "no-backoff-svc", -// 80, -// BTreeMap::from([ -// ( -// "balancer.linkerd.io/failure-accrual".to_string(), -// "consecutive".to_string(), -// ), -// ( -// "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), -// "8".to_string(), -// ), -// ]), -// ) -// .await; - -// parent_with_consecutive_failure_accrual_defaults_max_fails( -// Resource::Service(svc_max_fails), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_consecutive_failure_accrual_defaults_jitter() { -// with_temp_ns(|client, ns| async move { -// // Create a service configured to do consecutive failure accrual with -// // only the jitter ratio configured in the backoff -// let svc_jitter = create_annotated_service( -// &client, -// &ns, -// "only-jitter-svc", -// 80, -// BTreeMap::from([ -// ( -// "balancer.linkerd.io/failure-accrual".to_string(), -// 
"consecutive".to_string(), -// ), -// ( -// "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), -// "1.0".to_string(), -// ), -// ]), -// ) -// .await; - -// parent_with_consecutive_failure_accrual_defaults_max_jitter( -// Resource::Service(svc_jitter), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_with_consecutive_failure_accrual_defaults_no_config() { -// with_temp_ns(|client, ns| async move { -// // Create a egress network configured to do consecutive failure accrual, but -// // with no additional configuration -// let egress_no_config = create_annotated_egress_network( -// &client, -// &ns, -// "default-accrual-egress", -// BTreeMap::from([( -// "balancer.linkerd.io/failure-accrual".to_string(), -// "consecutive".to_string(), -// )]), -// ) -// .await; -// let status = await_egress_net_status(&client, &ns, "default-accrual-egress").await; -// assert_status_accepted(status.conditions); - -// parent_with_consecutive_failure_accrual_defaults_no_config( -// Resource::EgressNetwork(egress_no_config), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_with_consecutive_failure_accrual_defaults_max_fails() { -// with_temp_ns(|client, ns| async move { -// // Create a egress network configured to do consecutive failure accrual with -// // max number of failures and with default backoff -// let egress_max_fails = create_annotated_egress_network( -// &client, -// &ns, -// "no-backoff-egress", -// BTreeMap::from([ -// ( -// "balancer.linkerd.io/failure-accrual".to_string(), -// "consecutive".to_string(), -// ), -// ( -// "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), -// "8".to_string(), -// ), -// ]), -// ) -// .await; -// let status = await_egress_net_status(&client, &ns, "no-backoff-egress").await; -// assert_status_accepted(status.conditions); 
- -// parent_with_consecutive_failure_accrual_defaults_max_fails( -// Resource::EgressNetwork(egress_max_fails), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_with_consecutive_failure_accrual_defaults_jitter() { -// with_temp_ns(|client, ns| async move { -// // Create an egress net configured to do consecutive failure accrual with -// // only the jitter ratio configured in the backoff -// let egress_jitter = create_annotated_egress_network( -// &client, -// &ns, -// "only-jitter-egress", -// BTreeMap::from([ -// ( -// "balancer.linkerd.io/failure-accrual".to_string(), -// "consecutive".to_string(), -// ), -// ( -// "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), -// "1.0".to_string(), -// ), -// ]), -// ) -// .await; -// let status = await_egress_net_status(&client, &ns, "only-jitter-egress").await; -// assert_status_accepted(status.conditions); - -// parent_with_consecutive_failure_accrual_defaults_max_jitter( -// Resource::EgressNetwork(egress_jitter), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_default_failure_accrual() { -// with_temp_ns(|client, ns| async move { -// // Default config for Service, no failure accrual -// let svc_default = create_service(&client, &ns, "default-failure-accrual", 80).await; - -// // Create Service with consecutive failure accrual config for -// // max_failures but no mode -// let svc_max_fails = create_annotated_service( -// &client, -// &ns, -// "default-max-failure-svc", -// 80, -// BTreeMap::from([( -// "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), -// "8".to_string(), -// )]), -// ) -// .await; - -// parent_with_default_failure_accrual( -// Resource::Service(svc_default), -// Resource::Service(svc_max_fails), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// 
#[tokio::test(flavor = "current_thread")] -// async fn egress_net_with_default_failure_accrual() { -// with_temp_ns(|client, ns| async move { -// // Default config for EgressNetwork, no failure accrual -// let egress_default = create_egress_network(&client, &ns, "default-failure-accrual").await; -// let status = await_egress_net_status(&client, &ns, "default-failure-accrual").await; -// assert_status_accepted(status.conditions); - -// // Create EgressNetwork with consecutive failure accrual config for -// // max_failures but no mode -// let egress_max_fails = create_annotated_egress_network( -// &client, -// &ns, -// "default-max-failure-egress", -// BTreeMap::from([( -// "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), -// "8".to_string(), -// )]), -// ) -// .await; -// let status = await_egress_net_status(&client, &ns, "default-max-failure-egress").await; -// assert_status_accepted(status.conditions); - -// parent_with_default_failure_accrual( -// Resource::EgressNetwork(egress_default), -// Resource::EgressNetwork(egress_max_fails), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn opaque_service() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_opaque_service(&client, &ns, "my-svc", 4191).await; -// opaque_parent(Resource::Service(svc), &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn opaque_egress_net() { -// with_temp_ns(|client, ns| async move { -// // Create an egress network -// let egress = create_opaque_egress_network(&client, &ns, "my-svc", 4191).await; -// opaque_parent(Resource::EgressNetwork(egress), &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn route_with_filters_service() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, 
"my-svc", 4191).await; -// let backend = mk_service(&ns, "backend", 4191); - -// route_with_filters( -// Resource::Service(svc), -// Resource::Service(backend), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn route_with_filters_egress_net() { -// with_temp_ns(|client, ns| async move { -// // Create an egress net -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// route_with_filters( -// Resource::EgressNetwork(egress.clone()), -// Resource::EgressNetwork(egress), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn backend_with_filters_service() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, "my-svc", 4191).await; -// let backend_svc = create_service(&client, &ns, "backend", 8888).await; -// backend_with_filters( -// Resource::Service(svc), -// Resource::Service(backend_svc), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn backend_with_filters_egress_net() { -// with_temp_ns(|client, ns| async move { -// // Create an egress net -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// backend_with_filters( -// Resource::EgressNetwork(egress.clone()), -// Resource::EgressNetwork(egress), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - // #[tokio::test(flavor = "current_thread")] // async fn http_route_with_no_port() { // with_temp_ns(|client, ns| async move { diff --git a/policy-test/tests/outbound_api_grpc.rs b/policy-test/tests/outbound_api_grpc.rs index 
43db75a623c83..cd7a9ed97a1ac 100644 --- a/policy-test/tests/outbound_api_grpc.rs +++ b/policy-test/tests/outbound_api_grpc.rs @@ -1,11 +1,219 @@ -// use futures::prelude::*; -// use kube::ResourceExt; -// use linkerd_policy_test::{ -// assert_resource_meta, assert_status_accepted, await_egress_net_status, await_grpc_route_status, -// create, create_egress_network, create_service, mk_egress_net, mk_service, outbound_api::*, -// update, with_temp_ns, Resource, -// }; -// use std::collections::BTreeMap; +use futures::StreamExt; +use linkerd2_proxy_api::{self as api, outbound}; +use linkerd_policy_controller_k8s_api::{self as k8s, gateway, policy}; +use linkerd_policy_test::{ + assert_resource_meta, await_route_accepted, create, + outbound_api::{assert_route_is_default, assert_singleton, retry_watch_outbound_policy}, + test_route::{TestParent, TestRoute}, + with_temp_ns, +}; + +#[tokio::test(flavor = "current_thread")] +async fn grpc_route_with_filters_service() { + async fn test() { + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + // Create a parent + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. 
+ gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + let mut route = gateway::GrpcRoute::make_route( + ns, + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + for rule in route.spec.rules.iter_mut().flatten() { + rule.filters = Some(vec![gateway::GrpcRouteFilter::RequestHeaderModifier { + request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { + set: Some(vec![k8s_gateway_api::HttpHeader { + name: "set".to_string(), + value: "set-value".to_string(), + }]), + add: Some(vec![k8s_gateway_api::HttpHeader { + name: "add".to_string(), + value: "add-value".to_string(), + }]), + remove: Some(vec!["remove".to_string()]), + }, + }]); + } + let route = create(&client, route).await; + await_route_accepted(&client, &route).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a route with filters. 
+ gateway::GrpcRoute::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(gateway::GrpcRoute::extract_meta(outbound_route))); + let rule = assert_singleton(&outbound_route.rules); + let filters = &rule.filters; + assert_eq!( + *filters, + vec![outbound::grpc_route::Filter { + kind: Some(outbound::grpc_route::filter::Kind::RequestHeaderModifier( + api::http_route::RequestHeaderModifier { + add: Some(api::http_types::Headers { + headers: vec![api::http_types::headers::Header { + name: "add".to_string(), + value: "add-value".into(), + }] + }), + set: Some(api::http_types::Headers { + headers: vec![api::http_types::headers::Header { + name: "set".to_string(), + value: "set-value".into(), + }] + }), + remove: vec!["remove".to_string()], + } + )) + }] + ); + }); + }) + .await; + } + + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn policy_grpc_route_with_backend_filters() { + async fn test() { + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + // Create a parent + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. 
+ gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + let mut route = gateway::GrpcRoute::make_route( + ns, + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + for rule in route.spec.rules.iter_mut().flatten() { + for backend in rule.backend_refs.iter_mut().flatten() { + backend.filters = Some(vec![gateway::GrpcRouteFilter::RequestHeaderModifier { + request_header_modifier: gateway::HttpRequestHeaderFilter { + set: Some(vec![k8s_gateway_api::HttpHeader { + name: "set".to_string(), + value: "set-value".to_string(), + }]), + add: Some(vec![k8s_gateway_api::HttpHeader { + name: "add".to_string(), + value: "add-value".to_string(), + }]), + remove: Some(vec!["remove".to_string()]), + }, + }]); + } + } + let route = create(&client, route).await; + await_route_accepted(&client, &route).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a route with backend filters. 
+ gateway::GrpcRoute::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(gateway::GrpcRoute::extract_meta(outbound_route))); + let rules = gateway::GrpcRoute::rules_random_available(outbound_route); + let rule = assert_singleton(&rules); + let backend = assert_singleton(rule); + assert_eq!( + backend.filters, + vec![outbound::grpc_route::Filter { + kind: Some(outbound::grpc_route::filter::Kind::RequestHeaderModifier( + api::http_route::RequestHeaderModifier { + add: Some(api::http_types::Headers { + headers: vec![api::http_types::headers::Header { + name: "add".to_string(), + value: "add-value".into(), + }] + }), + set: Some(api::http_types::Headers { + headers: vec![api::http_types::headers::Header { + name: "set".to_string(), + value: "set-value".into(), + }] + }), + remove: vec!["remove".to_string()], + } + )) + }] + ); + }); + }) + .await; + } + + test::().await; + test::().await; +} // #[tokio::test(flavor = "current_thread")] // async fn service_grpc_route_retries_and_timeouts() { diff --git a/policy-test/tests/outbound_api_http.rs b/policy-test/tests/outbound_api_http.rs new file mode 100644 index 0000000000000..98a1c4afdcab5 --- /dev/null +++ b/policy-test/tests/outbound_api_http.rs @@ -0,0 +1,575 @@ +use futures::StreamExt; +use linkerd2_proxy_api::{self as api, outbound}; +use linkerd_policy_controller_k8s_api::{self as k8s, gateway, policy}; +use linkerd_policy_test::{ + assert_resource_meta, await_route_accepted, create, + outbound_api::{assert_route_is_default, assert_singleton, retry_watch_outbound_policy}, + test_route::{TestParent, TestRoute}, + with_temp_ns, +}; + +#[tokio::test(flavor = "current_thread")] +async fn gateway_http_route_with_filters_service() { + async fn test() { + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + // Create a parent + let port = 4191; + let parent = create(&client, 
P::make_parent(&ns)).await; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. + gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + let mut route = gateway::HttpRoute::make_route( + ns, + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + for rule in route.spec.rules.iter_mut().flatten() { + rule.filters = Some(vec![ + gateway::HttpRouteFilter::RequestHeaderModifier { + request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { + set: Some(vec![k8s_gateway_api::HttpHeader { + name: "set".to_string(), + value: "set-value".to_string(), + }]), + add: Some(vec![k8s_gateway_api::HttpHeader { + name: "add".to_string(), + value: "add-value".to_string(), + }]), + remove: Some(vec!["remove".to_string()]), + }, + }, + gateway::HttpRouteFilter::RequestRedirect { + request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { + scheme: Some("http".to_string()), + hostname: Some("host".to_string()), + path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { + replace_prefix_match: "/path".to_string(), + }), + port: Some(5555), + status_code: Some(302), + }, + }, + ]); + } + let route = create(&client, route).await; + await_route_accepted(&client, &route).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + 
// There should be a route with filters. + gateway::HttpRoute::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(gateway::HttpRoute::extract_meta(outbound_route))); + let rule = assert_singleton(&outbound_route.rules); + let filters = &rule.filters; + assert_eq!( + *filters, + vec![ + outbound::http_route::Filter { + kind: Some( + outbound::http_route::filter::Kind::RequestHeaderModifier( + api::http_route::RequestHeaderModifier { + add: Some(api::http_types::Headers { + headers: vec![api::http_types::headers::Header { + name: "add".to_string(), + value: "add-value".into(), + }] + }), + set: Some(api::http_types::Headers { + headers: vec![api::http_types::headers::Header { + name: "set".to_string(), + value: "set-value".into(), + }] + }), + remove: vec!["remove".to_string()], + } + ) + ) + }, + outbound::http_route::Filter { + kind: Some(outbound::http_route::filter::Kind::Redirect( + api::http_route::RequestRedirect { + scheme: Some(api::http_types::Scheme { + r#type: Some(api::http_types::scheme::Type::Registered( + api::http_types::scheme::Registered::Http.into(), + )) + }), + host: "host".to_string(), + path: Some(linkerd2_proxy_api::http_route::PathModifier { + replace: Some( + linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( + "/path".to_string() + ) + ) + }), + port: 5555, + status: 302, + } + )) + } + ] + ); + }); + }) + .await; + } + + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn policy_http_route_with_filters_service() { + async fn test() { + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + // Create a parent + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let mut rx = 
retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. + gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + let mut route = policy::HttpRoute::make_route( + ns, + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + for rule in route.spec.rules.iter_mut().flatten() { + rule.filters = Some(vec![ + policy::httproute::HttpRouteFilter::RequestHeaderModifier { + request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { + set: Some(vec![k8s_gateway_api::HttpHeader { + name: "set".to_string(), + value: "set-value".to_string(), + }]), + add: Some(vec![k8s_gateway_api::HttpHeader { + name: "add".to_string(), + value: "add-value".to_string(), + }]), + remove: Some(vec!["remove".to_string()]), + }, + }, + policy::httproute::HttpRouteFilter::RequestRedirect { + request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { + scheme: Some("http".to_string()), + hostname: Some("host".to_string()), + path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { + replace_prefix_match: "/path".to_string(), + }), + port: Some(5555), + status_code: Some(302), + }, + }, + ]); + } + let route = create(&client, route).await; + await_route_accepted(&client, &route).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a route with filters. 
+ policy::HttpRoute::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(policy::HttpRoute::extract_meta(outbound_route))); + let rule = assert_singleton(&outbound_route.rules); + let filters = &rule.filters; + assert_eq!( + *filters, + vec![ + outbound::http_route::Filter { + kind: Some( + outbound::http_route::filter::Kind::RequestHeaderModifier( + api::http_route::RequestHeaderModifier { + add: Some(api::http_types::Headers { + headers: vec![api::http_types::headers::Header { + name: "add".to_string(), + value: "add-value".into(), + }] + }), + set: Some(api::http_types::Headers { + headers: vec![api::http_types::headers::Header { + name: "set".to_string(), + value: "set-value".into(), + }] + }), + remove: vec!["remove".to_string()], + } + ) + ) + }, + outbound::http_route::Filter { + kind: Some(outbound::http_route::filter::Kind::Redirect( + api::http_route::RequestRedirect { + scheme: Some(api::http_types::Scheme { + r#type: Some(api::http_types::scheme::Type::Registered( + api::http_types::scheme::Registered::Http.into(), + )) + }), + host: "host".to_string(), + path: Some(linkerd2_proxy_api::http_route::PathModifier { + replace: Some( + linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( + "/path".to_string() + ) + ) + }), + port: 5555, + status: 302, + } + )) + } + ] + ); + }); + }) + .await; + } + + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn gateway_http_route_with_backend_filters() { + async fn test() { + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + // Create a parent + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let mut rx = retry_watch_outbound_policy(&client, &ns, 
parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. + gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + let mut route = gateway::HttpRoute::make_route( + ns, + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + for rule in route.spec.rules.iter_mut().flatten() { + for backend in rule.backend_refs.iter_mut().flatten() { + backend.filters = Some(vec![ + gateway::HttpRouteFilter::RequestHeaderModifier { + request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { + set: Some(vec![k8s_gateway_api::HttpHeader { + name: "set".to_string(), + value: "set-value".to_string(), + }]), + add: Some(vec![k8s_gateway_api::HttpHeader { + name: "add".to_string(), + value: "add-value".to_string(), + }]), + remove: Some(vec!["remove".to_string()]), + }, + }, + gateway::HttpRouteFilter::RequestRedirect { + request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { + scheme: Some("http".to_string()), + hostname: Some("host".to_string()), + path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { + replace_prefix_match: "/path".to_string(), + }), + port: Some(5555), + status_code: Some(302), + }, + }, + ]); + } + } + let route = create(&client, route).await; + await_route_accepted(&client, &route).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a route with backend filters. 
+ gateway::HttpRoute::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(gateway::HttpRoute::extract_meta(outbound_route))); + let rules = gateway::HttpRoute::rules_random_available(outbound_route); + let rule = assert_singleton(&rules); + let backend = assert_singleton(rule); + assert_eq!( + backend.filters, + vec![ + outbound::http_route::Filter { + kind: Some( + outbound::http_route::filter::Kind::RequestHeaderModifier( + api::http_route::RequestHeaderModifier { + add: Some(api::http_types::Headers { + headers: vec![api::http_types::headers::Header { + name: "add".to_string(), + value: "add-value".into(), + }] + }), + set: Some(api::http_types::Headers { + headers: vec![api::http_types::headers::Header { + name: "set".to_string(), + value: "set-value".into(), + }] + }), + remove: vec!["remove".to_string()], + } + ) + ) + }, + outbound::http_route::Filter { + kind: Some(outbound::http_route::filter::Kind::Redirect( + api::http_route::RequestRedirect { + scheme: Some(api::http_types::Scheme { + r#type: Some(api::http_types::scheme::Type::Registered( + api::http_types::scheme::Registered::Http.into(), + )) + }), + host: "host".to_string(), + path: Some(linkerd2_proxy_api::http_route::PathModifier { + replace: Some( + linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( + "/path".to_string() + ) + ) + }), + port: 5555, + status: 302, + } + )) + } + ] + ); + }); + }) + .await; + } + + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn policy_http_route_with_backend_filters() { + async fn test() { + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + // Create a parent + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => 
parent.clone(), + }; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. + gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + let mut route = policy::HttpRoute::make_route( + ns, + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + for rule in route.spec.rules.iter_mut().flatten() { + for backend in rule.backend_refs.iter_mut().flatten() { + backend.filters = Some(vec![ + gateway::HttpRouteFilter::RequestHeaderModifier { + request_header_modifier: gateway::HttpRequestHeaderFilter { + set: Some(vec![k8s_gateway_api::HttpHeader { + name: "set".to_string(), + value: "set-value".to_string(), + }]), + add: Some(vec![k8s_gateway_api::HttpHeader { + name: "add".to_string(), + value: "add-value".to_string(), + }]), + remove: Some(vec!["remove".to_string()]), + }, + }, + gateway::HttpRouteFilter::RequestRedirect { + request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { + scheme: Some("http".to_string()), + hostname: Some("host".to_string()), + path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { + replace_prefix_match: "/path".to_string(), + }), + port: Some(5555), + status_code: Some(302), + }, + }, + ]); + } + } + let route = create(&client, route).await; + await_route_accepted(&client, &route).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a route with backend filters. 
+ policy::HttpRoute::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(policy::HttpRoute::extract_meta(outbound_route))); + let rules = policy::HttpRoute::rules_random_available(outbound_route); + let rule = assert_singleton(&rules); + let backend = assert_singleton(rule); + assert_eq!( + backend.filters, + vec![ + outbound::http_route::Filter { + kind: Some( + outbound::http_route::filter::Kind::RequestHeaderModifier( + api::http_route::RequestHeaderModifier { + add: Some(api::http_types::Headers { + headers: vec![api::http_types::headers::Header { + name: "add".to_string(), + value: "add-value".into(), + }] + }), + set: Some(api::http_types::Headers { + headers: vec![api::http_types::headers::Header { + name: "set".to_string(), + value: "set-value".into(), + }] + }), + remove: vec!["remove".to_string()], + } + ) + ) + }, + outbound::http_route::Filter { + kind: Some(outbound::http_route::filter::Kind::Redirect( + api::http_route::RequestRedirect { + scheme: Some(api::http_types::Scheme { + r#type: Some(api::http_types::scheme::Type::Registered( + api::http_types::scheme::Registered::Http.into(), + )) + }), + host: "host".to_string(), + path: Some(linkerd2_proxy_api::http_route::PathModifier { + replace: Some( + linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( + "/path".to_string() + ) + ) + }), + port: 5555, + status: 302, + } + )) + } + ] + ); + }); + }) + .await; + } + + test::().await; + test::().await; +} diff --git a/policy-test/tests/outbound_api_linkerd.rs b/policy-test/tests/outbound_api_linkerd.rs index 0f77987d37856..350fca3198f78 100644 --- a/policy-test/tests/outbound_api_linkerd.rs +++ b/policy-test/tests/outbound_api_linkerd.rs @@ -12,353 +12,6 @@ // }; // use maplit::{btreemap, convert_args}; -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_consecutive_failure_accrual_defaults_no_config() { -// with_temp_ns(|client, ns| async move { -// // 
Create a service configured to do consecutive failure accrual, but -// // with no additional configuration -// let svc_no_config = create_annotated_service( -// &client, -// &ns, -// "default-accrual-svc", -// 80, -// BTreeMap::from([( -// "balancer.linkerd.io/failure-accrual".to_string(), -// "consecutive".to_string(), -// )]), -// ) -// .await; - -// parent_with_consecutive_failure_accrual_defaults_no_config( -// Resource::Service(svc_no_config), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_consecutive_failure_accrual_defaults_max_fails() { -// with_temp_ns(|client, ns| async move { -// // Create a service configured to do consecutive failure accrual with -// // max number of failures and with default backoff -// let svc_max_fails = create_annotated_service( -// &client, -// &ns, -// "no-backoff-svc", -// 80, -// BTreeMap::from([ -// ( -// "balancer.linkerd.io/failure-accrual".to_string(), -// "consecutive".to_string(), -// ), -// ( -// "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), -// "8".to_string(), -// ), -// ]), -// ) -// .await; - -// parent_with_consecutive_failure_accrual_defaults_max_fails( -// Resource::Service(svc_max_fails), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_consecutive_failure_accrual_defaults_jitter() { -// with_temp_ns(|client, ns| async move { -// // Create a service configured to do consecutive failure accrual with -// // only the jitter ratio configured in the backoff -// let svc_jitter = create_annotated_service( -// &client, -// &ns, -// "only-jitter-svc", -// 80, -// BTreeMap::from([ -// ( -// "balancer.linkerd.io/failure-accrual".to_string(), -// "consecutive".to_string(), -// ), -// ( -// "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), -// "1.0".to_string(), -// ), -// ]), -// ) -// 
.await; - -// parent_with_consecutive_failure_accrual_defaults_max_jitter( -// Resource::Service(svc_jitter), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_with_consecutive_failure_accrual_defaults_no_config() { -// with_temp_ns(|client, ns| async move { -// // Create a egress network configured to do consecutive failure accrual, but -// // with no additional configuration -// let egress_no_config = create_annotated_egress_network( -// &client, -// &ns, -// "default-accrual-egress", -// BTreeMap::from([( -// "balancer.linkerd.io/failure-accrual".to_string(), -// "consecutive".to_string(), -// )]), -// ) -// .await; -// let status = await_egress_net_status(&client, &ns, "default-accrual-egress").await; -// assert_status_accepted(status.conditions); - -// parent_with_consecutive_failure_accrual_defaults_no_config( -// Resource::EgressNetwork(egress_no_config), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_with_consecutive_failure_accrual_defaults_max_fails() { -// with_temp_ns(|client, ns| async move { -// // Create a egress network configured to do consecutive failure accrual with -// // max number of failures and with default backoff -// let egress_max_fails = create_annotated_egress_network( -// &client, -// &ns, -// "no-backoff-egress", -// BTreeMap::from([ -// ( -// "balancer.linkerd.io/failure-accrual".to_string(), -// "consecutive".to_string(), -// ), -// ( -// "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), -// "8".to_string(), -// ), -// ]), -// ) -// .await; -// let status = await_egress_net_status(&client, &ns, "no-backoff-egress").await; -// assert_status_accepted(status.conditions); - -// parent_with_consecutive_failure_accrual_defaults_max_fails( -// Resource::EgressNetwork(egress_max_fails), -// &client, -// &ns, -// ) -// .await; -// }) -// 
.await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_with_consecutive_failure_accrual_defaults_jitter() { -// with_temp_ns(|client, ns| async move { -// // Create an egress net configured to do consecutive failure accrual with -// // only the jitter ratio configured in the backoff -// let egress_jitter = create_annotated_egress_network( -// &client, -// &ns, -// "only-jitter-egress", -// BTreeMap::from([ -// ( -// "balancer.linkerd.io/failure-accrual".to_string(), -// "consecutive".to_string(), -// ), -// ( -// "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), -// "1.0".to_string(), -// ), -// ]), -// ) -// .await; -// let status = await_egress_net_status(&client, &ns, "only-jitter-egress").await; -// assert_status_accepted(status.conditions); - -// parent_with_consecutive_failure_accrual_defaults_max_jitter( -// Resource::EgressNetwork(egress_jitter), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn service_with_default_failure_accrual() { -// with_temp_ns(|client, ns| async move { -// // Default config for Service, no failure accrual -// let svc_default = create_service(&client, &ns, "default-failure-accrual", 80).await; - -// // Create Service with consecutive failure accrual config for -// // max_failures but no mode -// let svc_max_fails = create_annotated_service( -// &client, -// &ns, -// "default-max-failure-svc", -// 80, -// BTreeMap::from([( -// "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), -// "8".to_string(), -// )]), -// ) -// .await; - -// parent_with_default_failure_accrual( -// Resource::Service(svc_default), -// Resource::Service(svc_max_fails), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_with_default_failure_accrual() { -// with_temp_ns(|client, ns| async move { -// // Default config for 
EgressNetwork, no failure accrual -// let egress_default = create_egress_network(&client, &ns, "default-failure-accrual").await; -// let status = await_egress_net_status(&client, &ns, "default-failure-accrual").await; -// assert_status_accepted(status.conditions); - -// // Create EgressNetwork with consecutive failure accrual config for -// // max_failures but no mode -// let egress_max_fails = create_annotated_egress_network( -// &client, -// &ns, -// "default-max-failure-egress", -// BTreeMap::from([( -// "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), -// "8".to_string(), -// )]), -// ) -// .await; -// let status = await_egress_net_status(&client, &ns, "default-max-failure-egress").await; -// assert_status_accepted(status.conditions); - -// parent_with_default_failure_accrual( -// Resource::EgressNetwork(egress_default), -// Resource::EgressNetwork(egress_max_fails), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn opaque_service() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_opaque_service(&client, &ns, "my-svc", 4191).await; -// opaque_parent(Resource::Service(svc), &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn opaque_egress_net() { -// with_temp_ns(|client, ns| async move { -// // Create an egress network -// let egress = create_opaque_egress_network(&client, &ns, "my-svc", 4191).await; -// opaque_parent(Resource::EgressNetwork(egress), &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn route_with_filters_service() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, "my-svc", 4191).await; -// let backend = mk_service(&ns, "backend", 4191); - -// route_with_filters( -// Resource::Service(svc), -// Resource::Service(backend), -// &client, 
-// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn route_with_filters_egress_net() { -// with_temp_ns(|client, ns| async move { -// // Create an egress net -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// route_with_filters( -// Resource::EgressNetwork(egress.clone()), -// Resource::EgressNetwork(egress), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn backend_with_filters_service() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, "my-svc", 4191).await; -// let backend_svc = create_service(&client, &ns, "backend", 8888).await; -// backend_with_filters( -// Resource::Service(svc), -// Resource::Service(backend_svc), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn backend_with_filters_egress_net() { -// with_temp_ns(|client, ns| async move { -// // Create an egress net -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// backend_with_filters( -// Resource::EgressNetwork(egress.clone()), -// Resource::EgressNetwork(egress), -// &client, -// &ns, -// ) -// .await; -// }) -// .await; -// } - // #[tokio::test(flavor = "current_thread")] // async fn http_route_with_no_port() { // with_temp_ns(|client, ns| async move { diff --git a/policy-test/tests/outbound_api_tcp.rs b/policy-test/tests/outbound_api_tcp.rs index 7aca0840f5e5a..97ad040ea0270 100644 --- a/policy-test/tests/outbound_api_tcp.rs +++ b/policy-test/tests/outbound_api_tcp.rs @@ -7,6 +7,104 @@ // }; // use maplit::{btreemap, convert_args}; 
+use futures::StreamExt; +use linkerd_policy_controller_k8s_api::{self as k8s, gateway, policy}; +use linkerd_policy_test::{ + assert_resource_meta, await_route_accepted, create, + outbound_api::{assert_route_is_default, assert_singleton, retry_watch_outbound_policy}, + test_route::{TestParent, TestRoute}, + with_temp_ns, +}; + +#[tokio::test(flavor = "current_thread")] +async fn multiple_tcp_routes() { + async fn test() { + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()), + ); + // Create a parent + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // There should be a default route. + gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + // Routes should be returned in sorted order by creation timestamp then + // name. To ensure that this test isn't timing dependant, routes should + // be created in alphabetical order. + let mut route_a = R::make_route( + ns.clone(), + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + route_a.meta_mut().name = Some("a-route".to_string()); + let route_a = create(&client, route_a).await; + await_route_accepted(&client, &route_a).await; + + // First route update. 
+ let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + let mut route_b = R::make_route( + ns.clone(), + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + route_b.meta_mut().name = Some("b-route".to_string()); + let route_b = create(&client, route_b).await; + await_route_accepted(&client, &route_b).await; + + // Second route update. + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + R::routes(&config, |routes| { + // Only the first TCPRoute should be returned in the config. + assert!(route_a.meta_eq(R::extract_meta(&routes[0]))); + assert_eq!(routes.len(), 1); + }); + }) + .await + } + + test::().await; + test::().await; +} + // #[tokio::test(flavor = "current_thread")] // async fn tcp_route_with_no_port() { // with_temp_ns(|client, ns| async move { From 41a2d8e9c3a1abc9af4646aefd7e4cb653f4aef2 Mon Sep 17 00:00:00 2001 From: Alex Leong Date: Thu, 9 Jan 2025 00:49:20 +0000 Subject: [PATCH 7/9] finished porting tests Signed-off-by: Alex Leong --- policy-test/src/test_route.rs | 51 + .../tests/inbound_http_route_status.rs | 677 +++++---- policy-test/tests/outbound_api.rs | 505 ++++++- policy-test/tests/outbound_api_gateway.rs | 1336 ----------------- policy-test/tests/outbound_api_grpc.rs | 198 ++- policy-test/tests/outbound_api_http.rs | 148 ++ policy-test/tests/outbound_api_linkerd.rs | 1333 ---------------- policy-test/tests/outbound_api_tcp.rs | 487 ------ policy-test/tests/outbound_api_tls.rs | 501 ------- 9 files changed, 1181 insertions(+), 4055 deletions(-) delete mode 100644 policy-test/tests/outbound_api_gateway.rs delete mode 100644 policy-test/tests/outbound_api_linkerd.rs delete mode 100644 
policy-test/tests/outbound_api_tls.rs diff --git a/policy-test/src/test_route.rs b/policy-test/src/test_route.rs index 4a537dec7ffa2..1624b139e9d85 100644 --- a/policy-test/src/test_route.rs +++ b/policy-test/src/test_route.rs @@ -29,6 +29,7 @@ pub trait TestRoute: fn routes(config: &outbound::OutboundPolicy, f: F) where F: Fn(&[Self::Route]); + fn parents_mut(&mut self) -> Vec<&mut ParentReference>; fn extract_meta(route: &Self::Route) -> &Metadata; fn backend_filters(backend: &Self::Backend) -> Vec<&Self::Filter>; fn rules_first_available(route: &Self::Route) -> Vec>; @@ -201,6 +202,16 @@ impl TestRoute for gateway::HttpRoute { _ => false, } } + + fn parents_mut(&mut self) -> Vec<&mut ParentReference> { + self.spec + .inner + .parent_refs + .as_mut() + .unwrap() + .iter_mut() + .collect() + } } impl TestRoute for policy::HttpRoute { @@ -325,6 +336,16 @@ impl TestRoute for policy::HttpRoute { _ => false, } } + + fn parents_mut(&mut self) -> Vec<&mut ParentReference> { + self.spec + .inner + .parent_refs + .as_mut() + .unwrap() + .iter_mut() + .collect() + } } impl TestRoute for gateway::GrpcRoute { @@ -449,6 +470,16 @@ impl TestRoute for gateway::GrpcRoute { _ => false, } } + + fn parents_mut(&mut self) -> Vec<&mut ParentReference> { + self.spec + .inner + .parent_refs + .as_mut() + .unwrap() + .iter_mut() + .collect() + } } impl TestRoute for gateway::TlsRoute { @@ -561,6 +592,16 @@ impl TestRoute for gateway::TlsRoute { _ => false, } } + + fn parents_mut(&mut self) -> Vec<&mut ParentReference> { + self.spec + .inner + .parent_refs + .as_mut() + .unwrap() + .iter_mut() + .collect() + } } impl TestRoute for gateway::TcpRoute { @@ -672,6 +713,16 @@ impl TestRoute for gateway::TcpRoute { _ => false, } } + + fn parents_mut(&mut self) -> Vec<&mut ParentReference> { + self.spec + .inner + .parent_refs + .as_mut() + .unwrap() + .iter_mut() + .collect() + } } impl TestParent for k8s::Service { diff --git a/policy-test/tests/inbound_http_route_status.rs 
b/policy-test/tests/inbound_http_route_status.rs index f4d22cae3bd7e..ca453386d8617 100644 --- a/policy-test/tests/inbound_http_route_status.rs +++ b/policy-test/tests/inbound_http_route_status.rs @@ -1,349 +1,370 @@ -// use kube::ResourceExt; -// use linkerd_policy_controller_k8s_api as k8s; -// use linkerd_policy_test::{ -// await_condition, await_route_status, create, find_route_condition, mk_route, update, -// with_temp_ns, -// }; +use kube::ResourceExt; +use linkerd_policy_controller_k8s_api as k8s; +use linkerd_policy_test::{ + await_condition, create, find_route_condition, mk_route, update, with_temp_ns, +}; -// #[tokio::test(flavor = "current_thread")] -// async fn inbound_accepted_parent() { -// with_temp_ns(|client, ns| async move { -// // Create a test 'Server' -// let server_name = "test-accepted-server"; -// let server = k8s::policy::Server { -// metadata: k8s::ObjectMeta { -// namespace: Some(ns.to_string()), -// name: Some(server_name.to_string()), -// ..Default::default() -// }, -// spec: k8s::policy::ServerSpec { -// selector: k8s::policy::server::Selector::Pod(k8s::labels::Selector::from_iter( -// Some(("app", server_name)), -// )), -// port: k8s::policy::server::Port::Name("http".to_string()), -// proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), -// access_policy: None, -// }, -// }; -// let server = create(&client, server).await; -// let srv_ref = vec![k8s::policy::httproute::ParentReference { -// group: Some("policy.linkerd.io".to_string()), -// kind: Some("Server".to_string()), -// namespace: server.namespace(), -// name: server.name_unchecked(), -// section_name: None, -// port: None, -// }]; +#[tokio::test(flavor = "current_thread")] +async fn inbound_accepted_parent() { + with_temp_ns(|client, ns| async move { + // Create a test 'Server' + let server_name = "test-accepted-server"; + let server = k8s::policy::Server { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some(server_name.to_string()), + 
..Default::default() + }, + spec: k8s::policy::ServerSpec { + selector: k8s::policy::server::Selector::Pod(k8s::labels::Selector::from_iter( + Some(("app", server_name)), + )), + port: k8s::policy::server::Port::Name("http".to_string()), + proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), + access_policy: None, + }, + }; + let server = create(&client, server).await; + let srv_ref = vec![k8s::policy::httproute::ParentReference { + group: Some("policy.linkerd.io".to_string()), + kind: Some("Server".to_string()), + namespace: server.namespace(), + name: server.name_unchecked(), + section_name: None, + port: None, + }]; -// // Create a route that references the Server resource. -// let route = create(&client, mk_route(&ns, "test-accepted-route", Some(srv_ref))).await; -// // Wait until route is updated with a status -// let statuses = await_route_status(&client, &route).await.parents; + // Create a route that references the Server resource. + let route = create(&client, mk_route(&ns, "test-accepted-route", Some(srv_ref))).await; + // Wait until route is updated with a status + let statuses = await_route_status(&client, &ns, "test-accepted-route") + .await + .parents; -// let route_status = statuses -// .clone() -// .into_iter() -// .find(|route_status| route_status.parent_ref.name == server_name) -// .expect("must have at least one parent status"); + let route_status = statuses + .clone() + .into_iter() + .find(|route_status| route_status.parent_ref.name == server_name) + .expect("must have at least one parent status"); -// // Check status references to parent we have created -// assert_eq!( -// route_status.parent_ref.group.as_deref(), -// Some("policy.linkerd.io") -// ); -// assert_eq!(route_status.parent_ref.kind.as_deref(), Some("Server")); + // Check status references to parent we have created + assert_eq!( + route_status.parent_ref.group.as_deref(), + Some("policy.linkerd.io") + ); + assert_eq!(route_status.parent_ref.kind.as_deref(), 
Some("Server")); -// // Check status is accepted with a status of 'True' -// let cond = find_route_condition(&statuses, server_name) -// .expect("must have at least one 'Accepted' condition for accepted server"); -// assert_eq!(cond.status, "True"); -// assert_eq!(cond.reason, "Accepted") -// }) -// .await; -// } + // Check status is accepted with a status of 'True' + let cond = find_route_condition(&statuses, server_name) + .expect("must have at least one 'Accepted' condition for accepted server"); + assert_eq!(cond.status, "True"); + assert_eq!(cond.reason, "Accepted") + }) + .await; +} -// #[tokio::test(flavor = "current_thread")] -// async fn inbound_multiple_parents() { -// with_temp_ns(|client, ns| async move { -// // Exercise accepted test with a valid, and an invalid parent reference -// let srv_refs = vec![ -// k8s::policy::httproute::ParentReference { -// group: Some("policy.linkerd.io".to_string()), -// kind: Some("Server".to_string()), -// namespace: Some(ns.clone()), -// name: "test-valid-server".to_string(), -// section_name: None, -// port: None, -// }, -// k8s::policy::httproute::ParentReference { -// group: Some("policy.linkerd.io".to_string()), -// kind: Some("Server".to_string()), -// namespace: Some(ns.clone()), -// name: "test-invalid-server".to_string(), -// section_name: None, -// port: None, -// }, -// ]; +#[tokio::test(flavor = "current_thread")] +async fn inbound_multiple_parents() { + with_temp_ns(|client, ns| async move { + // Exercise accepted test with a valid, and an invalid parent reference + let srv_refs = vec![ + k8s::policy::httproute::ParentReference { + group: Some("policy.linkerd.io".to_string()), + kind: Some("Server".to_string()), + namespace: Some(ns.clone()), + name: "test-valid-server".to_string(), + section_name: None, + port: None, + }, + k8s::policy::httproute::ParentReference { + group: Some("policy.linkerd.io".to_string()), + kind: Some("Server".to_string()), + namespace: Some(ns.clone()), + name: 
"test-invalid-server".to_string(), + section_name: None, + port: None, + }, + ]; -// // Create only one of the parents -// let server = k8s::policy::Server { -// metadata: k8s::ObjectMeta { -// namespace: Some(ns.to_string()), -// name: Some("test-valid-server".to_string()), -// ..Default::default() -// }, -// spec: k8s::policy::ServerSpec { -// selector: k8s::policy::server::Selector::Pod(k8s::labels::Selector::from_iter( -// Some(("app", "test-valid-server")), -// )), -// port: k8s::policy::server::Port::Name("http".to_string()), -// proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), -// access_policy: None, -// }, -// }; -// let _server = create(&client, server).await; + // Create only one of the parents + let server = k8s::policy::Server { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some("test-valid-server".to_string()), + ..Default::default() + }, + spec: k8s::policy::ServerSpec { + selector: k8s::policy::server::Selector::Pod(k8s::labels::Selector::from_iter( + Some(("app", "test-valid-server")), + )), + port: k8s::policy::server::Port::Name("http".to_string()), + proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), + access_policy: None, + }, + }; + let _server = create(&client, server).await; -// // Create a route that references both parents. -// let _route = create( -// &client, -// mk_route(&ns, "test-multiple-parents-route", Some(srv_refs)), -// ) -// .await; -// // Wait until route is updated with a status -// let parent_status = await_route_status(&client, &ns, "test-multiple-parents-route") -// .await -// .parents; + // Create a route that references both parents. 
+ let _route = create( + &client, + mk_route(&ns, "test-multiple-parents-route", Some(srv_refs)), + ) + .await; + // Wait until route is updated with a status + let parent_status = await_route_status(&client, &ns, "test-multiple-parents-route") + .await + .parents; -// // Find status for invalid parent and extract the condition -// let invalid_cond = find_route_condition(&parent_status, "test-invalid-server") -// .expect("must have at least one 'Accepted' condition set for invalid parent"); -// // Route shouldn't be accepted -// assert_eq!(invalid_cond.status, "False"); -// assert_eq!(invalid_cond.reason, "NoMatchingParent"); + // Find status for invalid parent and extract the condition + let invalid_cond = find_route_condition(&parent_status, "test-invalid-server") + .expect("must have at least one 'Accepted' condition set for invalid parent"); + // Route shouldn't be accepted + assert_eq!(invalid_cond.status, "False"); + assert_eq!(invalid_cond.reason, "NoMatchingParent"); -// // Find status for valid parent and extract the condition -// let valid_cond = find_route_condition(&parent_status, "test-valid-server") -// .expect("must have at least one 'Accepted' condition set for valid parent"); -// assert_eq!(valid_cond.status, "True"); -// assert_eq!(valid_cond.reason, "Accepted") -// }) -// .await -// } + // Find status for valid parent and extract the condition + let valid_cond = find_route_condition(&parent_status, "test-valid-server") + .expect("must have at least one 'Accepted' condition set for valid parent"); + assert_eq!(valid_cond.status, "True"); + assert_eq!(valid_cond.reason, "Accepted") + }) + .await +} -// #[tokio::test(flavor = "current_thread")] -// async fn inbound_no_parent_ref_patch() { -// with_temp_ns(|client, ns| async move { -// // Create a test 'Server' -// let server_name = "test-accepted-server"; -// let server = k8s::policy::Server { -// metadata: k8s::ObjectMeta { -// namespace: Some(ns.to_string()), -// name: 
Some(server_name.to_string()), -// ..Default::default() -// }, -// spec: k8s::policy::ServerSpec { -// selector: k8s::policy::server::Selector::Pod(k8s::labels::Selector::from_iter( -// Some(("app", server_name)), -// )), -// port: k8s::policy::server::Port::Name("http".to_string()), -// proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), -// access_policy: None, -// }, -// }; -// let server = create(&client, server).await; -// let srv_ref = vec![k8s::policy::httproute::ParentReference { -// group: Some("policy.linkerd.io".to_string()), -// kind: Some("Server".to_string()), -// namespace: server.namespace(), -// name: server.name_unchecked(), -// section_name: None, -// port: None, -// }]; -// // Create a route with a parent reference. -// let route = create( -// &client, -// mk_route(&ns, "test-no-parent-refs-route", Some(srv_ref)), -// ) -// .await; +#[tokio::test(flavor = "current_thread")] +async fn inbound_no_parent_ref_patch() { + with_temp_ns(|client, ns| async move { + // Create a test 'Server' + let server_name = "test-accepted-server"; + let server = k8s::policy::Server { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some(server_name.to_string()), + ..Default::default() + }, + spec: k8s::policy::ServerSpec { + selector: k8s::policy::server::Selector::Pod(k8s::labels::Selector::from_iter( + Some(("app", server_name)), + )), + port: k8s::policy::server::Port::Name("http".to_string()), + proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), + access_policy: None, + }, + }; + let server = create(&client, server).await; + let srv_ref = vec![k8s::policy::httproute::ParentReference { + group: Some("policy.linkerd.io".to_string()), + kind: Some("Server".to_string()), + namespace: server.namespace(), + name: server.name_unchecked(), + section_name: None, + port: None, + }]; + // Create a route with a parent reference. 
+ let route = create( + &client, + mk_route(&ns, "test-no-parent-refs-route", Some(srv_ref)), + ) + .await; -// // Status may not be set straight away. To account for that, wrap a -// // status condition watcher in a timeout. -// let status = await_route_status(&client, &ns, "test-no-parent-refs-route").await; -// // If timeout has elapsed, then route did not receive a status patch -// assert!( -// status.parents.len() == 1, -// "HTTPRoute Status should have 1 parent status" -// ); + // Status may not be set straight away. To account for that, wrap a + // status condition watcher in a timeout. + let status = await_route_status(&client, &ns, "test-no-parent-refs-route").await; + // If timeout has elapsed, then route did not receive a status patch + assert!( + status.parents.len() == 1, + "HTTPRoute Status should have 1 parent status" + ); -// // Update route to remove parent_refs -// let _route = update(&client, mk_route(&ns, "test-no-parent-refs-route", None)).await; + // Update route to remove parent_refs + let _route = update(&client, mk_route(&ns, "test-no-parent-refs-route", None)).await; -// // Wait for the status to be updated to contain no parent statuses. -// await_condition::( -// &client, -// &ns, -// &route.name_unchecked(), -// |obj: Option<&k8s::policy::HttpRoute>| -> bool { -// obj.and_then(|route| route.status.as_ref()) -// .is_some_and(|status| status.inner.parents.is_empty()) -// }, -// ) -// .await -// .expect("HTTPRoute Status should have no parent status"); -// }) -// .await -// } + // Wait for the status to be updated to contain no parent statuses. 
+ await_condition::( + &client, + &ns, + &route.name_unchecked(), + |obj: Option<&k8s::policy::HttpRoute>| -> bool { + obj.and_then(|route| route.status.as_ref()) + .is_some_and(|status| status.inner.parents.is_empty()) + }, + ) + .await + .expect("HTTPRoute Status should have no parent status"); + }) + .await +} -// #[tokio::test(flavor = "current_thread")] -// // Tests that inbound routes (routes attached to a `Server`) are properly -// // reconciled when the parentReference changes. Additionally, tests that routes -// // whose parentRefs do not exist are patched with an appropriate status. -// async fn inbound_accepted_reconcile_no_parent() { -// with_temp_ns(|client, ns| async move { -// // Given a route with a nonexistent parentReference, we expect to have an -// // 'Accepted' condition with 'False' as a status. -// let server_name = "test-reconcile-inbound-server"; -// let srv_ref = vec![k8s::policy::httproute::ParentReference { -// group: Some("policy.linkerd.io".to_string()), -// kind: Some("Server".to_string()), -// namespace: Some(ns.clone()), -// name: server_name.to_string(), -// section_name: None, -// port: None, -// }]; -// let _route = create( -// &client, -// mk_route(&ns, "test-reconcile-inbound-route", Some(srv_ref)), -// ) -// .await; -// let route_status = await_route_status(&client, &ns, "test-reconcile-inbound-route").await; -// let cond = find_route_condition(&route_status.parents, server_name) -// .expect("must have at least one 'Accepted' condition set for parent"); -// // Test when parent ref does not exist we get Accepted { False }. -// assert_eq!(cond.status, "False"); -// assert_eq!(cond.reason, "NoMatchingParent"); +#[tokio::test(flavor = "current_thread")] +// Tests that inbound routes (routes attached to a `Server`) are properly +// reconciled when the parentReference changes. Additionally, tests that routes +// whose parentRefs do not exist are patched with an appropriate status. 
+async fn inbound_accepted_reconcile_no_parent() { + with_temp_ns(|client, ns| async move { + // Given a route with a nonexistent parentReference, we expect to have an + // 'Accepted' condition with 'False' as a status. + let server_name = "test-reconcile-inbound-server"; + let srv_ref = vec![k8s::policy::httproute::ParentReference { + group: Some("policy.linkerd.io".to_string()), + kind: Some("Server".to_string()), + namespace: Some(ns.clone()), + name: server_name.to_string(), + section_name: None, + port: None, + }]; + let _route = create( + &client, + mk_route(&ns, "test-reconcile-inbound-route", Some(srv_ref)), + ) + .await; + let route_status = await_route_status(&client, &ns, "test-reconcile-inbound-route").await; + let cond = find_route_condition(&route_status.parents, server_name) + .expect("must have at least one 'Accepted' condition set for parent"); + // Test when parent ref does not exist we get Accepted { False }. + assert_eq!(cond.status, "False"); + assert_eq!(cond.reason, "NoMatchingParent"); -// // Create the 'Server' that route references and expect it to be picked up -// // by the index. Consequently, route will have its status reconciled. -// let server = k8s::policy::Server { -// metadata: k8s::ObjectMeta { -// namespace: Some(ns.to_string()), -// name: Some(server_name.to_string()), -// ..Default::default() -// }, -// spec: k8s::policy::ServerSpec { -// selector: k8s::policy::server::Selector::Pod(k8s::labels::Selector::from_iter( -// Some(("app", server_name)), -// )), -// port: k8s::policy::server::Port::Name("http".to_string()), -// proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), -// access_policy: None, -// }, -// }; -// create(&client, server).await; + // Create the 'Server' that route references and expect it to be picked up + // by the index. Consequently, route will have its status reconciled. 
+ let server = k8s::policy::Server { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some(server_name.to_string()), + ..Default::default() + }, + spec: k8s::policy::ServerSpec { + selector: k8s::policy::server::Selector::Pod(k8s::labels::Selector::from_iter( + Some(("app", server_name)), + )), + port: k8s::policy::server::Port::Name("http".to_string()), + proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), + access_policy: None, + }, + }; + create(&client, server).await; -// // HTTPRoute may not be patched instantly, await the route condition -// // status becoming accepted. -// let _route_status = await_condition( -// &client, -// &ns, -// "test-reconcile-inbound-route", -// |obj: Option<&k8s::policy::httproute::HttpRoute>| -> bool { -// tracing::trace!(?obj, "got route status"); -// let status = match obj.and_then(|route| route.status.as_ref()) { -// Some(status) => status, -// None => return false, -// }; -// let cond = match find_route_condition(&status.inner.parents, server_name) { -// Some(cond) => cond, -// None => return false, -// }; -// cond.status == "True" && cond.reason == "Accepted" -// }, -// ) -// .await -// .expect("must fetch route") -// .status -// .expect("route must contain a status representation"); -// }) -// .await; -// } + // HTTPRoute may not be patched instantly, await the route condition + // status becoming accepted. 
+ let _route_status = await_condition( + &client, + &ns, + "test-reconcile-inbound-route", + |obj: Option<&k8s::policy::httproute::HttpRoute>| -> bool { + tracing::trace!(?obj, "got route status"); + let status = match obj.and_then(|route| route.status.as_ref()) { + Some(status) => status, + None => return false, + }; + let cond = match find_route_condition(&status.inner.parents, server_name) { + Some(cond) => cond, + None => return false, + }; + cond.status == "True" && cond.reason == "Accepted" + }, + ) + .await + .expect("must fetch route") + .status + .expect("route must contain a status representation"); + }) + .await; +} -// #[tokio::test(flavor = "current_thread")] -// async fn inbound_accepted_reconcile_parent_delete() { -// with_temp_ns(|client, ns| async move { -// // Attach a route to a Server and expect the route to be patched with an -// // Accepted status. -// let server_name = "test-reconcile-delete-server"; -// let server = k8s::policy::Server { -// metadata: k8s::ObjectMeta { -// namespace: Some(ns.to_string()), -// name: Some(server_name.to_string()), -// ..Default::default() -// }, -// spec: k8s::policy::ServerSpec { -// selector: k8s::policy::server::Selector::Pod(k8s::labels::Selector::from_iter( -// Some(("app", server_name)), -// )), -// port: k8s::policy::server::Port::Name("http".to_string()), -// proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), -// access_policy: None, -// }, -// }; -// create(&client, server).await; +#[tokio::test(flavor = "current_thread")] +async fn inbound_accepted_reconcile_parent_delete() { + with_temp_ns(|client, ns| async move { + // Attach a route to a Server and expect the route to be patched with an + // Accepted status. 
+ let server_name = "test-reconcile-delete-server"; + let server = k8s::policy::Server { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some(server_name.to_string()), + ..Default::default() + }, + spec: k8s::policy::ServerSpec { + selector: k8s::policy::server::Selector::Pod(k8s::labels::Selector::from_iter( + Some(("app", server_name)), + )), + port: k8s::policy::server::Port::Name("http".to_string()), + proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), + access_policy: None, + }, + }; + create(&client, server).await; -// // Create parentReference and route -// let srv_ref = vec![k8s::policy::httproute::ParentReference { -// group: Some("policy.linkerd.io".to_string()), -// kind: Some("Server".to_string()), -// namespace: Some(ns.clone()), -// name: server_name.to_string(), -// section_name: None, -// port: None, -// }]; -// let _route = create( -// &client, -// mk_route(&ns, "test-reconcile-delete-route", Some(srv_ref)), -// ) -// .await; -// let route_status = await_route_status(&client, &ns, "test-reconcile-delete-route").await; -// let cond = find_route_condition(&route_status.parents, server_name) -// .expect("must have at least one 'Accepted' condition"); -// assert_eq!(cond.status, "True"); -// assert_eq!(cond.reason, "Accepted"); + // Create parentReference and route + let srv_ref = vec![k8s::policy::httproute::ParentReference { + group: Some("policy.linkerd.io".to_string()), + kind: Some("Server".to_string()), + namespace: Some(ns.clone()), + name: server_name.to_string(), + section_name: None, + port: None, + }]; + let _route = create( + &client, + mk_route(&ns, "test-reconcile-delete-route", Some(srv_ref)), + ) + .await; + let route_status = await_route_status(&client, &ns, "test-reconcile-delete-route").await; + let cond = find_route_condition(&route_status.parents, server_name) + .expect("must have at least one 'Accepted' condition"); + assert_eq!(cond.status, "True"); + assert_eq!(cond.reason, "Accepted"); -// 
// Delete Server -// let api: kube::Api = kube::Api::namespaced(client.clone(), &ns); -// api.delete( -// "test-reconcile-delete-server", -// &kube::api::DeleteParams::default(), -// ) -// .await -// .expect("API delete request failed"); + // Delete Server + let api: kube::Api = kube::Api::namespaced(client.clone(), &ns); + api.delete( + "test-reconcile-delete-server", + &kube::api::DeleteParams::default(), + ) + .await + .expect("API delete request failed"); -// // HTTPRoute may not be patched instantly, await the route condition -// // becoming NoMatchingParent. -// let _route_status = await_condition( -// &client, -// &ns, -// "test-reconcile-delete-route", -// |obj: Option<&k8s::policy::httproute::HttpRoute>| -> bool { -// tracing::trace!(?obj, "got route status"); -// let status = match obj.and_then(|route| route.status.as_ref()) { -// Some(status) => status, -// None => return false, -// }; -// let cond = match find_route_condition(&status.inner.parents, server_name) { -// Some(cond) => cond, -// None => return false, -// }; -// cond.status == "False" && cond.reason == "NoMatchingParent" -// }, -// ) -// .await -// .expect("must fetch route") -// .status -// .expect("route must contain a status representation"); -// }) -// .await; -// } + // HTTPRoute may not be patched instantly, await the route condition + // becoming NoMatchingParent. 
+ let _route_status = await_condition( + &client, + &ns, + "test-reconcile-delete-route", + |obj: Option<&k8s::policy::httproute::HttpRoute>| -> bool { + tracing::trace!(?obj, "got route status"); + let status = match obj.and_then(|route| route.status.as_ref()) { + Some(status) => status, + None => return false, + }; + let cond = match find_route_condition(&status.inner.parents, server_name) { + Some(cond) => cond, + None => return false, + }; + cond.status == "False" && cond.reason == "NoMatchingParent" + }, + ) + .await + .expect("must fetch route") + .status + .expect("route must contain a status representation"); + }) + .await; +} + +// Waits until an HttpRoute with the given namespace and name has a status set +// on it, then returns the generic route status representation. +async fn await_route_status( + client: &kube::Client, + ns: &str, + name: &str, +) -> k8s::policy::httproute::RouteStatus { + use k8s::policy::httproute as api; + let route_status = await_condition(client, ns, name, |obj: Option<&api::HttpRoute>| -> bool { + obj.and_then(|route| route.status.as_ref()).is_some() + }) + .await + .expect("must fetch route") + .status + .expect("route must contain a status representation") + .inner; + tracing::trace!(?route_status, name, ns, "got route status"); + route_status +} diff --git a/policy-test/tests/outbound_api.rs b/policy-test/tests/outbound_api.rs index e0a62a6c84308..1e5140420c745 100644 --- a/policy-test/tests/outbound_api.rs +++ b/policy-test/tests/outbound_api.rs @@ -1,17 +1,15 @@ -use std::time::Duration; - -use futures::StreamExt; +use futures::{FutureExt, StreamExt}; use k8s_gateway_api::{self as gateway}; use linkerd_policy_controller_k8s_api::{self as k8s, policy}; use linkerd_policy_test::{ - assert_default_accrual_backoff, assert_resource_meta, await_route_accepted, create, - create_cluster_scoped, delete_cluster_scoped, grpc, + assert_resource_meta, await_route_accepted, create, create_cluster_scoped, + delete_cluster_scoped, grpc, 
outbound_api::{ assert_backend_matches_reference, assert_route_is_default, assert_singleton, - detect_failure_accrual, failure_accrual_consecutive, retry_watch_outbound_policy, + retry_watch_outbound_policy, }, test_route::{TestParent, TestRoute}, - with_temp_ns, + update, with_temp_ns, }; use maplit::{btreemap, convert_args}; @@ -612,3 +610,496 @@ async fn opaque_service() { test::().await; test::().await; } + +#[tokio::test(flavor = "current_thread")] +async fn route_with_no_port() { + async fn test() { + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()), + ); + // Create a parent + let parent = create(&client, P::make_parent(&ns)).await; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let port_a = 4191; + let port_b = 9999; + + let mut rx_a = retry_watch_outbound_policy(&client, &ns, parent.ip(), port_a).await; + let config_a = rx_a + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config_a); + + let mut rx_b = retry_watch_outbound_policy(&client, &ns, parent.ip(), port_b).await; + let config_b = rx_b + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config_b); + + // There should be a default route. + gateway::HttpRoute::routes(&config_a, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port_a); + }); + gateway::HttpRoute::routes(&config_b, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port_b); + }); + + // Create a route with no port in the parent_ref. 
+ let mut parent_ref = parent.obj_ref(); + parent_ref.port = None; + let route = create( + &client, + R::make_route( + ns.clone(), + vec![parent_ref], + vec![vec![backend.backend_ref(backend_port)]], + ), + ) + .await; + await_route_accepted(&client, &route).await; + + let config_a = rx_a + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config_a); + assert_resource_meta(&config_a.metadata, parent.obj_ref(), port_a); + + let config_b = rx_b + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config_b); + assert_resource_meta(&config_b.metadata, parent.obj_ref(), port_b); + + // The route should apply to both ports. + R::routes(&config_a, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + }); + R::routes(&config_b, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + }); + }) + .await; + } + + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn producer_route() { + async fn test() { + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()), + ); + // Create a parent + let parent = create(&client, P::make_parent(&ns)).await; + let port = 4191; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let mut producer_rx = + retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let producer_config = producer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?producer_config); + 
assert_resource_meta(&producer_config.metadata, parent.obj_ref(), port); + + let mut consumer_rx = + retry_watch_outbound_policy(&client, "consumer_ns", parent.ip(), port).await; + let consumer_config = consumer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?consumer_config); + assert_resource_meta(&consumer_config.metadata, parent.obj_ref(), port); + + // There should be a default route. + gateway::HttpRoute::routes(&producer_config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + gateway::HttpRoute::routes(&consumer_config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + // A route created in the same namespace as its parent service is called + // a producer route. It should be returned in outbound policy requests + // for that service from ALL namespaces. + let route = create( + &client, + R::make_route( + ns.clone(), + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ), + ) + .await; + await_route_accepted(&client, &route).await; + + let producer_config = producer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?producer_config); + assert_resource_meta(&producer_config.metadata, parent.obj_ref(), port); + + let consumer_config = consumer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?consumer_config); + assert_resource_meta(&consumer_config.metadata, parent.obj_ref(), port); + + // The route should be returned in queries from the producer namespace. 
+ R::routes(&producer_config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + }); + // The route should be returned in queries from a consumer namespace. + R::routes(&consumer_config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + }); + }) + .await; + } + + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn pre_existing_producer_route() { + async fn test() { + // We test the scenario where outbound policy watches are initiated after + // a produce route already exists. + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()), + ); + // Create a parent + let parent = create(&client, P::make_parent(&ns)).await; + let port = 4191; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + // A route created in the same namespace as its parent service is called + // a producer route. It should be returned in outbound policy requests + // for that service from ALL namespaces. 
+ let route = create( + &client, + R::make_route( + ns.clone(), + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ), + ) + .await; + await_route_accepted(&client, &route).await; + + let mut producer_rx = + retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let producer_config = producer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?producer_config); + assert_resource_meta(&producer_config.metadata, parent.obj_ref(), port); + + let mut consumer_rx = + retry_watch_outbound_policy(&client, "consumer_ns", parent.ip(), port).await; + let consumer_config = consumer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?consumer_config); + assert_resource_meta(&consumer_config.metadata, parent.obj_ref(), port); + + // The route should be returned in queries from the producer namespace. + R::routes(&producer_config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + }); + // The route should be returned in queries from a consumer namespace. 
+ R::routes(&consumer_config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + }); + }) + .await; + } + + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn consumer_route() { + async fn test() { + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()), + ); + // Create a parent + let parent = create(&client, P::make_parent(&ns)).await; + let port = 4191; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let consumer_ns_name = format!("{}-consumer", ns); + let consumer_ns = create_cluster_scoped( + &client, + k8s::Namespace { + metadata: k8s::ObjectMeta { + name: Some(consumer_ns_name.clone()), + labels: Some(convert_args!(btreemap!( + "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), + ))), + ..Default::default() + }, + ..Default::default() + }, + ) + .await; + + let mut producer_rx = + retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let producer_config = producer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?producer_config); + assert_resource_meta(&producer_config.metadata, parent.obj_ref(), port); + + let mut consumer_rx = + retry_watch_outbound_policy(&client, &consumer_ns_name, parent.ip(), port).await; + let consumer_config = consumer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?consumer_config); + assert_resource_meta(&consumer_config.metadata, parent.obj_ref(), port); + + let mut other_rx = + retry_watch_outbound_policy(&client, "other_ns", parent.ip(), port).await; + 
let other_config = other_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?other_config); + assert_resource_meta(&other_config.metadata, parent.obj_ref(), port); + + // There should be a default route. + gateway::HttpRoute::routes(&producer_config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + gateway::HttpRoute::routes(&consumer_config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + gateway::HttpRoute::routes(&other_config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + // A route created in a different namespace as its parent service is + // called a consumer route. It should be returned in outbound policy + // requests for that service ONLY when the request comes from the + // consumer namespace. + let route = create( + &client, + R::make_route( + consumer_ns_name.clone(), + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ), + ) + .await; + await_route_accepted(&client, &route).await; + + // The route should NOT be returned in queries from the producer namespace. + // There should be a default route. + assert!(producer_rx.next().now_or_never().is_none()); + + // The route should be returned in queries from the same consumer + // namespace. 
+ let consumer_config = consumer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?consumer_config); + assert_resource_meta(&consumer_config.metadata, parent.obj_ref(), port); + + R::routes(&consumer_config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + }); + + // The route should NOT be returned in queries from a different consumer + // namespace. + assert!(other_rx.next().now_or_never().is_none()); + + delete_cluster_scoped(&client, consumer_ns).await; + }) + .await; + } + + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; +} + +#[tokio::test(flavor = "current_thread")] +async fn route_reattachment() { + async fn test() { + with_temp_ns(|client, ns| async move { + // Create a parent + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let mut route = create( + &client, + R::make_route( + ns.clone(), + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ), + ) + .await; + await_route_accepted(&client, &route).await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // The route should be attached. + R::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + }); + + // Detatch route. 
+ route.parents_mut().first_mut().unwrap().name = "other".to_string(); + update(&client, route.clone()).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // The route should be unattached and the default route should be present. + gateway::HttpRoute::routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default::(route, &parent.obj_ref(), port); + }); + + // Reattach route. + route.parents_mut().first_mut().unwrap().name = parent.meta().name.clone().unwrap(); + update(&client, route.clone()).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, parent.obj_ref(), port); + + // The route should be attached again. + R::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(R::extract_meta(outbound_route))); + }); + }) + .await; + } + + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; + test::().await; +} diff --git a/policy-test/tests/outbound_api_gateway.rs b/policy-test/tests/outbound_api_gateway.rs deleted file mode 100644 index f085b15f4bd6a..0000000000000 --- a/policy-test/tests/outbound_api_gateway.rs +++ /dev/null @@ -1,1336 +0,0 @@ -// use futures::prelude::*; -// use kube::ResourceExt; -// use linkerd_policy_controller_k8s_api as k8s; -// use linkerd_policy_test::{ -// assert_default_accrual_backoff, assert_resource_meta, assert_status_accepted, -// await_egress_net_status, await_gateway_route_status, create, create_annotated_egress_network, -// create_annotated_service, create_cluster_scoped, create_egress_network, -// 
create_opaque_egress_network, create_opaque_service, create_service, delete_cluster_scoped, -// grpc, mk_egress_net, mk_service, outbound_api::*, update, with_temp_ns, Resource, -// }; -// use maplit::{btreemap, convert_args}; -// use std::{collections::BTreeMap, time::Duration}; - -// // These tests are copies of the tests in outbound_api_gateway.rs but using the -// // policy.linkerd.io HttpRoute kubernetes types instead of the Gateway API ones. -// // These two files should be kept in sync to ensure that Linkerd can read and -// // function correctly with both types of resources. - -// #[tokio::test(flavor = "current_thread")] -// async fn http_route_with_no_port() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - -// let mut rx_4191 = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; -// let config_4191 = rx_4191 -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config_4191); - -// let mut rx_9999 = retry_watch_outbound_policy(&client, &ns, &svc, 9999).await; -// let config_9999 = rx_9999 -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config_9999); - -// // There should be a default route. 
-// detect_http_routes(&config_4191, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &svc, 4191); -// }); -// detect_http_routes(&config_9999, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &svc, 9999); -// }); - -// let _route = create(&client, mk_http_route(&ns, "foo-route", &svc, None).build()).await; -// await_gateway_route_status(&client, &ns, "foo-route").await; - -// let config_4191 = rx_4191 -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config_4191); - -// // The route should apply to the service. -// detect_http_routes(&config_4191, |routes| { -// let route = assert_singleton(routes); -// assert_route_name_eq(route, "foo-route"); -// }); - -// let config_9999 = rx_9999 -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config_9999); - -// // The route should apply to other ports too. 
-// detect_http_routes(&config_9999, |routes| { -// let route = assert_singleton(routes); -// assert_route_name_eq(route, "foo-route"); -// }); -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn producer_route() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - -// let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; -// let producer_config = producer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?producer_config); - -// let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; -// let consumer_config = consumer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?consumer_config); - -// // There should be a default route. -// detect_http_routes(&producer_config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &svc, 4191); -// }); -// detect_http_routes(&consumer_config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &svc, 4191); -// }); - -// // A route created in the same namespace as its parent service is called -// // a producer route. It should be returned in outbound policy requests -// // for that service from ALL namespaces. 
-// let _route = create( -// &client, -// mk_http_route(&ns, "foo-route", &svc, Some(4191)).build(), -// ) -// .await; -// await_gateway_route_status(&client, &ns, "foo-route").await; - -// let producer_config = producer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?producer_config); -// let consumer_config = consumer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?consumer_config); - -// // The route should be returned in queries from the producer namespace. -// detect_http_routes(&producer_config, |routes| { -// let route = assert_singleton(routes); -// assert_route_name_eq(route, "foo-route"); -// }); - -// // The route should be returned in queries from a consumer namespace. -// detect_http_routes(&consumer_config, |routes| { -// let route = assert_singleton(routes); -// assert_route_name_eq(route, "foo-route"); -// }); -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn pre_existing_producer_route() { -// // We test the scenario where outbound policy watches are initiated after -// // a produce route already exists. -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - -// // A route created in the same namespace as its parent service is called -// // a producer route. It should be returned in outbound policy requests -// // for that service from ALL namespaces. 
-// let _route = create( -// &client, -// mk_http_route(&ns, "foo-route", &svc, Some(4191)).build(), -// ) -// .await; -// await_gateway_route_status(&client, &ns, "foo-route").await; - -// let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; -// let producer_config = producer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?producer_config); - -// let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; -// let consumer_config = consumer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?consumer_config); - -// // The route should be returned in queries from the producer namespace. -// detect_http_routes(&producer_config, |routes| { -// let route = assert_singleton(routes); -// assert_route_name_eq(route, "foo-route"); -// }); - -// // The route should be returned in queries from a consumer namespace. 
-// detect_http_routes(&consumer_config, |routes| { -// let route = assert_singleton(routes); -// assert_route_name_eq(route, "foo-route"); -// }); -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn consumer_route() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - -// let consumer_ns_name = format!("{}-consumer", ns); -// let consumer_ns = create_cluster_scoped( -// &client, -// k8s::Namespace { -// metadata: k8s::ObjectMeta { -// name: Some(consumer_ns_name.clone()), -// labels: Some(convert_args!(btreemap!( -// "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), -// ))), -// ..Default::default() -// }, -// ..Default::default() -// }, -// ) -// .await; - -// let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; -// let producer_config = producer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?producer_config); - -// let mut consumer_rx = -// retry_watch_outbound_policy(&client, &consumer_ns_name, &svc, 4191).await; -// let consumer_config = consumer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?consumer_config); - -// let mut other_rx = retry_watch_outbound_policy(&client, "other_ns", &svc, 4191).await; -// let other_config = other_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?other_config); - -// // There should be a default route. 
-// detect_http_routes(&producer_config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &svc, 4191); -// }); -// detect_http_routes(&consumer_config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &svc, 4191); -// }); -// detect_http_routes(&other_config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &svc, 4191); -// }); - -// // A route created in a different namespace as its parent service is -// // called a consumer route. It should be returned in outbound policy -// // requests for that service ONLY when the request comes from the -// // consumer namespace. -// let _route = create( -// &client, -// mk_http_route(&consumer_ns_name, "foo-route", &svc, Some(4191)).build(), -// ) -// .await; -// await_gateway_route_status(&client, &consumer_ns_name, "foo-route").await; - -// // The route should NOT be returned in queries from the producer namespace. -// // There should be a default route. -// assert!(producer_rx.next().now_or_never().is_none()); - -// // The route should be returned in queries from the same consumer -// // namespace. -// let consumer_config = consumer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?consumer_config); - -// detect_http_routes(&consumer_config, |routes| { -// let route = assert_singleton(routes); -// assert_route_name_eq(route, "foo-route"); -// }); - -// // The route should NOT be returned in queries from a different consumer -// // namespace. 
-// assert!(other_rx.next().now_or_never().is_none()); - -// delete_cluster_scoped(&client, consumer_ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn http_route_retries_and_timeouts_service() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, "my-svc", 4191).await; -// http_route_retries_and_timeouts(Resource::Service(svc), &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn http_route_retries_and_timeouts_egress_net() { -// with_temp_ns(|client, ns| async move { -// // Create an egress network -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// http_route_retries_and_timeouts(Resource::EgressNetwork(egress), &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn service_retries_and_timeouts() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let mut svc = mk_service(&ns, "my-svc", 4191); -// svc.annotations_mut() -// .insert("retry.linkerd.io/http".to_string(), "5xx".to_string()); -// svc.annotations_mut() -// .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); -// let svc = Resource::Service(create(&client, svc).await); - -// retries_and_timeouts(svc, &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_retries_and_timeouts() { -// with_temp_ns(|client, ns| async move { -// // Create a egress net -// let mut egress = mk_egress_net(&ns, "my-egress"); -// egress -// .annotations_mut() -// .insert("retry.linkerd.io/http".to_string(), "5xx".to_string()); -// egress -// .annotations_mut() -// .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); -// let egress = 
Resource::EgressNetwork(create(&client, egress).await); -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// retries_and_timeouts(egress, &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn service_http_route_reattachment() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, "my-svc", 4191).await; -// http_route_reattachment(Resource::Service(svc), &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_http_route_reattachment() { -// with_temp_ns(|client, ns| async move { -// // Create a egress net -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// http_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await; -// }) -// .await; -// } - -// /* Helpers */ -// struct HttpRouteBuilder(k8s_gateway_api::HttpRoute); - -// fn mk_http_route(ns: &str, name: &str, parent: &Resource, port: Option) -> HttpRouteBuilder { -// use k8s_gateway_api as api; - -// HttpRouteBuilder(api::HttpRoute { -// metadata: kube::api::ObjectMeta { -// namespace: Some(ns.to_string()), -// name: Some(name.to_string()), -// ..Default::default() -// }, -// spec: api::HttpRouteSpec { -// inner: api::CommonRouteSpec { -// parent_refs: Some(vec![api::ParentReference { -// group: Some(parent.group()), -// kind: Some(parent.kind()), -// namespace: Some(parent.namespace()), -// name: parent.name(), -// section_name: None, -// port, -// }]), -// }, -// hostnames: None, -// rules: Some(vec![api::HttpRouteRule { -// matches: Some(vec![api::HttpRouteMatch { -// path: Some(api::HttpPathMatch::Exact { -// value: "/foo".to_string(), -// }), -// headers: None, -// query_params: None, -// method: 
Some("GET".to_string()), -// }]), -// filters: None, -// backend_refs: None, -// }]), -// }, -// status: None, -// }) -// } - -// impl HttpRouteBuilder { -// fn with_backends( -// self, -// backends: Option<&[Resource]>, -// backends_ns: Option, -// backend_filters: Option>, -// ) -> Self { -// let mut route = self.0; -// let backend_refs = backends.map(|backends| { -// backends -// .iter() -// .map(|backend| k8s_gateway_api::HttpBackendRef { -// backend_ref: Some(k8s_gateway_api::BackendRef { -// weight: None, -// inner: k8s_gateway_api::BackendObjectReference { -// name: backend.name(), -// port: Some(8888), -// group: Some(backend.group()), -// kind: Some(backend.kind()), -// namespace: backends_ns.clone(), -// }, -// }), -// filters: backend_filters.clone(), -// }) -// .collect() -// }); -// route.spec.rules.iter_mut().flatten().for_each(|rule| { -// rule.backend_refs = backend_refs.clone(); -// }); -// Self(route) -// } - -// fn with_filters(self, filters: Option>) -> Self { -// let mut route = self.0; -// route -// .spec -// .rules -// .iter_mut() -// .flatten() -// .for_each(|rule| rule.filters = filters.clone()); -// Self(route) -// } - -// fn with_annotations(self, annotations: BTreeMap) -> Self { -// let mut route = self.0; -// route.metadata.annotations = Some(annotations); -// Self(route) -// } - -// fn build(self) -> k8s_gateway_api::HttpRoute { -// self.0 -// } -// } - -// fn mk_empty_http_route( -// ns: &str, -// name: &str, -// parent: &Resource, -// port: u16, -// ) -> k8s_gateway_api::HttpRoute { -// use k8s_gateway_api as api; -// api::HttpRoute { -// metadata: kube::api::ObjectMeta { -// namespace: Some(ns.to_string()), -// name: Some(name.to_string()), -// ..Default::default() -// }, -// spec: api::HttpRouteSpec { -// inner: api::CommonRouteSpec { -// parent_refs: Some(vec![api::ParentReference { -// group: Some(parent.group()), -// kind: Some(parent.kind()), -// namespace: Some(parent.namespace()), -// name: parent.name(), -// section_name: 
None, -// port: Some(port), -// }]), -// }, -// hostnames: None, -// rules: Some(vec![]), -// }, -// status: None, -// } -// } - -// async fn parent_with_no_http_routes(parent: Resource, client: &kube::Client, ns: &str) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // There should be a default route. -// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &parent, 4191); -// }); -// } - -// async fn parent_with_http_route_without_rules(parent: Resource, client: &kube::Client, ns: &str) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // There should be a default route. -// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &parent, 4191); -// }); - -// let _route = create(client, mk_empty_http_route(ns, "foo-route", &parent, 4191)).await; -// await_gateway_route_status(client, ns, "foo-route").await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // There should be a route with no rules. 
-// detect_http_routes(&config, |routes| { -// let route = assert_route_attached(routes, &parent); -// assert_eq!(route.rules.len(), 0); -// }); -// } - -// async fn parent_with_http_routes_without_backends( -// parent: Resource, -// client: &kube::Client, -// ns: &str, -// ) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // There should be a default route. -// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &parent, 4191); -// }); - -// let _route = create( -// client, -// mk_http_route(ns, "foo-route", &parent, Some(4191)).build(), -// ) -// .await; -// await_gateway_route_status(client, ns, "foo-route").await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // There should be a route with the logical backend. 
-// detect_http_routes(&config, |routes| { -// let route = assert_route_attached(routes, &parent); -// let backends = route_backends_first_available(route); -// let backend = assert_singleton(backends); -// assert_backend_matches_parent(backend, &parent, 4191); -// }); -// } - -// async fn parent_with_http_routes_with_backend( -// parent: Resource, -// rule_backend: Resource, -// client: &kube::Client, -// ns: &str, -// ) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // There should be a default route. -// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &parent, 4191); -// }); - -// let backends = [rule_backend.clone()]; -// let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( -// Some(&backends), -// None, -// None, -// ); -// let _route = create(client, route.build()).await; -// await_gateway_route_status(client, ns, "foo-route").await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // There should be a route with a backend with no filters. 
-// detect_http_routes(&config, |routes| { -// let route = assert_route_attached(routes, &parent); -// let backends = route_backends_random_available(route); -// let backend = assert_singleton(backends); -// assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &rule_backend, 8888); -// let filters = &backend.backend.as_ref().unwrap().filters; -// assert_eq!(filters.len(), 0); -// }); -// } - -// async fn parent_with_http_routes_with_invalid_backend( -// parent: Resource, -// backend: Resource, -// client: &kube::Client, -// ns: &str, -// ) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // There should be a default route. -// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &parent, 4191); -// }); - -// let backends = [backend]; -// let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( -// Some(&backends), -// None, -// None, -// ); -// let _route = create(client, route.build()).await; -// await_gateway_route_status(client, ns, "foo-route").await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // There should be a route with a backend. 
-// detect_http_routes(&config, |routes| { -// let route = assert_route_attached(routes, &parent); -// let backends = route_backends_random_available(route); -// let backend = assert_singleton(backends); -// assert_backend_has_failure_filter(backend); -// }); -// } - -// async fn parent_with_multiple_http_routes(parent: Resource, client: &kube::Client, ns: &str) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // There should be a default route. -// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &parent, 4191); -// }); - -// // Routes should be returned in sorted order by creation timestamp then -// // name. To ensure that this test isn't timing dependant, routes should -// // be created in alphabetical order. -// let _a_route = create( -// client, -// mk_http_route(ns, "a-route", &parent, Some(4191)).build(), -// ) -// .await; -// await_gateway_route_status(client, ns, "a-route").await; - -// // First route update. -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// let _b_route = create( -// client, -// mk_http_route(ns, "b-route", &parent, Some(4191)).build(), -// ) -// .await; -// await_gateway_route_status(client, ns, "b-route").await; - -// // Second route update. 
-// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// detect_http_routes(&config, |routes| { -// let num_routes = match parent { -// Resource::EgressNetwork(_) => 3, // three routes for egress net 2 configured + 1 default -// Resource::Service(_) => 2, // two routes for service -// }; -// assert_eq!(routes.len(), num_routes); -// assert_eq!(route_name(&routes[0]), "a-route"); -// assert_eq!(route_name(&routes[1]), "b-route"); -// }); -// } - -// async fn parent_with_consecutive_failure_accrual( -// parent: Resource, -// client: &kube::Client, -// ns: &str, -// ) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// detect_failure_accrual(&config, |accrual| { -// let consecutive = failure_accrual_consecutive(accrual); -// assert_eq!(8, consecutive.max_failures); -// assert_eq!( -// &grpc::outbound::ExponentialBackoff { -// min_backoff: Some(Duration::from_secs(10).try_into().unwrap()), -// max_backoff: Some(Duration::from_secs(600).try_into().unwrap()), -// jitter_ratio: 1.0_f32, -// }, -// consecutive -// .backoff -// .as_ref() -// .expect("backoff must be configured") -// ); -// }); -// } - -// async fn parent_with_consecutive_failure_accrual_defaults_no_config( -// parent: Resource, -// client: &kube::Client, -// ns: &str, -// ) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// // Expect default max_failures and default backoff -// detect_failure_accrual(&config, |accrual| { -// let consecutive = 
failure_accrual_consecutive(accrual); -// assert_eq!(7, consecutive.max_failures); -// assert_default_accrual_backoff!(consecutive -// .backoff -// .as_ref() -// .expect("backoff must be configured")); -// }); -// } - -// async fn parent_with_consecutive_failure_accrual_defaults_max_fails( -// parent: Resource, -// client: &kube::Client, -// ns: &str, -// ) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// // Expect default backoff and overridden max_failures -// detect_failure_accrual(&config, |accrual| { -// let consecutive = failure_accrual_consecutive(accrual); -// assert_eq!(8, consecutive.max_failures); -// assert_default_accrual_backoff!(consecutive -// .backoff -// .as_ref() -// .expect("backoff must be configured")); -// }); -// } - -// async fn parent_with_consecutive_failure_accrual_defaults_max_jitter( -// parent: Resource, -// client: &kube::Client, -// ns: &str, -// ) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// // Expect defaults for everything except for the jitter ratio -// detect_failure_accrual(&config, |accrual| { -// let consecutive = failure_accrual_consecutive(accrual); -// assert_eq!(7, consecutive.max_failures); -// assert_eq!( -// &grpc::outbound::ExponentialBackoff { -// min_backoff: Some(Duration::from_secs(1).try_into().unwrap()), -// max_backoff: Some(Duration::from_secs(60).try_into().unwrap()), -// jitter_ratio: 1.0_f32, -// }, -// consecutive -// .backoff -// .as_ref() -// .expect("backoff must be configured") -// ); -// }); -// } - -// async fn parent_with_default_failure_accrual( -// parent_default_config: Resource, -// 
parent_max_failures: Resource, -// client: &kube::Client, -// ns: &str, -// ) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent_default_config, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// // Expect failure accrual config to be default (no failure accrual) -// detect_failure_accrual(&config, |accrual| { -// assert!( -// accrual.is_none(), -// "consecutive failure accrual should not be configured for service" -// ); -// }); - -// let mut rx = retry_watch_outbound_policy(client, ns, &parent_max_failures, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// // Expect failure accrual config to be default (no failure accrual) -// detect_failure_accrual(&config, |accrual| { -// assert!( -// accrual.is_none(), -// "consecutive failure accrual should not be configured for service" -// ) -// }); -// } - -// async fn opaque_parent(parent: Resource, client: &kube::Client, ns: &str) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// // Proxy protocol should be opaque. 
-// match config.protocol.unwrap().kind.unwrap() { -// grpc::outbound::proxy_protocol::Kind::Opaque(_) => {} -// _ => panic!("proxy protocol must be Opaque"), -// }; -// } - -// async fn route_with_filters(parent: Resource, backend: Resource, client: &kube::Client, ns: &str) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// // There should be a default route. -// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &parent, 4191); -// }); - -// let backends = [backend.clone()]; -// let route = mk_http_route(ns, "foo-route", &parent, Some(4191)) -// .with_backends(Some(&backends), None, None) -// .with_filters(Some(vec![ -// k8s_gateway_api::HttpRouteFilter::RequestHeaderModifier { -// request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { -// set: Some(vec![k8s_gateway_api::HttpHeader { -// name: "set".to_string(), -// value: "set-value".to_string(), -// }]), -// add: Some(vec![k8s_gateway_api::HttpHeader { -// name: "add".to_string(), -// value: "add-value".to_string(), -// }]), -// remove: Some(vec!["remove".to_string()]), -// }, -// }, -// k8s_gateway_api::HttpRouteFilter::RequestRedirect { -// request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { -// scheme: Some("http".to_string()), -// hostname: Some("host".to_string()), -// path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { -// replace_prefix_match: "/path".to_string(), -// }), -// port: Some(5555), -// status_code: Some(302), -// }, -// }, -// ])); -// let _route = create(client, route.build()).await; -// await_gateway_route_status(client, ns, "foo-route").await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// 
tracing::trace!(?config); - -// // There should be a route with filters. -// detect_http_routes(&config, |routes| { -// let route = assert_route_attached(routes, &parent); -// let rule = assert_singleton(&route.rules); -// let filters = &rule.filters; -// assert_eq!( -// *filters, -// vec![ -// grpc::outbound::http_route::Filter { -// kind: Some( -// grpc::outbound::http_route::filter::Kind::RequestHeaderModifier( -// grpc::http_route::RequestHeaderModifier { -// add: Some(grpc::http_types::Headers { -// headers: vec![grpc::http_types::headers::Header { -// name: "add".to_string(), -// value: "add-value".into(), -// }] -// }), -// set: Some(grpc::http_types::Headers { -// headers: vec![grpc::http_types::headers::Header { -// name: "set".to_string(), -// value: "set-value".into(), -// }] -// }), -// remove: vec!["remove".to_string()], -// } -// ) -// ) -// }, -// grpc::outbound::http_route::Filter { -// kind: Some(grpc::outbound::http_route::filter::Kind::Redirect( -// grpc::http_route::RequestRedirect { -// scheme: Some(grpc::http_types::Scheme { -// r#type: Some(grpc::http_types::scheme::Type::Registered( -// grpc::http_types::scheme::Registered::Http.into(), -// )) -// }), -// host: "host".to_string(), -// path: Some(linkerd2_proxy_api::http_route::PathModifier { -// replace: Some( -// linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( -// "/path".to_string() -// ) -// ) -// }), -// port: 5555, -// status: 302, -// } -// )) -// } -// ] -// ); -// }); -// } - -// async fn backend_with_filters( -// parent: Resource, -// backend_for_parent: Resource, -// client: &kube::Client, -// ns: &str, -// ) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// // There should be a default route. 
-// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &parent, 4191); -// }); - -// let backends = [backend_for_parent.clone()]; -// let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( -// Some(&backends), -// None, -// Some(vec![ -// k8s_gateway_api::HttpRouteFilter::RequestHeaderModifier { -// request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { -// set: Some(vec![k8s_gateway_api::HttpHeader { -// name: "set".to_string(), -// value: "set-value".to_string(), -// }]), -// add: Some(vec![k8s_gateway_api::HttpHeader { -// name: "add".to_string(), -// value: "add-value".to_string(), -// }]), -// remove: Some(vec!["remove".to_string()]), -// }, -// }, -// k8s_gateway_api::HttpRouteFilter::RequestRedirect { -// request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { -// scheme: Some("http".to_string()), -// hostname: Some("host".to_string()), -// path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { -// replace_prefix_match: "/path".to_string(), -// }), -// port: Some(5555), -// status_code: Some(302), -// }, -// }, -// ]), -// ); -// let _route = create(client, route.build()).await; -// await_gateway_route_status(client, ns, "foo-route").await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// // There should be a route without rule filters. 
-// detect_http_routes(&config, |routes| { -// let route = assert_route_attached(routes, &parent); -// let rule = assert_singleton(&route.rules); -// assert_eq!(rule.filters.len(), 0); -// let backends = route_backends_random_available(route); -// let backend = assert_singleton(backends); -// assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_for_parent, 8888); -// let filters = &backend.backend.as_ref().unwrap().filters; -// assert_eq!( -// *filters, -// vec![ -// grpc::outbound::http_route::Filter { -// kind: Some( -// grpc::outbound::http_route::filter::Kind::RequestHeaderModifier( -// grpc::http_route::RequestHeaderModifier { -// add: Some(grpc::http_types::Headers { -// headers: vec![grpc::http_types::headers::Header { -// name: "add".to_string(), -// value: "add-value".into(), -// }] -// }), -// set: Some(grpc::http_types::Headers { -// headers: vec![grpc::http_types::headers::Header { -// name: "set".to_string(), -// value: "set-value".into(), -// }] -// }), -// remove: vec!["remove".to_string()], -// } -// ) -// ) -// }, -// grpc::outbound::http_route::Filter { -// kind: Some(grpc::outbound::http_route::filter::Kind::Redirect( -// grpc::http_route::RequestRedirect { -// scheme: Some(grpc::http_types::Scheme { -// r#type: Some(grpc::http_types::scheme::Type::Registered( -// grpc::http_types::scheme::Registered::Http.into(), -// )) -// }), -// host: "host".to_string(), -// path: Some(linkerd2_proxy_api::http_route::PathModifier { -// replace: Some( -// linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( -// "/path".to_string() -// ) -// ) -// }), -// port: 5555, -// status: 302, -// } -// )) -// } -// ] -// ); -// }); -// } - -// async fn http_route_retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { -// let _route = create( -// client, -// mk_http_route(ns, "foo-route", &parent, Some(4191)) -// .with_annotations( -// vec![ -// ("retry.linkerd.io/http".to_string(), "5xx".to_string()), -// 
("timeout.linkerd.io/response".to_string(), "10s".to_string()), -// ] -// .into_iter() -// .collect(), -// ) -// .build(), -// ) -// .await; - -// await_gateway_route_status(client, ns, "foo-route").await; - -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// detect_http_routes(&config, |routes| { -// let route = assert_route_attached(routes, &parent); -// let rule = assert_singleton(&route.rules); -// let conditions = rule -// .retry -// .as_ref() -// .expect("retry config expected") -// .conditions -// .as_ref() -// .expect("retry conditions expected"); -// let status_range = assert_singleton(&conditions.status_ranges); -// assert_eq!(status_range.start, 500); -// assert_eq!(status_range.end, 599); -// let timeout = rule -// .timeouts -// .as_ref() -// .expect("timeouts expected") -// .response -// .as_ref() -// .expect("response timeout expected"); -// assert_eq!(timeout.seconds, 10); -// }); -// } - -// async fn retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { -// let _route = create( -// client, -// mk_http_route(ns, "foo-route", &parent, Some(4191)) -// .with_annotations( -// vec![ -// // Route annotations override the timeout config specified -// // on the service. 
-// ("timeout.linkerd.io/request".to_string(), "5s".to_string()), -// ] -// .into_iter() -// .collect(), -// ) -// .build(), -// ) -// .await; -// await_gateway_route_status(client, ns, "foo-route").await; - -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// detect_http_routes(&config, |routes| { -// let route = assert_route_attached(routes, &parent); -// let rule = assert_singleton(&route.rules); -// let conditions = rule -// .retry -// .as_ref() -// .expect("retry config expected") -// .conditions -// .as_ref() -// .expect("retry conditions expected"); -// let status_range = assert_singleton(&conditions.status_ranges); -// // Retry config inherited from the service. -// assert_eq!(status_range.start, 500); -// assert_eq!(status_range.end, 599); -// let timeouts = rule.timeouts.as_ref().expect("timeouts expected"); -// // Service timeout config overridden by route timeout config. -// assert_eq!(timeouts.response, None); -// let request_timeout = timeouts.request.as_ref().expect("request timeout expected"); -// assert_eq!(request_timeout.seconds, 5); -// }); -// } - -// async fn http_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { -// let mut route = create(client, mk_empty_http_route(ns, "foo-route", &parent, 4191)).await; -// await_gateway_route_status(client, ns, "foo-route").await; - -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // The route should be attached. 
-// detect_http_routes(&config, |routes| { -// let route: &grpc::outbound::HttpRoute = assert_route_attached(routes, &parent); -// assert_route_name_eq(route, "foo-route"); -// }); - -// route -// .spec -// .inner -// .parent_refs -// .as_mut() -// .unwrap() -// .first_mut() -// .unwrap() -// .name = "other".to_string(); -// update(client, route.clone()).await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // The route should be unattached and the default route should be present. -// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &parent, 4191); -// }); - -// route -// .spec -// .inner -// .parent_refs -// .as_mut() -// .unwrap() -// .first_mut() -// .unwrap() -// .name = parent.name(); -// update(client, route).await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // The route should be attached again. 
-// detect_http_routes(&config, |routes| { -// let route = assert_route_attached(routes, &parent); -// assert_route_name_eq(route, "foo-route"); -// }); -// } diff --git a/policy-test/tests/outbound_api_grpc.rs b/policy-test/tests/outbound_api_grpc.rs index cd7a9ed97a1ac..a357189e9f61e 100644 --- a/policy-test/tests/outbound_api_grpc.rs +++ b/policy-test/tests/outbound_api_grpc.rs @@ -1,4 +1,5 @@ use futures::StreamExt; +use kube::Resource; use linkerd2_proxy_api::{self as api, outbound}; use linkerd_policy_controller_k8s_api::{self as k8s, gateway, policy}; use linkerd_policy_test::{ @@ -7,6 +8,7 @@ use linkerd_policy_test::{ test_route::{TestParent, TestRoute}, with_temp_ns, }; +use maplit::btreemap; #[tokio::test(flavor = "current_thread")] async fn grpc_route_with_filters_service() { @@ -215,75 +217,145 @@ async fn policy_grpc_route_with_backend_filters() { test::().await; } -// #[tokio::test(flavor = "current_thread")] -// async fn service_grpc_route_retries_and_timeouts() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); -// grpc_route_retries_and_timeouts(svc, &client, &ns).await; -// }) -// .await; -// } +#[tokio::test(flavor = "current_thread")] +async fn grpc_route_retries_and_timeouts() { + async fn test() { + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + // Create a parent + let port = 4191; + let parent = create(&client, P::make_parent(&ns)).await; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_grpc_route_retries_and_timeouts() { -// with_temp_ns(|client, ns| async move { -// // Create a egress net -// let egress = -// Resource::EgressNetwork(create_egress_network(&client, &ns, "my-egress").await); 
-// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); + let mut route = gateway::GrpcRoute::make_route( + ns.clone(), + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + route.meta_mut().annotations = Some(btreemap! { + "retry.linkerd.io/grpc".to_string() => "internal".to_string(), + "timeout.linkerd.io/response".to_string() => "10s".to_string(), + }); + let route = create(&client, route).await; + await_route_accepted(&client, &route).await; -// grpc_route_retries_and_timeouts(egress, &client, &ns).await; -// }) -// .await; -// } + let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); -// #[tokio::test(flavor = "current_thread")] -// async fn service_retries_and_timeouts() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let mut svc = mk_service(&ns, "my-svc", 4191); -// svc.annotations_mut() -// .insert("retry.linkerd.io/grpc".to_string(), "internal".to_string()); -// svc.annotations_mut() -// .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); -// let svc = Resource::Service(create(&client, svc).await); - -// parent_retries_and_timeouts(svc, &client, &ns).await; -// }) -// .await; -// } + assert_resource_meta(&config.metadata, parent.obj_ref(), port); -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_retries_and_timeouts() { -// with_temp_ns(|client, ns| async move { -// // Create a egress net -// let mut egress = mk_egress_net(&ns, "my-egress"); -// egress -// .annotations_mut() -// .insert("retry.linkerd.io/grpc".to_string(), "internal".to_string()); -// egress -// .annotations_mut() -// .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); -// let egress = Resource::EgressNetwork(create(&client, egress).await); 
-// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); + gateway::GrpcRoute::routes(&config, |routes| { + let outbound_route = routes.first().expect("route must exist"); + assert!(route.meta_eq(gateway::GrpcRoute::extract_meta(outbound_route))); + let rule = assert_singleton(&outbound_route.rules); + let conditions = rule + .retry + .as_ref() + .expect("retry config expected") + .conditions + .as_ref() + .expect("retry conditions expected"); + assert!(conditions.internal); + let timeout = rule + .timeouts + .as_ref() + .expect("timeouts expected") + .response + .as_ref() + .expect("response timeout expected"); + assert_eq!(timeout.seconds, 10); + }); + }) + .await; + } -// parent_retries_and_timeouts(egress, &client, &ns).await; -// }) -// .await; -// } + test::().await; + test::().await; +} -// #[tokio::test(flavor = "current_thread")] -// async fn service_grpc_route_reattachment() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, "my-svc", 4191).await; -// grpc_route_reattachment(Resource::Service(svc), &client, &ns).await; -// }) -// .await; -// } +#[tokio::test(flavor = "current_thread")] +async fn parent_retries_and_timeouts() { + async fn test() { + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); + // Create a parent + let mut parent = P::make_parent(&ns); + parent.meta_mut().annotations = Some(btreemap! 
{
+                "retry.linkerd.io/grpc".to_string() => "internal".to_string(),
+                "timeout.linkerd.io/response".to_string() => "10s".to_string(),
+            });
+            let parent = create(&client, parent).await;
+            let port = 4191;
+            // Create a backend
+            let backend_port = 8888;
+            let backend = match P::make_backend(&ns) {
+                Some(b) => create(&client, b).await,
+                None => parent.clone(),
+            };
+
+            let mut route = gateway::GrpcRoute::make_route(
+                ns.clone(),
+                vec![parent.obj_ref()],
+                vec![vec![backend.backend_ref(backend_port)]],
+            );
+            route.meta_mut().annotations = Some(btreemap! {
+                // Route annotations override the timeout config specified on the parent.
+                "timeout.linkerd.io/request".to_string() => "5s".to_string(),
+            });
+            let route = create(&client, route).await;
+            await_route_accepted(&client, &route).await;
+
+            let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await;
+            let config = rx
+                .next()
+                .await
+                .expect("watch must not fail")
+                .expect("watch must return an initial config");
+            tracing::trace!(?config);
+            assert_resource_meta(&config.metadata, parent.obj_ref(), port);
+
+            gateway::GrpcRoute::routes(&config, |routes| {
+                let outbound_route = routes.first().expect("route must exist");
+                assert!(route.meta_eq(gateway::GrpcRoute::extract_meta(outbound_route)));
+                let rule = assert_singleton(&outbound_route.rules);
+
+                // Retry config inherited from the parent.
+                let conditions = rule
+                    .retry
+                    .as_ref()
+                    .expect("retry config expected")
+                    .conditions
+                    .as_ref()
+                    .expect("retry conditions expected");
+                assert!(conditions.internal);
+
+                // Parent timeout config overridden by route timeout config.
+ let timeouts = rule.timeouts.as_ref().expect("timeouts expected"); + assert_eq!(timeouts.response, None); + let request_timeout = timeouts.request.as_ref().expect("request timeout expected"); + assert_eq!(request_timeout.seconds, 5); + }); + }) + .await; + } + + test::().await; + test::().await; +} // #[tokio::test(flavor = "current_thread")] // async fn egress_net_grpc_route_reattachment() { diff --git a/policy-test/tests/outbound_api_http.rs b/policy-test/tests/outbound_api_http.rs index 98a1c4afdcab5..de1970689a728 100644 --- a/policy-test/tests/outbound_api_http.rs +++ b/policy-test/tests/outbound_api_http.rs @@ -7,6 +7,7 @@ use linkerd_policy_test::{ test_route::{TestParent, TestRoute}, with_temp_ns, }; +use maplit::btreemap; #[tokio::test(flavor = "current_thread")] async fn gateway_http_route_with_filters_service() { @@ -573,3 +574,150 @@ async fn policy_http_route_with_backend_filters() { test::().await; test::().await; } + +#[tokio::test(flavor = "current_thread")] +async fn http_route_retries_and_timeouts() { + async fn test>() { + with_temp_ns(|client, ns| async move { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()), + ); + // Create a parent + let parent = create(&client, P::make_parent(&ns)).await; + let port = 4191; + // Create a backend + let backend_port = 8888; + let backend = match P::make_backend(&ns) { + Some(b) => create(&client, b).await, + None => parent.clone(), + }; + + let mut route = R::make_route( + ns.clone(), + vec![parent.obj_ref()], + vec![vec![backend.backend_ref(backend_port)]], + ); + route.meta_mut().annotations = Some(btreemap! 
{
+                "retry.linkerd.io/http".to_string() => "5xx".to_string(),
+                "timeout.linkerd.io/response".to_string() => "10s".to_string(),
+            });
+            let route = create(&client, route).await;
+            await_route_accepted(&client, &route).await;
+
+            let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await;
+            let config = rx
+                .next()
+                .await
+                .expect("watch must not fail")
+                .expect("watch must return an initial config");
+            tracing::trace!(?config);
+            assert_resource_meta(&config.metadata, parent.obj_ref(), port);
+
+            R::routes(&config, |routes| {
+                let outbound_route = routes.first().expect("route must exist");
+                assert!(route.meta_eq(R::extract_meta(outbound_route)));
+                let rule = assert_singleton(&outbound_route.rules);
+                let conditions = rule
+                    .retry
+                    .as_ref()
+                    .expect("retry config expected")
+                    .conditions
+                    .as_ref()
+                    .expect("retry conditions expected");
+                let status_range = assert_singleton(&conditions.status_ranges);
+                assert_eq!(status_range.start, 500);
+                assert_eq!(status_range.end, 599);
+                let timeout = rule
+                    .timeouts
+                    .as_ref()
+                    .expect("timeouts expected")
+                    .response
+                    .as_ref()
+                    .expect("response timeout expected");
+                assert_eq!(timeout.seconds, 10);
+            });
+        })
+        .await;
+    }
+
+    test::<k8s::Service, gateway::HttpRoute>().await;
+    test::<k8s::Service, policy::HttpRoute>().await;
+    test::<policy::EgressNetwork, gateway::HttpRoute>().await;
+    test::<policy::EgressNetwork, policy::HttpRoute>().await;
+}
+
+#[tokio::test(flavor = "current_thread")]
+async fn parent_retries_and_timeouts() {
+    async fn test<P: TestParent, R: TestRoute<Route = outbound::HttpRoute>>() {
+        with_temp_ns(|client, ns| async move {
+            tracing::debug!(
+                parent = %P::kind(&P::DynamicType::default()),
+                route = %R::kind(&R::DynamicType::default()),
+            );
+            // Create a parent
+            let mut parent = P::make_parent(&ns);
+            parent.meta_mut().annotations = Some(btreemap! 
{
+                "retry.linkerd.io/http".to_string() => "5xx".to_string(),
+                "timeout.linkerd.io/response".to_string() => "10s".to_string(),
+            });
+            let parent = create(&client, parent).await;
+            let port = 4191;
+            // Create a backend
+            let backend_port = 8888;
+            let backend = match P::make_backend(&ns) {
+                Some(b) => create(&client, b).await,
+                None => parent.clone(),
+            };
+
+            let mut route = R::make_route(
+                ns.clone(),
+                vec![parent.obj_ref()],
+                vec![vec![backend.backend_ref(backend_port)]],
+            );
+            route.meta_mut().annotations = Some(btreemap! {
+                // Route annotations override the timeout config specified on the parent.
+                "timeout.linkerd.io/request".to_string() => "5s".to_string(),
+            });
+            let route = create(&client, route).await;
+            await_route_accepted(&client, &route).await;
+
+            let mut rx = retry_watch_outbound_policy(&client, &ns, parent.ip(), port).await;
+            let config = rx
+                .next()
+                .await
+                .expect("watch must not fail")
+                .expect("watch must return an initial config");
+            tracing::trace!(?config);
+            assert_resource_meta(&config.metadata, parent.obj_ref(), port);
+
+            R::routes(&config, |routes| {
+                let outbound_route = routes.first().expect("route must exist");
+                assert!(route.meta_eq(R::extract_meta(outbound_route)));
+                let rule = assert_singleton(&outbound_route.rules);
+                let conditions = rule
+                    .retry
+                    .as_ref()
+                    .expect("retry config expected")
+                    .conditions
+                    .as_ref()
+                    .expect("retry conditions expected");
+                let status_range = assert_singleton(&conditions.status_ranges);
+                // Retry config inherited from the parent.
+                assert_eq!(status_range.start, 500);
+                assert_eq!(status_range.end, 599);
+                let timeouts = rule.timeouts.as_ref().expect("timeouts expected");
+                // Parent timeout config overridden by route timeout config.
+ assert_eq!(timeouts.response, None); + let request_timeout = timeouts.request.as_ref().expect("request timeout expected"); + assert_eq!(request_timeout.seconds, 5); + }); + }) + .await; + } + + test::().await; + test::().await; + test::().await; + test::().await; +} diff --git a/policy-test/tests/outbound_api_linkerd.rs b/policy-test/tests/outbound_api_linkerd.rs deleted file mode 100644 index 350fca3198f78..0000000000000 --- a/policy-test/tests/outbound_api_linkerd.rs +++ /dev/null @@ -1,1333 +0,0 @@ -// use std::{collections::BTreeMap, time::Duration}; - -// use futures::prelude::*; -// use kube::ResourceExt; -// use linkerd_policy_controller_k8s_api as k8s; -// use linkerd_policy_test::{ -// assert_default_accrual_backoff, assert_resource_meta, assert_status_accepted, -// await_egress_net_status, await_route_status, create, create_annotated_egress_network, -// create_annotated_service, create_cluster_scoped, create_egress_network, -// create_opaque_egress_network, create_opaque_service, create_service, delete_cluster_scoped, -// grpc, mk_egress_net, mk_service, outbound_api::*, update, with_temp_ns, Resource, -// }; -// use maplit::{btreemap, convert_args}; - -// #[tokio::test(flavor = "current_thread")] -// async fn http_route_with_no_port() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - -// let mut rx_4191 = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; -// let config_4191 = rx_4191 -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config_4191); - -// let mut rx_9999 = retry_watch_outbound_policy(&client, &ns, &svc, 9999).await; -// let config_9999 = rx_9999 -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config_9999); - -// // There should be a default route. 
-// detect_http_routes(&config_4191, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &svc, 4191); -// }); -// detect_http_routes(&config_9999, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &svc, 9999); -// }); - -// let _route = create(&client, mk_http_route(&ns, "foo-route", &svc, None).build()).await; -// await_route_status(&client, &ns, "foo-route").await; - -// let config_4191 = rx_4191 -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config_4191); - -// // The route should apply to the service. -// detect_http_routes(&config_4191, |routes| { -// let route = assert_singleton(routes); -// assert_route_name_eq(route, "foo-route"); -// }); - -// let config_9999 = rx_9999 -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config_9999); - -// // The route should apply to other ports too. 
-// detect_http_routes(&config_9999, |routes| { -// let route = assert_singleton(routes); -// assert_route_name_eq(route, "foo-route"); -// }); -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn producer_route() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - -// let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; -// let producer_config = producer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?producer_config); - -// let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; -// let consumer_config = consumer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?consumer_config); - -// // There should be a default route. -// detect_http_routes(&producer_config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &svc, 4191); -// }); -// detect_http_routes(&consumer_config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &svc, 4191); -// }); - -// // A route created in the same namespace as its parent service is called -// // a producer route. It should be returned in outbound policy requests -// // for that service from ALL namespaces. 
-// let _route = create( -// &client, -// mk_http_route(&ns, "foo-route", &svc, Some(4191)).build(), -// ) -// .await; -// await_route_status(&client, &ns, "foo-route").await; - -// let producer_config = producer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?producer_config); -// let consumer_config = consumer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?consumer_config); - -// // The route should be returned in queries from the producer namespace. -// detect_http_routes(&producer_config, |routes| { -// let route = assert_singleton(routes); -// assert_route_name_eq(route, "foo-route"); -// }); - -// // The route should be returned in queries from a consumer namespace. -// detect_http_routes(&consumer_config, |routes| { -// let route = assert_singleton(routes); -// assert_route_name_eq(route, "foo-route"); -// }); -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn pre_existing_producer_route() { -// // We test the scenario where outbound policy watches are initiated after -// // a produce route already exists. -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - -// // A route created in the same namespace as its parent service is called -// // a producer route. It should be returned in outbound policy requests -// // for that service from ALL namespaces. 
-// let _route = create( -// &client, -// mk_http_route(&ns, "foo-route", &svc, Some(4191)).build(), -// ) -// .await; -// await_route_status(&client, &ns, "foo-route").await; - -// let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; -// let producer_config = producer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?producer_config); - -// let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; -// let consumer_config = consumer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?consumer_config); - -// // The route should be returned in queries from the producer namespace. -// detect_http_routes(&producer_config, |routes| { -// let route = assert_singleton(routes); -// assert_route_name_eq(route, "foo-route"); -// }); - -// // The route should be returned in queries from a consumer namespace. 
-// detect_http_routes(&consumer_config, |routes| { -// let route = assert_singleton(routes); -// assert_route_name_eq(route, "foo-route"); -// }); -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn consumer_route() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - -// let consumer_ns_name = format!("{}-consumer", ns); -// let consumer_ns = create_cluster_scoped( -// &client, -// k8s::Namespace { -// metadata: k8s::ObjectMeta { -// name: Some(consumer_ns_name.clone()), -// labels: Some(convert_args!(btreemap!( -// "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), -// ))), -// ..Default::default() -// }, -// ..Default::default() -// }, -// ) -// .await; - -// let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; -// let producer_config = producer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?producer_config); - -// let mut consumer_rx = -// retry_watch_outbound_policy(&client, &consumer_ns_name, &svc, 4191).await; -// let consumer_config = consumer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?consumer_config); - -// let mut other_rx = retry_watch_outbound_policy(&client, "other_ns", &svc, 4191).await; -// let other_config = other_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?other_config); - -// // There should be a default route. 
-// detect_http_routes(&producer_config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &svc, 4191); -// }); -// detect_http_routes(&consumer_config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &svc, 4191); -// }); -// detect_http_routes(&other_config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &svc, 4191); -// }); - -// // A route created in a different namespace as its parent service is -// // called a consumer route. It should be returned in outbound policy -// // requests for that service ONLY when the request comes from the -// // consumer namespace. -// let _route = create( -// &client, -// mk_http_route(&consumer_ns_name, "foo-route", &svc, Some(4191)).build(), -// ) -// .await; -// await_route_status(&client, &consumer_ns_name, "foo-route").await; - -// // The route should NOT be returned in queries from the producer namespace. -// // There should be a default route. -// assert!(producer_rx.next().now_or_never().is_none()); - -// // The route should be returned in queries from the same consumer -// // namespace. -// let consumer_config = consumer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?consumer_config); - -// detect_http_routes(&consumer_config, |routes| { -// let route = assert_singleton(routes); -// assert_route_name_eq(route, "foo-route"); -// }); - -// // The route should NOT be returned in queries from a different consumer -// // namespace. 
-// assert!(other_rx.next().now_or_never().is_none()); - -// delete_cluster_scoped(&client, consumer_ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn http_route_retries_and_timeouts_service() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, "my-svc", 4191).await; -// http_route_retries_and_timeouts(Resource::Service(svc), &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn http_route_retries_and_timeouts_egress_net() { -// with_temp_ns(|client, ns| async move { -// // Create an egress network -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// http_route_retries_and_timeouts(Resource::EgressNetwork(egress), &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn service_retries_and_timeouts() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let mut svc = mk_service(&ns, "my-svc", 4191); -// svc.annotations_mut() -// .insert("retry.linkerd.io/http".to_string(), "5xx".to_string()); -// svc.annotations_mut() -// .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); -// let svc = Resource::Service(create(&client, svc).await); - -// retries_and_timeouts(svc, &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_retries_and_timeouts() { -// with_temp_ns(|client, ns| async move { -// // Create a egress net -// let mut egress = mk_egress_net(&ns, "my-egress"); -// egress -// .annotations_mut() -// .insert("retry.linkerd.io/http".to_string(), "5xx".to_string()); -// egress -// .annotations_mut() -// .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); -// let egress = 
Resource::EgressNetwork(create(&client, egress).await); -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// retries_and_timeouts(egress, &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn service_http_route_reattachment() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, "my-svc", 4191).await; -// http_route_reattachment(Resource::Service(svc), &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_http_route_reattachment() { -// with_temp_ns(|client, ns| async move { -// // Create a egress net -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// http_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await; -// }) -// .await; -// } - -// /* Helpers */ -// struct HttpRouteBuilder(k8s::policy::HttpRoute); - -// fn mk_http_route(ns: &str, name: &str, parent: &Resource, port: Option) -> HttpRouteBuilder { -// use k8s::policy::httproute as api; - -// HttpRouteBuilder(api::HttpRoute { -// metadata: kube::api::ObjectMeta { -// namespace: Some(ns.to_string()), -// name: Some(name.to_string()), -// ..Default::default() -// }, -// spec: api::HttpRouteSpec { -// inner: api::CommonRouteSpec { -// parent_refs: Some(vec![api::ParentReference { -// group: Some(parent.group()), -// kind: Some(parent.kind()), -// namespace: Some(parent.namespace()), -// name: parent.name(), -// section_name: None, -// port, -// }]), -// }, -// hostnames: None, -// rules: Some(vec![api::HttpRouteRule { -// matches: Some(vec![api::HttpRouteMatch { -// path: Some(api::HttpPathMatch::Exact { -// value: "/foo".to_string(), -// }), -// headers: None, -// query_params: None, -// method: 
Some("GET".to_string()), -// }]), -// filters: None, -// backend_refs: None, -// timeouts: None, -// }]), -// }, -// status: None, -// }) -// } - -// impl HttpRouteBuilder { -// fn with_backends( -// self, -// backends: Option<&[Resource]>, -// backends_ns: Option, -// backend_filters: Option>, -// ) -> Self { -// let mut route = self.0; -// let backend_refs = backends.map(|backends| { -// backends -// .iter() -// .map(|backend| k8s::policy::httproute::HttpBackendRef { -// backend_ref: Some(k8s_gateway_api::BackendRef { -// weight: None, -// inner: k8s_gateway_api::BackendObjectReference { -// name: backend.name(), -// port: Some(8888), -// group: Some(backend.group()), -// kind: Some(backend.kind()), -// namespace: backends_ns.clone(), -// }, -// }), -// filters: backend_filters.clone(), -// }) -// .collect() -// }); -// route.spec.rules.iter_mut().flatten().for_each(|rule| { -// rule.backend_refs = backend_refs.clone(); -// }); -// Self(route) -// } - -// fn with_filters(self, filters: Option>) -> Self { -// let mut route = self.0; -// route -// .spec -// .rules -// .iter_mut() -// .flatten() -// .for_each(|rule| rule.filters = filters.clone()); -// Self(route) -// } - -// fn with_annotations(self, annotations: BTreeMap) -> Self { -// let mut route = self.0; -// route.metadata.annotations = Some(annotations); -// Self(route) -// } - -// fn build(self) -> k8s::policy::HttpRoute { -// self.0 -// } -// } - -// fn mk_empty_http_route( -// ns: &str, -// name: &str, -// parent: &Resource, -// port: u16, -// ) -> k8s::policy::HttpRoute { -// use k8s::policy::httproute as api; -// api::HttpRoute { -// metadata: kube::api::ObjectMeta { -// namespace: Some(ns.to_string()), -// name: Some(name.to_string()), -// ..Default::default() -// }, -// spec: api::HttpRouteSpec { -// inner: api::CommonRouteSpec { -// parent_refs: Some(vec![api::ParentReference { -// group: Some(parent.group()), -// kind: Some(parent.kind()), -// namespace: Some(parent.namespace()), -// name: 
parent.name(), -// section_name: None, -// port: Some(port), -// }]), -// }, -// hostnames: None, -// rules: Some(vec![]), -// }, -// status: None, -// } -// } - -// async fn parent_with_no_http_routes(parent: Resource, client: &kube::Client, ns: &str) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // There should be a default route. -// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &parent, 4191); -// }); -// } - -// async fn parent_with_http_route_without_rules(parent: Resource, client: &kube::Client, ns: &str) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // There should be a default route. -// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &parent, 4191); -// }); - -// let _route = create(client, mk_empty_http_route(ns, "foo-route", &parent, 4191)).await; -// await_route_status(client, ns, "foo-route").await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // There should be a route with no rules. 
-// detect_http_routes(&config, |routes| { -// let route = assert_route_attached(routes, &parent); -// assert_eq!(route.rules.len(), 0); -// }); -// } - -// async fn parent_with_http_routes_without_backends( -// parent: Resource, -// client: &kube::Client, -// ns: &str, -// ) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // There should be a default route. -// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &parent, 4191); -// }); - -// let _route = create( -// client, -// mk_http_route(ns, "foo-route", &parent, Some(4191)).build(), -// ) -// .await; -// await_route_status(client, ns, "foo-route").await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // There should be a route with the logical backend. -// detect_http_routes(&config, |routes| { -// let route = assert_route_attached(routes, &parent); -// let backends = route_backends_first_available(route); -// let backend = assert_singleton(backends); -// assert_backend_matches_parent(backend, &parent, 4191); -// }); -// } - -// async fn parent_with_http_routes_with_backend( -// parent: Resource, -// rule_backend: Resource, -// client: &kube::Client, -// ns: &str, -// ) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // There should be a default route. 
-// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &parent, 4191); -// }); - -// let backends = [rule_backend.clone()]; -// let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( -// Some(&backends), -// None, -// None, -// ); -// let _route = create(client, route.build()).await; -// await_route_status(client, ns, "foo-route").await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // There should be a route with a backend with no filters. -// detect_http_routes(&config, |routes| { -// let route = assert_route_attached(routes, &parent); -// let backends = route_backends_random_available(route); -// let backend = assert_singleton(backends); -// assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &rule_backend, 8888); -// let filters = &backend.backend.as_ref().unwrap().filters; -// assert_eq!(filters.len(), 0); -// }); -// } - -// async fn parent_with_http_routes_with_invalid_backend( -// parent: Resource, -// backend: Resource, -// client: &kube::Client, -// ns: &str, -// ) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // There should be a default route. 
-// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &parent, 4191); -// }); - -// let backends = [backend]; -// let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( -// Some(&backends), -// None, -// None, -// ); -// let _route = create(client, route.build()).await; -// await_route_status(client, ns, "foo-route").await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // There should be a route with a backend. -// detect_http_routes(&config, |routes| { -// let route = assert_route_attached(routes, &parent); -// let backends = route_backends_random_available(route); -// let backend = assert_singleton(backends); -// assert_backend_has_failure_filter(backend); -// }); -// } - -// async fn parent_with_multiple_http_routes(parent: Resource, client: &kube::Client, ns: &str) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // There should be a default route. -// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &parent, 4191); -// }); - -// // Routes should be returned in sorted order by creation timestamp then -// // name. To ensure that this test isn't timing dependant, routes should -// // be created in alphabetical order. -// let _a_route = create( -// client, -// mk_http_route(ns, "a-route", &parent, Some(4191)).build(), -// ) -// .await; -// await_route_status(client, ns, "a-route").await; - -// // First route update. 
-// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// let _b_route = create( -// client, -// mk_http_route(ns, "b-route", &parent, Some(4191)).build(), -// ) -// .await; -// await_route_status(client, ns, "b-route").await; - -// // Second route update. -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// detect_http_routes(&config, |routes| { -// let num_routes = match parent { -// Resource::EgressNetwork(_) => 3, // three routes for egress net 2 configured + 1 default -// Resource::Service(_) => 2, // two routes for service -// }; -// assert_eq!(routes.len(), num_routes); -// assert_eq!(route_name(&routes[0]), "a-route"); -// assert_eq!(route_name(&routes[1]), "b-route"); -// }); -// } - -// async fn parent_with_consecutive_failure_accrual( -// parent: Resource, -// client: &kube::Client, -// ns: &str, -// ) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// detect_failure_accrual(&config, |accrual| { -// let consecutive = failure_accrual_consecutive(accrual); -// assert_eq!(8, consecutive.max_failures); -// assert_eq!( -// &grpc::outbound::ExponentialBackoff { -// min_backoff: Some(Duration::from_secs(10).try_into().unwrap()), -// max_backoff: Some(Duration::from_secs(600).try_into().unwrap()), -// jitter_ratio: 1.0_f32, -// }, -// consecutive -// .backoff -// .as_ref() -// .expect("backoff must be configured") -// ); -// }); -// } - -// async fn parent_with_consecutive_failure_accrual_defaults_no_config( -// parent: Resource, -// 
client: &kube::Client, -// ns: &str, -// ) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// // Expect default max_failures and default backoff -// detect_failure_accrual(&config, |accrual| { -// let consecutive = failure_accrual_consecutive(accrual); -// assert_eq!(7, consecutive.max_failures); -// assert_default_accrual_backoff!(consecutive -// .backoff -// .as_ref() -// .expect("backoff must be configured")); -// }); -// } - -// async fn parent_with_consecutive_failure_accrual_defaults_max_fails( -// parent: Resource, -// client: &kube::Client, -// ns: &str, -// ) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// // Expect default backoff and overridden max_failures -// detect_failure_accrual(&config, |accrual| { -// let consecutive = failure_accrual_consecutive(accrual); -// assert_eq!(8, consecutive.max_failures); -// assert_default_accrual_backoff!(consecutive -// .backoff -// .as_ref() -// .expect("backoff must be configured")); -// }); -// } - -// async fn parent_with_consecutive_failure_accrual_defaults_max_jitter( -// parent: Resource, -// client: &kube::Client, -// ns: &str, -// ) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// // Expect defaults for everything except for the jitter ratio -// detect_failure_accrual(&config, |accrual| { -// let consecutive = failure_accrual_consecutive(accrual); -// assert_eq!(7, consecutive.max_failures); -// assert_eq!( -// 
&grpc::outbound::ExponentialBackoff { -// min_backoff: Some(Duration::from_secs(1).try_into().unwrap()), -// max_backoff: Some(Duration::from_secs(60).try_into().unwrap()), -// jitter_ratio: 1.0_f32, -// }, -// consecutive -// .backoff -// .as_ref() -// .expect("backoff must be configured") -// ); -// }); -// } - -// async fn parent_with_default_failure_accrual( -// parent_default_config: Resource, -// parent_max_failures: Resource, -// client: &kube::Client, -// ns: &str, -// ) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent_default_config, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// // Expect failure accrual config to be default (no failure accrual) -// detect_failure_accrual(&config, |accrual| { -// assert!( -// accrual.is_none(), -// "consecutive failure accrual should not be configured for service" -// ); -// }); - -// let mut rx = retry_watch_outbound_policy(client, ns, &parent_max_failures, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// // Expect failure accrual config to be default (no failure accrual) -// detect_failure_accrual(&config, |accrual| { -// assert!( -// accrual.is_none(), -// "consecutive failure accrual should not be configured for service" -// ) -// }); -// } - -// async fn opaque_parent(parent: Resource, client: &kube::Client, ns: &str) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// // Proxy protocol should be opaque. 
-// match config.protocol.unwrap().kind.unwrap() { -// grpc::outbound::proxy_protocol::Kind::Opaque(_) => {} -// _ => panic!("proxy protocol must be Opaque"), -// }; -// } - -// async fn route_with_filters(parent: Resource, backend: Resource, client: &kube::Client, ns: &str) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// // There should be a default route. -// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &parent, 4191); -// }); - -// let backends = [backend.clone()]; -// let route = mk_http_route(ns, "foo-route", &parent, Some(4191)) -// .with_backends(Some(&backends), None, None) -// .with_filters(Some(vec![ -// k8s::policy::httproute::HttpRouteFilter::RequestHeaderModifier { -// request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { -// set: Some(vec![k8s_gateway_api::HttpHeader { -// name: "set".to_string(), -// value: "set-value".to_string(), -// }]), -// add: Some(vec![k8s_gateway_api::HttpHeader { -// name: "add".to_string(), -// value: "add-value".to_string(), -// }]), -// remove: Some(vec!["remove".to_string()]), -// }, -// }, -// k8s::policy::httproute::HttpRouteFilter::RequestRedirect { -// request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { -// scheme: Some("http".to_string()), -// hostname: Some("host".to_string()), -// path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { -// replace_prefix_match: "/path".to_string(), -// }), -// port: Some(5555), -// status_code: Some(302), -// }, -// }, -// ])); -// let _route = create(client, route.build()).await; -// await_route_status(client, ns, "foo-route").await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// 
tracing::trace!(?config); - -// // There should be a route with filters. -// detect_http_routes(&config, |routes| { -// let route = assert_route_attached(routes, &parent); -// let rule = assert_singleton(&route.rules); -// let filters = &rule.filters; -// assert_eq!( -// *filters, -// vec![ -// grpc::outbound::http_route::Filter { -// kind: Some( -// grpc::outbound::http_route::filter::Kind::RequestHeaderModifier( -// grpc::http_route::RequestHeaderModifier { -// add: Some(grpc::http_types::Headers { -// headers: vec![grpc::http_types::headers::Header { -// name: "add".to_string(), -// value: "add-value".into(), -// }] -// }), -// set: Some(grpc::http_types::Headers { -// headers: vec![grpc::http_types::headers::Header { -// name: "set".to_string(), -// value: "set-value".into(), -// }] -// }), -// remove: vec!["remove".to_string()], -// } -// ) -// ) -// }, -// grpc::outbound::http_route::Filter { -// kind: Some(grpc::outbound::http_route::filter::Kind::Redirect( -// grpc::http_route::RequestRedirect { -// scheme: Some(grpc::http_types::Scheme { -// r#type: Some(grpc::http_types::scheme::Type::Registered( -// grpc::http_types::scheme::Registered::Http.into(), -// )) -// }), -// host: "host".to_string(), -// path: Some(linkerd2_proxy_api::http_route::PathModifier { -// replace: Some( -// linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( -// "/path".to_string() -// ) -// ) -// }), -// port: 5555, -// status: 302, -// } -// )) -// } -// ] -// ); -// }); -// } - -// async fn backend_with_filters( -// parent: Resource, -// backend_for_parent: Resource, -// client: &kube::Client, -// ns: &str, -// ) { -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// // There should be a default route. 
-// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &parent, 4191); -// }); - -// let backends = [backend_for_parent.clone()]; -// let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( -// Some(&backends), -// None, -// Some(vec![ -// k8s_gateway_api::HttpRouteFilter::RequestHeaderModifier { -// request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { -// set: Some(vec![k8s_gateway_api::HttpHeader { -// name: "set".to_string(), -// value: "set-value".to_string(), -// }]), -// add: Some(vec![k8s_gateway_api::HttpHeader { -// name: "add".to_string(), -// value: "add-value".to_string(), -// }]), -// remove: Some(vec!["remove".to_string()]), -// }, -// }, -// k8s_gateway_api::HttpRouteFilter::RequestRedirect { -// request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { -// scheme: Some("http".to_string()), -// hostname: Some("host".to_string()), -// path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { -// replace_prefix_match: "/path".to_string(), -// }), -// port: Some(5555), -// status_code: Some(302), -// }, -// }, -// ]), -// ); -// let _route = create(client, route.build()).await; -// await_route_status(client, ns, "foo-route").await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// // There should be a route without rule filters. 
-// detect_http_routes(&config, |routes| { -// let route = assert_route_attached(routes, &parent); -// let rule = assert_singleton(&route.rules); -// assert_eq!(rule.filters.len(), 0); -// let backends = route_backends_random_available(route); -// let backend = assert_singleton(backends); -// assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_for_parent, 8888); -// let filters = &backend.backend.as_ref().unwrap().filters; -// assert_eq!( -// *filters, -// vec![ -// grpc::outbound::http_route::Filter { -// kind: Some( -// grpc::outbound::http_route::filter::Kind::RequestHeaderModifier( -// grpc::http_route::RequestHeaderModifier { -// add: Some(grpc::http_types::Headers { -// headers: vec![grpc::http_types::headers::Header { -// name: "add".to_string(), -// value: "add-value".into(), -// }] -// }), -// set: Some(grpc::http_types::Headers { -// headers: vec![grpc::http_types::headers::Header { -// name: "set".to_string(), -// value: "set-value".into(), -// }] -// }), -// remove: vec!["remove".to_string()], -// } -// ) -// ) -// }, -// grpc::outbound::http_route::Filter { -// kind: Some(grpc::outbound::http_route::filter::Kind::Redirect( -// grpc::http_route::RequestRedirect { -// scheme: Some(grpc::http_types::Scheme { -// r#type: Some(grpc::http_types::scheme::Type::Registered( -// grpc::http_types::scheme::Registered::Http.into(), -// )) -// }), -// host: "host".to_string(), -// path: Some(linkerd2_proxy_api::http_route::PathModifier { -// replace: Some( -// linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( -// "/path".to_string() -// ) -// ) -// }), -// port: 5555, -// status: 302, -// } -// )) -// } -// ] -// ); -// }); -// } - -// async fn http_route_retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { -// let _route = create( -// client, -// mk_http_route(ns, "foo-route", &parent, Some(4191)) -// .with_annotations( -// vec![ -// ("retry.linkerd.io/http".to_string(), "5xx".to_string()), -// 
("timeout.linkerd.io/response".to_string(), "10s".to_string()), -// ] -// .into_iter() -// .collect(), -// ) -// .build(), -// ) -// .await; - -// await_route_status(client, ns, "foo-route").await; - -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// detect_http_routes(&config, |routes| { -// let route = assert_route_attached(routes, &parent); -// let rule = assert_singleton(&route.rules); -// let conditions = rule -// .retry -// .as_ref() -// .expect("retry config expected") -// .conditions -// .as_ref() -// .expect("retry conditions expected"); -// let status_range = assert_singleton(&conditions.status_ranges); -// assert_eq!(status_range.start, 500); -// assert_eq!(status_range.end, 599); -// let timeout = rule -// .timeouts -// .as_ref() -// .expect("timeouts expected") -// .response -// .as_ref() -// .expect("response timeout expected"); -// assert_eq!(timeout.seconds, 10); -// }); -// } - -// async fn retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { -// let _route = create( -// client, -// mk_http_route(ns, "foo-route", &parent, Some(4191)) -// .with_annotations( -// vec![ -// // Route annotations override the timeout config specified -// // on the service. 
-// ("timeout.linkerd.io/request".to_string(), "5s".to_string()), -// ] -// .into_iter() -// .collect(), -// ) -// .build(), -// ) -// .await; -// await_route_status(client, ns, "foo-route").await; - -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// detect_http_routes(&config, |routes| { -// let route = assert_route_attached(routes, &parent); -// let rule = assert_singleton(&route.rules); -// let conditions = rule -// .retry -// .as_ref() -// .expect("retry config expected") -// .conditions -// .as_ref() -// .expect("retry conditions expected"); -// let status_range = assert_singleton(&conditions.status_ranges); -// // Retry config inherited from the service. -// assert_eq!(status_range.start, 500); -// assert_eq!(status_range.end, 599); -// let timeouts = rule.timeouts.as_ref().expect("timeouts expected"); -// // Service timeout config overridden by route timeout config. -// assert_eq!(timeouts.response, None); -// let request_timeout = timeouts.request.as_ref().expect("request timeout expected"); -// assert_eq!(request_timeout.seconds, 5); -// }); -// } - -// async fn http_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { -// let mut route = create(client, mk_empty_http_route(ns, "foo-route", &parent, 4191)).await; -// await_route_status(client, ns, "foo-route").await; - -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // The route should be attached. 
-// detect_http_routes(&config, |routes| { -// let route: &grpc::outbound::HttpRoute = assert_route_attached(routes, &parent); -// assert_route_name_eq(route, "foo-route"); -// }); - -// route -// .spec -// .inner -// .parent_refs -// .as_mut() -// .unwrap() -// .first_mut() -// .unwrap() -// .name = "other".to_string(); -// update(client, route.clone()).await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // The route should be unattached and the default route should be present. -// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &parent, 4191); -// }); - -// route -// .spec -// .inner -// .parent_refs -// .as_mut() -// .unwrap() -// .first_mut() -// .unwrap() -// .name = parent.name(); -// update(client, route).await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // The route should be attached again. 
-// detect_http_routes(&config, |routes| { -// let route = assert_route_attached(routes, &parent); -// assert_route_name_eq(route, "foo-route"); -// }); -// } diff --git a/policy-test/tests/outbound_api_tcp.rs b/policy-test/tests/outbound_api_tcp.rs index 97ad040ea0270..90e19f77a725d 100644 --- a/policy-test/tests/outbound_api_tcp.rs +++ b/policy-test/tests/outbound_api_tcp.rs @@ -104,490 +104,3 @@ async fn multiple_tcp_routes() { test::().await; test::().await; } - -// #[tokio::test(flavor = "current_thread")] -// async fn tcp_route_with_no_port() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - -// let _route = create( -// &client, -// mk_tcp_route(&ns, "foo-route", &svc, None) -// .with_backends(&[svc.clone()]) -// .build(), -// ) -// .await; -// await_tcp_route_status(&client, &ns, "foo-route").await; - -// let mut rx_4191 = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; -// let mut rx_9999 = retry_watch_outbound_policy(&client, &ns, &svc, 9999).await; - -// let config_4191 = rx_4191 -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config_4191); - -// let routes = tcp_routes(&config_4191); -// let route = assert_singleton(routes); -// assert_tcp_route_name_eq(route, "foo-route"); - -// let config_9999 = rx_9999 -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config_9999); - -// let routes = tcp_routes(&config_9999); -// let route = assert_singleton(routes); -// assert_tcp_route_name_eq(route, "foo-route"); -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn producer_route() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - -// // A route 
created in the same namespace as its parent service is called -// // a producer route. It should be returned in outbound policy requests -// // for that service from ALL namespaces. -// let _route = create( -// &client, -// mk_tcp_route(&ns, "foo-route", &svc, Some(4191)) -// .with_backends(&[svc.clone()]) -// .build(), -// ) -// .await; -// await_tcp_route_status(&client, &ns, "foo-route").await; - -// let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; -// let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - -// let producer_config = producer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?producer_config); -// let consumer_config = consumer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?consumer_config); - -// let routes = tcp_routes(&producer_config); -// let route = assert_singleton(routes); -// assert_tcp_route_name_eq(route, "foo-route"); - -// let routes = tcp_routes(&consumer_config); -// let route = assert_singleton(routes); -// assert_tcp_route_name_eq(route, "foo-route"); -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn pre_existing_producer_route() { -// // We test the scenario where outbound policy watches are initiated after -// // a produce route already exists. -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - -// // A route created in the same namespace as its parent service is called -// // a producer route. It should be returned in outbound policy requests -// // for that service from ALL namespaces. 
-// let _route = create( -// &client, -// mk_tcp_route(&ns, "foo-route", &svc, Some(4191)) -// .with_backends(&[svc.clone()]) -// .build(), -// ) -// .await; -// await_tcp_route_status(&client, &ns, "foo-route").await; - -// let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; -// let producer_config = producer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?producer_config); - -// let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; -// let consumer_config = consumer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?consumer_config); - -// // The route should be returned in queries from the producer namespace. -// let routes = tcp_routes(&producer_config); -// let route = assert_singleton(routes); -// assert_tcp_route_name_eq(route, "foo-route"); - -// // The route should be returned in queries from a consumer namespace. 
-// let routes = tcp_routes(&consumer_config); -// let route = assert_singleton(routes); -// assert_tcp_route_name_eq(route, "foo-route"); -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn consumer_route() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - -// let consumer_ns_name = format!("{}-consumer", ns); -// let consumer_ns = create_cluster_scoped( -// &client, -// k8s::Namespace { -// metadata: k8s::ObjectMeta { -// name: Some(consumer_ns_name.clone()), -// labels: Some(convert_args!(btreemap!( -// "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), -// ))), -// ..Default::default() -// }, -// ..Default::default() -// }, -// ) -// .await; - -// let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; -// let producer_config = producer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?producer_config); - -// let mut consumer_rx = -// retry_watch_outbound_policy(&client, &consumer_ns_name, &svc, 4191).await; -// let consumer_config = consumer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?consumer_config); - -// let mut other_rx = retry_watch_outbound_policy(&client, "other_ns", &svc, 4191).await; -// let other_config = other_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?other_config); - -// // A route created in a different namespace as its parent service is -// // called a consumer route. It should be returned in outbound policy -// // requests for that service ONLY when the request comes from the -// // consumer namespace. 
-// let _route = create( -// &client, -// mk_tcp_route(&consumer_ns_name, "foo-route", &svc, Some(4191)) -// .with_backends(&[svc]) -// .build(), -// ) -// .await; -// await_tcp_route_status(&client, &consumer_ns_name, "foo-route").await; - -// // The route should NOT be returned in queries from the producer namespace. -// // There should be a default route. -// assert!(producer_rx.next().now_or_never().is_none()); - -// // The route should be returned in queries from the same consumer -// // namespace. -// let consumer_config = consumer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?consumer_config); - -// let routes = tcp_routes(&consumer_config); -// let route = assert_singleton(routes); -// assert_tcp_route_name_eq(route, "foo-route"); - -// // The route should NOT be returned in queries from a different consumer -// // namespace. -// assert!(other_rx.next().now_or_never().is_none()); - -// delete_cluster_scoped(&client, consumer_ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn service_tcp_route_reattachment() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, "my-svc", 4191).await; -// tcp_route_reattachment(Resource::Service(svc), &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_tcp_route_reattachment() { -// with_temp_ns(|client, ns| async move { -// // Create a egress net -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// tcp_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await; -// }) -// .await; -// } - -// /* Helpers */ -// struct TcpRouteBuilder(k8s_gateway_api::TcpRoute); - -// fn mk_tcp_route(ns: &str, name: &str, parent: 
&Resource, port: Option) -> TcpRouteBuilder { -// use k8s_gateway_api as api; - -// TcpRouteBuilder(api::TcpRoute { -// metadata: kube::api::ObjectMeta { -// namespace: Some(ns.to_string()), -// name: Some(name.to_string()), -// ..Default::default() -// }, -// spec: api::TcpRouteSpec { -// inner: api::CommonRouteSpec { -// parent_refs: Some(vec![api::ParentReference { -// group: Some(parent.group()), -// kind: Some(parent.kind()), -// namespace: Some(parent.namespace()), -// name: parent.name(), -// section_name: None, -// port, -// }]), -// }, -// rules: vec![api::TcpRouteRule { -// backend_refs: Vec::default(), -// }], -// }, -// status: None, -// }) -// } - -// impl TcpRouteBuilder { -// fn with_backends(self, backends: &[Resource]) -> Self { -// let mut route = self.0; -// let backend_refs: Vec<_> = backends -// .iter() -// .map(|backend| k8s_gateway_api::BackendRef { -// weight: None, -// inner: k8s_gateway_api::BackendObjectReference { -// name: backend.name(), -// port: Some(8888), -// group: Some(backend.group()), -// kind: Some(backend.kind()), -// namespace: Some(backend.namespace()), -// }, -// }) -// .collect(); -// route.spec.rules.iter_mut().for_each(|rule| { -// rule.backend_refs = backend_refs.clone(); -// }); -// Self(route) -// } - -// fn build(self) -> k8s_gateway_api::TcpRoute { -// self.0 -// } -// } - -// async fn parent_with_tcp_routes_with_backend( -// parent: Resource, -// rule_backend: Resource, -// client: &kube::Client, -// ns: &str, -// ) { -// let backends = [rule_backend.clone()]; -// let route = mk_tcp_route(ns, "foo-route", &parent, Some(4191)).with_backends(&backends); -// let _route = create(client, route.build()).await; -// await_tcp_route_status(client, ns, "foo-route").await; - -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// 
assert_resource_meta(&config.metadata, &parent, 4191); - -// let routes = tcp_routes(&config); -// let route = assert_singleton(routes); -// let backends = tcp_route_backends_random_available(route); -// let backend = assert_singleton(backends); -// assert_tcp_backend_matches_parent(backend.backend.as_ref().unwrap(), &rule_backend, 8888); -// } - -// async fn parent_with_tcp_routes_with_invalid_backend( -// parent: Resource, -// backend: Resource, -// client: &kube::Client, -// ns: &str, -// ) { -// let backends = [backend]; -// let route = mk_tcp_route(ns, "foo-route", &parent, Some(4191)).with_backends(&backends); -// let _route = create(client, route.build()).await; -// await_tcp_route_status(client, ns, "foo-route").await; - -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// let routes = tcp_routes(&config); -// let route = assert_singleton(routes); -// let backends = tcp_route_backends_random_available(route); -// assert_singleton(backends); -// } - -// async fn parent_with_multiple_tcp_routes(parent: Resource, client: &kube::Client, ns: &str) { -// // Routes should be returned in sorted order by creation timestamp then -// // name. To ensure that this test isn't timing dependant, routes should -// // be created in alphabetical order. -// let _a_route = create( -// client, -// mk_tcp_route(ns, "a-route", &parent, Some(4191)) -// .with_backends(&[parent.clone()]) -// .build(), -// ) -// .await; -// await_tcp_route_status(client, ns, "a-route").await; - -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - -// // First route update. 
-// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// let _b_route = create( -// client, -// mk_tcp_route(ns, "b-route", &parent, Some(4191)) -// .with_backends(&[parent.clone()]) -// .build(), -// ) -// .await; -// await_tcp_route_status(client, ns, "b-route").await; - -// // Second route update. -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// let routes = tcp_routes(&config); -// assert_eq!(routes.len(), 1); -// assert_eq!(tcp_route_name(&routes[0]), "a-route"); -// } - -// async fn tcp_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { -// let mut route = create( -// client, -// mk_tcp_route(ns, "foo-route", &parent, Some(4191)) -// .with_backends(&[parent.clone()]) -// .build(), -// ) -// .await; -// await_tcp_route_status(client, ns, "foo-route").await; - -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // The route should be attached. 
-// let routes = tcp_routes(&config); -// let tcp_route = assert_singleton(routes); -// assert_tcp_route_name_eq(tcp_route, "foo-route"); - -// route -// .spec -// .inner -// .parent_refs -// .as_mut() -// .unwrap() -// .first_mut() -// .unwrap() -// .name = "other".to_string(); -// update(client, route.clone()).await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // The route should be unattached and the default route should be present. -// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &parent, 4191); -// }); - -// route -// .spec -// .inner -// .parent_refs -// .as_mut() -// .unwrap() -// .first_mut() -// .unwrap() -// .name = parent.name(); -// update(client, route).await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // The route should be attached again. -// // The route should be attached. 
-// let routes = tcp_routes(&config); -// let tcp_route = assert_singleton(routes); -// assert_tcp_route_name_eq(tcp_route, "foo-route"); -// } diff --git a/policy-test/tests/outbound_api_tls.rs b/policy-test/tests/outbound_api_tls.rs deleted file mode 100644 index 4f69549f86d9e..0000000000000 --- a/policy-test/tests/outbound_api_tls.rs +++ /dev/null @@ -1,501 +0,0 @@ -// use futures::prelude::*; -// use linkerd_policy_controller_k8s_api as k8s; -// use linkerd_policy_test::{ -// assert_resource_meta, assert_status_accepted, await_egress_net_status, await_tls_route_status, -// create, create_cluster_scoped, create_egress_network, create_service, delete_cluster_scoped, -// grpc, mk_egress_net, mk_service, outbound_api::*, update, with_temp_ns, Resource, -// }; -// use maplit::{btreemap, convert_args}; - -// #[tokio::test(flavor = "current_thread")] -// async fn tls_route_with_no_port() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - -// let _route = create( -// &client, -// mk_tls_route(&ns, "foo-route", &svc, None) -// .with_backends(&[svc.clone()]) -// .build(), -// ) -// .await; -// await_tls_route_status(&client, &ns, "foo-route").await; - -// let mut rx_4191 = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; -// let mut rx_9999 = retry_watch_outbound_policy(&client, &ns, &svc, 9999).await; - -// let config_4191 = rx_4191 -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config_4191); - -// let routes = tls_routes(&config_4191); -// let route = assert_singleton(routes); -// assert_tls_route_name_eq(route, "foo-route"); - -// let config_9999 = rx_9999 -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config_9999); - -// let routes = tls_routes(&config_9999); -// let route = 
assert_singleton(routes); -// assert_tls_route_name_eq(route, "foo-route"); -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn producer_route() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - -// // A route created in the same namespace as its parent service is called -// // a producer route. It should be returned in outbound policy requests -// // for that service from ALL namespaces. -// let _route = create( -// &client, -// mk_tls_route(&ns, "foo-route", &svc, Some(4191)) -// .with_backends(&[svc.clone()]) -// .build(), -// ) -// .await; -// await_tls_route_status(&client, &ns, "foo-route").await; - -// let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; -// let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - -// let producer_config = producer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?producer_config); -// let consumer_config = consumer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?consumer_config); - -// let routes = tls_routes(&producer_config); -// let route = assert_singleton(routes); -// assert_tls_route_name_eq(route, "foo-route"); - -// let routes = tls_routes(&consumer_config); -// let route = assert_singleton(routes); -// assert_tls_route_name_eq(route, "foo-route"); -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn pre_existing_producer_route() { -// // We test the scenario where outbound policy watches are initiated after -// // a produce route already exists. 
-// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - -// // A route created in the same namespace as its parent service is called -// // a producer route. It should be returned in outbound policy requests -// // for that service from ALL namespaces. -// let _route = create( -// &client, -// mk_tls_route(&ns, "foo-route", &svc, Some(4191)) -// .with_backends(&[svc.clone()]) -// .build(), -// ) -// .await; -// await_tls_route_status(&client, &ns, "foo-route").await; - -// let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; -// let producer_config = producer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?producer_config); - -// let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; -// let consumer_config = consumer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?consumer_config); - -// // The route should be returned in queries from the producer namespace. -// let routes = tls_routes(&producer_config); -// let route = assert_singleton(routes); -// assert_tls_route_name_eq(route, "foo-route"); - -// // The route should be returned in queries from a consumer namespace. 
-// let routes = tls_routes(&consumer_config); -// let route = assert_singleton(routes); -// assert_tls_route_name_eq(route, "foo-route"); -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn consumer_route() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); - -// let consumer_ns_name = format!("{}-consumer", ns); -// let consumer_ns = create_cluster_scoped( -// &client, -// k8s::Namespace { -// metadata: k8s::ObjectMeta { -// name: Some(consumer_ns_name.clone()), -// labels: Some(convert_args!(btreemap!( -// "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), -// ))), -// ..Default::default() -// }, -// ..Default::default() -// }, -// ) -// .await; - -// let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; -// let producer_config = producer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?producer_config); - -// let mut consumer_rx = -// retry_watch_outbound_policy(&client, &consumer_ns_name, &svc, 4191).await; -// let consumer_config = consumer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?consumer_config); - -// let mut other_rx = retry_watch_outbound_policy(&client, "other_ns", &svc, 4191).await; -// let other_config = other_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?other_config); - -// // A route created in a different namespace as its parent service is -// // called a consumer route. It should be returned in outbound policy -// // requests for that service ONLY when the request comes from the -// // consumer namespace. 
-// let _route = create( -// &client, -// mk_tls_route(&consumer_ns_name, "foo-route", &svc, Some(4191)) -// .with_backends(&[svc]) -// .build(), -// ) -// .await; -// await_tls_route_status(&client, &consumer_ns_name, "foo-route").await; - -// // The route should NOT be returned in queries from the producer namespace. -// // There should be a default route. -// assert!(producer_rx.next().now_or_never().is_none()); - -// // The route should be returned in queries from the same consumer -// // namespace. -// let consumer_config = consumer_rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?consumer_config); - -// let routes = tls_routes(&consumer_config); -// let route = assert_singleton(routes); -// assert_tls_route_name_eq(route, "foo-route"); - -// // The route should NOT be returned in queries from a different consumer -// // namespace. -// assert!(other_rx.next().now_or_never().is_none()); - -// delete_cluster_scoped(&client, consumer_ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn service_tls_route_reattachment() { -// with_temp_ns(|client, ns| async move { -// // Create a service -// let svc = create_service(&client, &ns, "my-svc", 4191).await; -// tls_route_reattachment(Resource::Service(svc), &client, &ns).await; -// }) -// .await; -// } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_tls_route_reattachment() { -// with_temp_ns(|client, ns| async move { -// // Create a egress net -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// tls_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await; -// }) -// .await; -// } - -// /* Helpers */ -// struct TlsRouteBuilder(k8s_gateway_api::TlsRoute); - -// fn mk_tls_route(ns: &str, name: &str, parent: 
&Resource, port: Option) -> TlsRouteBuilder { -// use k8s_gateway_api as api; - -// TlsRouteBuilder(api::TlsRoute { -// metadata: kube::api::ObjectMeta { -// namespace: Some(ns.to_string()), -// name: Some(name.to_string()), -// ..Default::default() -// }, -// spec: api::TlsRouteSpec { -// inner: api::CommonRouteSpec { -// parent_refs: Some(vec![api::ParentReference { -// group: Some(parent.group()), -// kind: Some(parent.kind()), -// namespace: Some(parent.namespace()), -// name: parent.name(), -// section_name: None, -// port, -// }]), -// }, -// hostnames: None, -// rules: vec![api::TlsRouteRule { -// backend_refs: Vec::default(), -// }], -// }, -// status: None, -// }) -// } - -// impl TlsRouteBuilder { -// fn with_backends(self, backends: &[Resource]) -> Self { -// let mut route = self.0; -// let backend_refs: Vec<_> = backends -// .iter() -// .map(|backend| k8s_gateway_api::BackendRef { -// weight: None, -// inner: k8s_gateway_api::BackendObjectReference { -// name: backend.name(), -// port: Some(8888), -// group: Some(backend.group()), -// kind: Some(backend.kind()), -// namespace: Some(backend.namespace()), -// }, -// }) -// .collect(); -// route.spec.rules.iter_mut().for_each(|rule| { -// rule.backend_refs = backend_refs.clone(); -// }); -// Self(route) -// } - -// fn build(self) -> k8s_gateway_api::TlsRoute { -// self.0 -// } -// } - -// async fn parent_with_tls_routes_with_backend( -// parent: Resource, -// rule_backend: Resource, -// client: &kube::Client, -// ns: &str, -// ) { -// let backends = [rule_backend.clone()]; -// let route = mk_tls_route(ns, "foo-route", &parent, Some(4191)).with_backends(&backends); -// let _route = create(client, route.build()).await; -// await_tls_route_status(client, ns, "foo-route").await; - -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// 
tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// let routes = tls_routes(&config); -// let route = assert_route_attached(routes, &parent); -// let backends = tls_route_backends_random_available(route); -// let backend = assert_singleton(backends); -// assert_tls_backend_matches_parent(backend.backend.as_ref().unwrap(), &rule_backend, 8888); -// } - -// async fn parent_with_tls_routes_with_invalid_backend( -// parent: Resource, -// backend: Resource, -// client: &kube::Client, -// ns: &str, -// ) { -// let backends = [backend]; -// let route = mk_tls_route(ns, "foo-route", &parent, Some(4191)).with_backends(&backends); -// let _route = create(client, route.build()).await; -// await_tls_route_status(client, ns, "foo-route").await; - -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// let routes = tls_routes(&config); -// let route = assert_route_attached(routes, &parent); -// let backends = tls_route_backends_random_available(route); -// assert_singleton(backends); -// } - -// async fn parent_with_multiple_tls_routes(parent: Resource, client: &kube::Client, ns: &str) { -// // Routes should be returned in sorted order by creation timestamp then -// // name. To ensure that this test isn't timing dependant, routes should -// // be created in alphabetical order. -// let _a_route = create( -// client, -// mk_tls_route(ns, "a-route", &parent, Some(4191)) -// .with_backends(&[parent.clone()]) -// .build(), -// ) -// .await; -// await_tls_route_status(client, ns, "a-route").await; - -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; - -// // First route update. 
-// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// let _b_route = create( -// client, -// mk_tls_route(ns, "b-route", &parent, Some(4191)) -// .with_backends(&[parent.clone()]) -// .build(), -// ) -// .await; -// await_tls_route_status(client, ns, "b-route").await; - -// // Second route update. -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// let routes = tls_routes(&config); -// let num_routes = match parent { -// Resource::EgressNetwork(_) => 3, // three routes for egress net 2 configured + 1 default -// Resource::Service(_) => 2, // two routes for service -// }; -// assert_eq!(routes.len(), num_routes); -// assert_eq!(tls_route_name(&routes[0]), "a-route"); -// assert_eq!(tls_route_name(&routes[1]), "b-route"); -// } - -// async fn tls_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { -// let mut route = create( -// client, -// mk_tls_route(ns, "foo-route", &parent, Some(4191)) -// .with_backends(&[parent.clone()]) -// .build(), -// ) -// .await; -// await_tls_route_status(client, ns, "foo-route").await; - -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // The route should be attached. 
-// let routes = tls_routes(&config); -// let tls_route: &grpc::outbound::TlsRoute = assert_route_attached(routes, &parent); -// assert_tls_route_name_eq(tls_route, "foo-route"); - -// route -// .spec -// .inner -// .parent_refs -// .as_mut() -// .unwrap() -// .first_mut() -// .unwrap() -// .name = "other".to_string(); -// update(client, route.clone()).await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // The route should be unattached and the default route should be present. -// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &parent, 4191); -// }); - -// route -// .spec -// .inner -// .parent_refs -// .as_mut() -// .unwrap() -// .first_mut() -// .unwrap() -// .name = parent.name(); -// update(client, route).await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // The route should be attached again. -// // The route should be attached. 
-// let routes = tls_routes(&config); -// let tls_route: &grpc::outbound::TlsRoute = assert_route_attached(routes, &parent); -// assert_tls_route_name_eq(tls_route, "foo-route"); -// } From ea87f3a14ebaf1e688ed646c371b9a4db9185b27 Mon Sep 17 00:00:00 2001 From: Alex Leong Date: Thu, 9 Jan 2025 02:14:17 +0000 Subject: [PATCH 8/9] make logging consistent Signed-off-by: Alex Leong --- policy-test/src/lib.rs | 2 +- policy-test/src/test_route.rs | 55 ++-- .../tests/inbound_http_route_status.rs | 2 +- policy-test/tests/outbound_api.rs | 47 ++-- .../tests/outbound_api_failure_accrual.rs | 34 +-- policy-test/tests/outbound_api_grpc.rs | 259 +----------------- policy-test/tests/outbound_api_http.rs | 40 +-- policy-test/tests/outbound_api_tcp.rs | 17 +- 8 files changed, 111 insertions(+), 345 deletions(-) diff --git a/policy-test/src/lib.rs b/policy-test/src/lib.rs index 7b16609bdebfc..6706f94ecf634 100644 --- a/policy-test/src/lib.rs +++ b/policy-test/src/lib.rs @@ -220,7 +220,7 @@ pub async fn await_route_accepted(client: &kube::Client, route: &R .conditions() .unwrap_or_default() .into_iter() - .map(|c| c.clone()) + .cloned() .collect::>(); is_status_accepted(&conditions) }) diff --git a/policy-test/src/test_route.rs b/policy-test/src/test_route.rs index 1624b139e9d85..7f895d82cbe43 100644 --- a/policy-test/src/test_route.rs +++ b/policy-test/src/test_route.rs @@ -190,17 +190,16 @@ impl TestRoute for gateway::HttpRoute { .inner .parents .iter() - .map(|parent_status| &parent_status.conditions) - .flatten() + .flat_map(|parent_status| &parent_status.conditions) .collect() }) } fn is_failure_filter(filter: &outbound::http_route::Filter) -> bool { - match filter.kind.as_ref().unwrap() { - outbound::http_route::filter::Kind::FailureInjector(_) => true, - _ => false, - } + matches!( + filter.kind.as_ref().unwrap(), + outbound::http_route::filter::Kind::FailureInjector(_) + ) } fn parents_mut(&mut self) -> Vec<&mut ParentReference> { @@ -324,17 +323,16 @@ impl TestRoute for 
policy::HttpRoute { .inner .parents .iter() - .map(|parent_status| &parent_status.conditions) - .flatten() + .flat_map(|parent_status| &parent_status.conditions) .collect() }) } fn is_failure_filter(filter: &outbound::http_route::Filter) -> bool { - match filter.kind.as_ref().unwrap() { - outbound::http_route::filter::Kind::FailureInjector(_) => true, - _ => false, - } + matches!( + filter.kind.as_ref().unwrap(), + outbound::http_route::filter::Kind::FailureInjector(_) + ) } fn parents_mut(&mut self) -> Vec<&mut ParentReference> { @@ -458,17 +456,16 @@ impl TestRoute for gateway::GrpcRoute { .inner .parents .iter() - .map(|parent_status| &parent_status.conditions) - .flatten() + .flat_map(|parent_status| &parent_status.conditions) .collect() }) } fn is_failure_filter(filter: &outbound::grpc_route::Filter) -> bool { - match filter.kind.as_ref().unwrap() { - outbound::grpc_route::filter::Kind::FailureInjector(_) => true, - _ => false, - } + matches!( + filter.kind.as_ref().unwrap(), + outbound::grpc_route::filter::Kind::FailureInjector(_) + ) } fn parents_mut(&mut self) -> Vec<&mut ParentReference> { @@ -580,17 +577,16 @@ impl TestRoute for gateway::TlsRoute { .inner .parents .iter() - .map(|parent_status| &parent_status.conditions) - .flatten() + .flat_map(|parent_status| &parent_status.conditions) .collect() }) } fn is_failure_filter(filter: &outbound::tls_route::Filter) -> bool { - match filter.kind.as_ref().unwrap() { - outbound::tls_route::filter::Kind::Invalid(_) => true, - _ => false, - } + matches!( + filter.kind.as_ref().unwrap(), + outbound::tls_route::filter::Kind::Invalid(_) + ) } fn parents_mut(&mut self) -> Vec<&mut ParentReference> { @@ -701,17 +697,16 @@ impl TestRoute for gateway::TcpRoute { .inner .parents .iter() - .map(|parent_status| &parent_status.conditions) - .flatten() + .flat_map(|parent_status| &parent_status.conditions) .collect() }) } fn is_failure_filter(filter: &outbound::opaque_route::Filter) -> bool { - match 
filter.kind.as_ref().unwrap() { - outbound::opaque_route::filter::Kind::Invalid(_) => true, - _ => false, - } + matches!( + filter.kind.as_ref().unwrap(), + outbound::opaque_route::filter::Kind::Invalid(_) + ) } fn parents_mut(&mut self) -> Vec<&mut ParentReference> { diff --git a/policy-test/tests/inbound_http_route_status.rs b/policy-test/tests/inbound_http_route_status.rs index ca453386d8617..b37030c05e50a 100644 --- a/policy-test/tests/inbound_http_route_status.rs +++ b/policy-test/tests/inbound_http_route_status.rs @@ -35,7 +35,7 @@ async fn inbound_accepted_parent() { }]; // Create a route that references the Server resource. - let route = create(&client, mk_route(&ns, "test-accepted-route", Some(srv_ref))).await; + let _route = create(&client, mk_route(&ns, "test-accepted-route", Some(srv_ref))).await; // Wait until route is updated with a status let statuses = await_route_status(&client, &ns, "test-accepted-route") .await diff --git a/policy-test/tests/outbound_api.rs b/policy-test/tests/outbound_api.rs index 1e5140420c745..9de3fe820663d 100644 --- a/policy-test/tests/outbound_api.rs +++ b/policy-test/tests/outbound_api.rs @@ -78,6 +78,10 @@ async fn parent_with_no_routes() { #[tokio::test(flavor = "current_thread")] async fn route_with_no_rules() { async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()) + ); with_temp_ns(|client, ns| async move { tracing::debug!( parent = %P::kind(&P::DynamicType::default()), @@ -482,6 +486,10 @@ async fn routes_with_invalid_backend() { #[tokio::test(flavor = "current_thread")] async fn multiple_routes() { async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()) + ); with_temp_ns(|client, ns| async move { tracing::debug!( parent = %P::kind(&P::DynamicType::default()), @@ -576,6 +584,9 @@ async fn multiple_routes() { #[tokio::test(flavor = "current_thread")] async fn 
opaque_service() { async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); with_temp_ns(|client, ns| async move { tracing::debug!( parent = %P::kind(&P::DynamicType::default()), @@ -614,11 +625,11 @@ async fn opaque_service() { #[tokio::test(flavor = "current_thread")] async fn route_with_no_port() { async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()) + ); with_temp_ns(|client, ns| async move { - tracing::debug!( - parent = %P::kind(&P::DynamicType::default()), - route = %R::kind(&R::DynamicType::default()), - ); // Create a parent let parent = create(&client, P::make_parent(&ns)).await; // Create a backend @@ -710,11 +721,11 @@ async fn route_with_no_port() { #[tokio::test(flavor = "current_thread")] async fn producer_route() { async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()) + ); with_temp_ns(|client, ns| async move { - tracing::debug!( - parent = %P::kind(&P::DynamicType::default()), - route = %R::kind(&R::DynamicType::default()), - ); // Create a parent let parent = create(&client, P::make_parent(&ns)).await; let port = 4191; @@ -809,13 +820,13 @@ async fn producer_route() { #[tokio::test(flavor = "current_thread")] async fn pre_existing_producer_route() { async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()) + ); // We test the scenario where outbound policy watches are initiated after // a produce route already exists. 
with_temp_ns(|client, ns| async move { - tracing::debug!( - parent = %P::kind(&P::DynamicType::default()), - route = %R::kind(&R::DynamicType::default()), - ); // Create a parent let parent = create(&client, P::make_parent(&ns)).await; let port = 4191; @@ -884,11 +895,11 @@ async fn pre_existing_producer_route() { #[tokio::test(flavor = "current_thread")] async fn consumer_route() { async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()), + ); with_temp_ns(|client, ns| async move { - tracing::debug!( - parent = %P::kind(&P::DynamicType::default()), - route = %R::kind(&R::DynamicType::default()), - ); // Create a parent let parent = create(&client, P::make_parent(&ns)).await; let port = 4191; @@ -1012,6 +1023,10 @@ async fn consumer_route() { #[tokio::test(flavor = "current_thread")] async fn route_reattachment() { async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()), + ); with_temp_ns(|client, ns| async move { // Create a parent let port = 4191; diff --git a/policy-test/tests/outbound_api_failure_accrual.rs b/policy-test/tests/outbound_api_failure_accrual.rs index 0016a7ba07758..26ea11c33bb0e 100644 --- a/policy-test/tests/outbound_api_failure_accrual.rs +++ b/policy-test/tests/outbound_api_failure_accrual.rs @@ -15,10 +15,10 @@ use maplit::btreemap; #[tokio::test(flavor = "current_thread")] async fn consecutive_failure_accrual() { async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); with_temp_ns(|client, ns| async move { - tracing::debug!( - parent = %P::kind(&P::DynamicType::default()), - ); // Create a parent let port = 4191; let mut parent = P::make_parent(&ns); @@ -67,10 +67,10 @@ async fn consecutive_failure_accrual() { #[tokio::test(flavor = "current_thread")] async fn consecutive_failure_accrual_defaults_no_config() { async fn test() { + tracing::debug!( + parent = 
%P::kind(&P::DynamicType::default()), + ); with_temp_ns(|client, ns| async move { - tracing::debug!( - parent = %P::kind(&P::DynamicType::default()), - ); // Create a service configured to do consecutive failure accrual, but // with no additional configuration let port = 4191; @@ -110,10 +110,10 @@ async fn consecutive_failure_accrual_defaults_no_config() { #[tokio::test(flavor = "current_thread")] async fn consecutive_failure_accrual_defaults_max_fails() { async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); with_temp_ns(|client, ns| async move { - tracing::debug!( - parent = %P::kind(&P::DynamicType::default()), - ); // Create a service configured to do consecutive failure accrual with // max number of failures and with default backoff let port = 4191; @@ -154,10 +154,10 @@ async fn consecutive_failure_accrual_defaults_max_fails() { #[tokio::test(flavor = "current_thread")] async fn consecutive_failure_accrual_defaults_jitter() { async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); with_temp_ns(|client, ns| async move { - tracing::debug!( - parent = %P::kind(&P::DynamicType::default()), - ); // Create a service configured to do consecutive failure accrual with // max number of failures and with default backoff let port = 4191; @@ -205,12 +205,12 @@ async fn consecutive_failure_accrual_defaults_jitter() { #[tokio::test(flavor = "current_thread")] async fn default_failure_accrual() { async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); with_temp_ns(|client, ns| async move { - tracing::debug!( - parent = %P::kind(&P::DynamicType::default()), - ); - // Create Service with consecutive failure accrual config for - // max_failures but no mode + // Create Service with consecutive failure accrual config for + // max_failures but no mode let port = 4191; let mut parent = P::make_parent(&ns); parent.meta_mut().annotations = Some(btreemap! 
{ diff --git a/policy-test/tests/outbound_api_grpc.rs b/policy-test/tests/outbound_api_grpc.rs index a357189e9f61e..5232603431885 100644 --- a/policy-test/tests/outbound_api_grpc.rs +++ b/policy-test/tests/outbound_api_grpc.rs @@ -13,10 +13,10 @@ use maplit::btreemap; #[tokio::test(flavor = "current_thread")] async fn grpc_route_with_filters_service() { async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); with_temp_ns(|client, ns| async move { - tracing::debug!( - parent = %P::kind(&P::DynamicType::default()), - ); // Create a parent let port = 4191; let parent = create(&client, P::make_parent(&ns)).await; @@ -115,10 +115,10 @@ async fn grpc_route_with_filters_service() { #[tokio::test(flavor = "current_thread")] async fn policy_grpc_route_with_backend_filters() { async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); with_temp_ns(|client, ns| async move { - tracing::debug!( - parent = %P::kind(&P::DynamicType::default()), - ); // Create a parent let port = 4191; let parent = create(&client, P::make_parent(&ns)).await; @@ -220,10 +220,10 @@ async fn policy_grpc_route_with_backend_filters() { #[tokio::test(flavor = "current_thread")] async fn grpc_route_retries_and_timeouts() { async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); with_temp_ns(|client, ns| async move { - tracing::debug!( - parent = %P::kind(&P::DynamicType::default()), - ); // Create a parent let port = 4191; let parent = create(&client, P::make_parent(&ns)).await; @@ -288,10 +288,10 @@ async fn grpc_route_retries_and_timeouts() { #[tokio::test(flavor = "current_thread")] async fn parent_retries_and_timeouts() { async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); with_temp_ns(|client, ns| async move { - tracing::debug!( - parent = %P::kind(&P::DynamicType::default()), - ); // Create a parent let mut parent = P::make_parent(&ns); 
parent.meta_mut().annotations = Some(btreemap! { @@ -356,238 +356,3 @@ async fn parent_retries_and_timeouts() { test::().await; test::().await; } - -// #[tokio::test(flavor = "current_thread")] -// async fn egress_net_grpc_route_reattachment() { -// with_temp_ns(|client, ns| async move { -// // Create a egress network -// let egress = create_egress_network(&client, &ns, "my-egress").await; -// let status = await_egress_net_status(&client, &ns, "my-egress").await; -// assert_status_accepted(status.conditions); - -// grpc_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await; -// }) -// .await; -// } - -// /* Helpers */ -// struct GrpcRouteBuilder(k8s_gateway_api::GrpcRoute); - -// fn mk_grpc_route(ns: &str, name: &str, parent: &Resource, port: Option) -> GrpcRouteBuilder { -// GrpcRouteBuilder(k8s_gateway_api::GrpcRoute { -// metadata: kube::api::ObjectMeta { -// namespace: Some(ns.to_string()), -// name: Some(name.to_string()), -// ..Default::default() -// }, -// spec: k8s_gateway_api::GrpcRouteSpec { -// inner: k8s_gateway_api::CommonRouteSpec { -// parent_refs: Some(vec![k8s_gateway_api::ParentReference { -// group: Some(parent.group()), -// kind: Some(parent.kind()), -// namespace: Some(parent.namespace()), -// name: parent.name(), -// section_name: None, -// port, -// }]), -// }, -// hostnames: None, -// rules: Some(vec![k8s_gateway_api::GrpcRouteRule { -// matches: Some(vec![k8s_gateway_api::GrpcRouteMatch { -// method: Some(k8s_gateway_api::GrpcMethodMatch::Exact { -// method: Some("foo".to_string()), -// service: Some("my-gprc-service".to_string()), -// }), -// headers: None, -// }]), -// filters: None, -// backend_refs: None, -// }]), -// }, -// status: None, -// }) -// } - -// impl GrpcRouteBuilder { -// fn with_annotations(self, annotations: BTreeMap) -> Self { -// let mut route = self.0; -// route.metadata.annotations = Some(annotations); -// Self(route) -// } - -// fn build(self) -> k8s_gateway_api::GrpcRoute { -// self.0 -// } -// } - 
-// async fn grpc_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { -// let mut route = create( -// client, -// mk_grpc_route(ns, "foo-route", &parent, Some(4191)).build(), -// ) -// .await; -// await_grpc_route_status(client, ns, "foo-route").await; - -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// { -// // The route should be attached. -// let routes = grpc_routes(&config); -// let route = assert_route_attached(routes, &parent); -// assert_name_eq(route.metadata.as_ref().unwrap(), "foo-route"); -// } - -// route -// .spec -// .inner -// .parent_refs -// .as_mut() -// .unwrap() -// .first_mut() -// .unwrap() -// .name = "other".to_string(); -// update(client, route.clone()).await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // The grpc route should be unattached and the default (http) route -// // should be present. -// detect_http_routes(&config, |routes| { -// let route = assert_singleton(routes); -// assert_route_is_default(route, &parent, 4191); -// }); - -// route -// .spec -// .inner -// .parent_refs -// .as_mut() -// .unwrap() -// .first_mut() -// .unwrap() -// .name = parent.name(); -// update(client, route).await; - -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an updated config"); -// tracing::trace!(?config); - -// assert_resource_meta(&config.metadata, &parent, 4191); - -// // The route should be attached again. -// { -// // The route should be attached. 
-// let routes = grpc_routes(&config); -// let route = assert_route_attached(routes, &parent); -// assert_name_eq(route.metadata.as_ref().unwrap(), "foo-route"); -// } -// } - -// async fn grpc_route_retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { -// let _route = create( -// client, -// mk_grpc_route(ns, "foo-route", &parent, Some(4191)) -// .with_annotations( -// vec![ -// ("retry.linkerd.io/grpc".to_string(), "internal".to_string()), -// ("timeout.linkerd.io/response".to_string(), "10s".to_string()), -// ] -// .into_iter() -// .collect(), -// ) -// .build(), -// ) -// .await; -// await_grpc_route_status(client, ns, "foo-route").await; - -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// let routes = grpc_routes(&config); -// let route = assert_route_attached(routes, &parent); -// let rule = assert_singleton(&route.rules); -// let conditions = rule -// .retry -// .as_ref() -// .expect("retry config expected") -// .conditions -// .as_ref() -// .expect("retry conditions expected"); -// assert!(conditions.internal); -// let timeout = rule -// .timeouts -// .as_ref() -// .expect("timeouts expected") -// .response -// .as_ref() -// .expect("response timeout expected"); -// assert_eq!(timeout.seconds, 10); -// } - -// async fn parent_retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { -// let _route = create( -// client, -// mk_grpc_route(ns, "foo-route", &parent, Some(4191)) -// .with_annotations( -// vec![ -// // Route annotations override the timeout config specified -// // on the service. 
-// ("timeout.linkerd.io/request".to_string(), "5s".to_string()), -// ] -// .into_iter() -// .collect(), -// ) -// .build(), -// ) -// .await; -// await_grpc_route_status(client, ns, "foo-route").await; - -// let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; -// let config = rx -// .next() -// .await -// .expect("watch must not fail") -// .expect("watch must return an initial config"); -// tracing::trace!(?config); - -// let routes = grpc_routes(&config); -// let route = assert_route_attached(routes, &parent); -// let rule = assert_singleton(&route.rules); -// let conditions = rule -// .retry -// .as_ref() -// .expect("retry config expected") -// .conditions -// .as_ref() -// .expect("retry conditions expected"); -// // Retry config inherited from the service. -// assert!(conditions.internal); -// let timeouts = rule.timeouts.as_ref().expect("timeouts expected"); -// // Parent timeout config overridden by route timeout config. -// assert_eq!(timeouts.response, None); -// let request_timeout = timeouts.request.as_ref().expect("request timeout expected"); -// assert_eq!(request_timeout.seconds, 5); -// } diff --git a/policy-test/tests/outbound_api_http.rs b/policy-test/tests/outbound_api_http.rs index de1970689a728..264146d246a31 100644 --- a/policy-test/tests/outbound_api_http.rs +++ b/policy-test/tests/outbound_api_http.rs @@ -12,10 +12,10 @@ use maplit::btreemap; #[tokio::test(flavor = "current_thread")] async fn gateway_http_route_with_filters_service() { async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); with_temp_ns(|client, ns| async move { - tracing::debug!( - parent = %P::kind(&P::DynamicType::default()), - ); // Create a parent let port = 4191; let parent = create(&client, P::make_parent(&ns)).await; @@ -152,10 +152,10 @@ async fn gateway_http_route_with_filters_service() { #[tokio::test(flavor = "current_thread")] async fn policy_http_route_with_filters_service() { async fn test() { + 
tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); with_temp_ns(|client, ns| async move { - tracing::debug!( - parent = %P::kind(&P::DynamicType::default()), - ); // Create a parent let port = 4191; let parent = create(&client, P::make_parent(&ns)).await; @@ -292,10 +292,10 @@ async fn policy_http_route_with_filters_service() { #[tokio::test(flavor = "current_thread")] async fn gateway_http_route_with_backend_filters() { async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); with_temp_ns(|client, ns| async move { - tracing::debug!( - parent = %P::kind(&P::DynamicType::default()), - ); // Create a parent let port = 4191; let parent = create(&client, P::make_parent(&ns)).await; @@ -435,10 +435,10 @@ async fn gateway_http_route_with_backend_filters() { #[tokio::test(flavor = "current_thread")] async fn policy_http_route_with_backend_filters() { async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + ); with_temp_ns(|client, ns| async move { - tracing::debug!( - parent = %P::kind(&P::DynamicType::default()), - ); // Create a parent let port = 4191; let parent = create(&client, P::make_parent(&ns)).await; @@ -578,11 +578,11 @@ async fn policy_http_route_with_backend_filters() { #[tokio::test(flavor = "current_thread")] async fn http_route_retries_and_timeouts() { async fn test>() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()), + ); with_temp_ns(|client, ns| async move { - tracing::debug!( - parent = %P::kind(&P::DynamicType::default()), - route = %R::kind(&R::DynamicType::default()), - ); // Create a parent let parent = create(&client, P::make_parent(&ns)).await; let port = 4191; @@ -650,11 +650,11 @@ async fn http_route_retries_and_timeouts() { #[tokio::test(flavor = "current_thread")] async fn parent_retries_and_timeouts() { async fn test>() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + 
route = %R::kind(&R::DynamicType::default()), + ); with_temp_ns(|client, ns| async move { - tracing::debug!( - parent = %P::kind(&P::DynamicType::default()), - route = %R::kind(&R::DynamicType::default()), - ); // Create a parent let mut parent = P::make_parent(&ns); parent.meta_mut().annotations = Some(btreemap! { diff --git a/policy-test/tests/outbound_api_tcp.rs b/policy-test/tests/outbound_api_tcp.rs index 90e19f77a725d..53be8c9153c72 100644 --- a/policy-test/tests/outbound_api_tcp.rs +++ b/policy-test/tests/outbound_api_tcp.rs @@ -1,12 +1,3 @@ -// use futures::prelude::*; -// use linkerd_policy_controller_k8s_api as k8s; -// use linkerd_policy_test::{ -// assert_resource_meta, assert_status_accepted, await_egress_net_status, await_tcp_route_status, -// create, create_cluster_scoped, create_egress_network, create_service, delete_cluster_scoped, -// mk_egress_net, mk_service, outbound_api::*, update, with_temp_ns, Resource, -// }; -// use maplit::{btreemap, convert_args}; - use futures::StreamExt; use linkerd_policy_controller_k8s_api::{self as k8s, gateway, policy}; use linkerd_policy_test::{ @@ -19,11 +10,11 @@ use linkerd_policy_test::{ #[tokio::test(flavor = "current_thread")] async fn multiple_tcp_routes() { async fn test() { + tracing::debug!( + parent = %P::kind(&P::DynamicType::default()), + route = %R::kind(&R::DynamicType::default()), + ); with_temp_ns(|client, ns| async move { - tracing::debug!( - parent = %P::kind(&P::DynamicType::default()), - route = %R::kind(&R::DynamicType::default()), - ); // Create a parent let port = 4191; let parent = create(&client, P::make_parent(&ns)).await; From 1da9620d355926bebae75995a6ce155d1aafd0a7 Mon Sep 17 00:00:00 2001 From: Alex Leong Date: Fri, 10 Jan 2025 20:49:25 +0000 Subject: [PATCH 9/9] Remove unused helpers Signed-off-by: Alex Leong --- policy-test/src/outbound_api.rs | 268 +------------------------------- 1 file changed, 1 insertion(+), 267 deletions(-) diff --git a/policy-test/src/outbound_api.rs 
b/policy-test/src/outbound_api.rs index e32447b93deb4..35a7dd7cc870b 100644 --- a/policy-test/src/outbound_api.rs +++ b/policy-test/src/outbound_api.rs @@ -1,6 +1,5 @@ -use crate::{grpc, test_route::TestRoute, Resource}; +use crate::{grpc, test_route::TestRoute}; use k8s_gateway_api::ParentReference; -use kube::ResourceExt; use std::time::Duration; use tokio::time; @@ -166,126 +165,6 @@ pub fn failure_accrual_consecutive( accrual } -#[track_caller] -pub fn route_backends_first_available( - route: &grpc::outbound::HttpRoute, -) -> &[grpc::outbound::http_route::RouteBackend] { - let kind = assert_singleton(&route.rules) - .backends - .as_ref() - .expect("Rule must have backends") - .kind - .as_ref() - .expect("Backend must have kind"); - match kind { - grpc::outbound::http_route::distribution::Kind::FirstAvailable(fa) => &fa.backends, - _ => panic!("Distribution must be FirstAvailable"), - } -} - -#[track_caller] -pub fn tls_route_backends_first_available( - route: &grpc::outbound::TlsRoute, -) -> &[grpc::outbound::tls_route::RouteBackend] { - let kind = assert_singleton(&route.rules) - .backends - .as_ref() - .expect("Rule must have backends") - .kind - .as_ref() - .expect("Backend must have kind"); - match kind { - grpc::outbound::tls_route::distribution::Kind::FirstAvailable(fa) => &fa.backends, - _ => panic!("Distribution must be FirstAvailable"), - } -} - -#[track_caller] -pub fn route_backends_random_available( - route: &grpc::outbound::HttpRoute, -) -> &[grpc::outbound::http_route::WeightedRouteBackend] { - let kind = assert_singleton(&route.rules) - .backends - .as_ref() - .expect("Rule must have backends") - .kind - .as_ref() - .expect("Backend must have kind"); - match kind { - grpc::outbound::http_route::distribution::Kind::RandomAvailable(dist) => &dist.backends, - _ => panic!("Distribution must be RandomAvailable"), - } -} - -#[track_caller] -pub fn tls_route_backends_random_available( - route: &grpc::outbound::TlsRoute, -) -> 
&[grpc::outbound::tls_route::WeightedRouteBackend] { - let kind = assert_singleton(&route.rules) - .backends - .as_ref() - .expect("Rule must have backends") - .kind - .as_ref() - .expect("Backend must have kind"); - match kind { - grpc::outbound::tls_route::distribution::Kind::RandomAvailable(dist) => &dist.backends, - _ => panic!("Distribution must be RandomAvailable"), - } -} - -#[track_caller] -pub fn tcp_route_backends_random_available( - route: &grpc::outbound::OpaqueRoute, -) -> &[grpc::outbound::opaque_route::WeightedRouteBackend] { - let kind = assert_singleton(&route.rules) - .backends - .as_ref() - .expect("Rule must have backends") - .kind - .as_ref() - .expect("Backend must have kind"); - match kind { - grpc::outbound::opaque_route::distribution::Kind::RandomAvailable(dist) => &dist.backends, - _ => panic!("Distribution must be RandomAvailable"), - } -} - -#[track_caller] -pub fn route_name(route: &grpc::outbound::HttpRoute) -> &str { - match route.metadata.as_ref().unwrap().kind.as_ref().unwrap() { - grpc::meta::metadata::Kind::Resource(grpc::meta::Resource { ref name, .. }) => name, - _ => panic!("route must be a resource kind"), - } -} - -#[track_caller] -pub fn tls_route_name(route: &grpc::outbound::TlsRoute) -> &str { - match route.metadata.as_ref().unwrap().kind.as_ref().unwrap() { - grpc::meta::metadata::Kind::Resource(grpc::meta::Resource { ref name, .. }) => name, - _ => panic!("route must be a resource kind"), - } -} - -#[track_caller] -pub fn tcp_route_name(route: &grpc::outbound::OpaqueRoute) -> &str { - match route.metadata.as_ref().unwrap().kind.as_ref().unwrap() { - grpc::meta::metadata::Kind::Resource(grpc::meta::Resource { ref name, .. 
}) => name, - _ => panic!("route must be a resource kind"), - } -} - -#[track_caller] -pub fn assert_backend_has_failure_filter( - backend: &grpc::outbound::http_route::WeightedRouteBackend, -) { - let filter = assert_singleton(&backend.backend.as_ref().unwrap().filters); - match filter.kind.as_ref().unwrap() { - grpc::outbound::http_route::filter::Kind::FailureInjector(_) => {} - _ => panic!("backend must have FailureInjector filter"), - }; -} - #[track_caller] pub fn assert_route_is_default( route: &R::Route, @@ -306,22 +185,6 @@ pub fn assert_route_is_default( } } -#[track_caller] -pub fn assert_tls_route_is_default(route: &grpc::outbound::TlsRoute, parent: &Resource, port: u16) { - let kind = route.metadata.as_ref().unwrap().kind.as_ref().unwrap(); - match kind { - grpc::meta::metadata::Kind::Default(_) => {} - grpc::meta::metadata::Kind::Resource(r) => { - panic!("route expected to be default but got resource {r:?}") - } - } - - let backends = tls_route_backends_first_available(route); - let backend = assert_singleton(backends); - assert_tls_backend_matches_parent(backend, parent, port); - assert_singleton(&route.rules); -} - #[track_caller] pub fn assert_backend_matches_reference( backend: &grpc::outbound::Backend, @@ -346,137 +209,8 @@ pub fn assert_backend_matches_reference( } } -#[track_caller] -pub fn assert_tls_backend_matches_parent( - backend: &grpc::outbound::tls_route::RouteBackend, - parent: &Resource, - port: u16, -) { - let backend = backend.backend.as_ref().unwrap(); - - match parent { - Resource::Service(svc) => { - let dst = match backend.kind.as_ref().unwrap() { - grpc::outbound::backend::Kind::Balancer(balance) => { - let kind = balance.discovery.as_ref().unwrap().kind.as_ref().unwrap(); - match kind { - grpc::outbound::backend::endpoint_discovery::Kind::Dst(dst) => &dst.path, - } - } - grpc::outbound::backend::Kind::Forward(_) => { - panic!("service default route backend must be Balancer") - } - }; - assert_eq!( - *dst, - format!( - 
"{}.{}.svc.{}:{}", - svc.name_unchecked(), - svc.namespace().unwrap(), - "cluster.local", - port - ) - ); - } - - Resource::EgressNetwork(_) => { - match backend.kind.as_ref().unwrap() { - grpc::outbound::backend::Kind::Forward(_) => {} - grpc::outbound::backend::Kind::Balancer(_) => { - panic!("egress net default route backend must be Forward") - } - }; - } - } - - //assert_resource_meta(&backend.metadata, parent, port) -} - -#[track_caller] -pub fn assert_tcp_backend_matches_parent( - backend: &grpc::outbound::opaque_route::RouteBackend, - parent: &Resource, - port: u16, -) { - let backend = backend.backend.as_ref().unwrap(); - - match parent { - Resource::Service(svc) => { - let dst = match backend.kind.as_ref().unwrap() { - grpc::outbound::backend::Kind::Balancer(balance) => { - let kind = balance.discovery.as_ref().unwrap().kind.as_ref().unwrap(); - match kind { - grpc::outbound::backend::endpoint_discovery::Kind::Dst(dst) => &dst.path, - } - } - grpc::outbound::backend::Kind::Forward(_) => { - panic!("service default route backend must be Balancer") - } - }; - assert_eq!( - *dst, - format!( - "{}.{}.svc.{}:{}", - svc.name_unchecked(), - svc.namespace().unwrap(), - "cluster.local", - port - ) - ); - } - - Resource::EgressNetwork(_) => { - match backend.kind.as_ref().unwrap() { - grpc::outbound::backend::Kind::Forward(_) => {} - grpc::outbound::backend::Kind::Balancer(_) => { - panic!("egress net default route backend must be Forward") - } - }; - } - } - - //assert_resource_meta(&backend.metadata, parent, port) -} - #[track_caller] pub fn assert_singleton(ts: &[T]) -> &T { assert_eq!(ts.len(), 1); ts.first().unwrap() } - -#[track_caller] -pub fn assert_route_attached<'a, T>(routes: &'a [T], parent: &Resource) -> &'a T { - match parent { - Resource::EgressNetwork(_) => { - assert_eq!(routes.len(), 2); - routes.first().unwrap() - } - Resource::Service(_) => assert_singleton(routes), - } -} - -#[track_caller] -pub fn assert_route_name_eq(route: 
&grpc::outbound::HttpRoute, name: &str) { - assert_name_eq(route.metadata.as_ref().unwrap(), name) -} - -#[track_caller] -pub fn assert_tls_route_name_eq(route: &grpc::outbound::TlsRoute, name: &str) { - assert_name_eq(route.metadata.as_ref().unwrap(), name) -} - -#[track_caller] -pub fn assert_tcp_route_name_eq(route: &grpc::outbound::OpaqueRoute, name: &str) { - assert_name_eq(route.metadata.as_ref().unwrap(), name) -} - -#[track_caller] -pub fn assert_name_eq(meta: &grpc::meta::Metadata, name: &str) { - let kind = meta.kind.as_ref().unwrap(); - match kind { - grpc::meta::metadata::Kind::Default(d) => { - panic!("route expected to not be default, but got default {d:?}") - } - grpc::meta::metadata::Kind::Resource(resource) => assert_eq!(resource.name, *name), - } -}