From 15ba09d400a919efde477cfc400fc1f603e56122 Mon Sep 17 00:00:00 2001
From: Matei David
Date: Wed, 31 Jan 2024 10:20:10 +0000
Subject: [PATCH 01/36] Add a simple controller example

Signed-off-by: Matei David
---
 examples/Cargo.toml                   |   4 +
 examples/shared_stream_controllers.rs | 116 ++++++++++++++++++++++++++
 2 files changed, 120 insertions(+)
 create mode 100644 examples/shared_stream_controllers.rs

diff --git a/examples/Cargo.toml b/examples/Cargo.toml
index d9fc4a8c7..29c48a74a 100644
--- a/examples/Cargo.toml
+++ b/examples/Cargo.toml
@@ -60,6 +60,10 @@ crossterm = {version = "0.27.0" }
 name = "configmapgen_controller"
 path = "configmapgen_controller.rs"
 
+[[example]]
+name = "shared_stream_controllers"
+path = "shared_stream_controllers.rs"
+
 [[example]]
 name = "crd_api"
 path = "crd_api.rs"
diff --git a/examples/shared_stream_controllers.rs b/examples/shared_stream_controllers.rs
new file mode 100644
index 000000000..ebeade18a
--- /dev/null
+++ b/examples/shared_stream_controllers.rs
@@ -0,0 +1,116 @@
+use std::{sync::Arc, time::Duration};
+
+use futures::StreamExt;
+use k8s_openapi::api::core::v1::Pod;
+use kube::{
+    api::{Patch, PatchParams},
+    core::ObjectMeta,
+    runtime::{controller::Action, watcher, Config, Controller},
+    Api, Client, ResourceExt,
+};
+use tracing::{info, warn};
+
+use thiserror::Error;
+
+#[derive(Clone)]
+struct Data {
+    client: Client,
+}
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+    tracing_subscriber::fmt::init();
+    let client = Client::try_default().await?;
+
+    let pods = Api::<Pod>::all(client.clone());
+
+    // ?
+    let config = Config::default().concurrency(2);
+    let ctx = Arc::new(Data { client });
+
+    //
+    // Controller new returns Self. Each method consumes self and returns a new
+    // Self
+    //
+    // new() creates a watcher, and uses it to then create a reflector. Moves it
+    // all in the controller's memory.
+    //
+    // reconcile_all_on() takes a trigger (i.e. a stream). When the trigger
+    // fires, it will reconcile _all_ managed objects. For us, it means the
+    // trigger will be a stream element.
+    //
+    // shutdown_on_signal() is interesting to look at for research purposes
+    //
+    // run() is the equivalent of build(). Consumes everything and yields back a
+    // stream. We'll need to dedicate some time to reviewing it.
+    //
+    // for_each() will run a closure on each stream element, effectively
+    // consuming the stream.
+    // TODO: Document this builder pattern in my notes
+    Controller::new(pods, watcher::Config::default())
+        .with_config(config)
+        .shutdown_on_signal()
+        .run(
+            reconcile_metadata,
+            |_, _, _| Action::requeue(Duration::from_secs(1)),
+            ctx.clone(),
+        )
+        .for_each(|res| async move {
+            match res {
+                Ok(v) => info!("reconciled {v:?}"),
+                Err(error) => warn!(%error, "failed to reconcile object"),
+            }
+        })
+        .await;
+
+    Ok(())
+}
+
+#[derive(Debug, Error)]
+enum Error {
+    #[error("Failed to patch pod: {0}")]
+    WriteFailed(#[source] kube::Error),
+
+    #[error("Missing pod field: {0}")]
+    MissingField(&'static str),
+}
+
+/// Controller will trigger this whenever our main pod has changed. 
The function +/// reconciles a pod by copying over the labels to the annotations +async fn reconcile_metadata(pod: Arc, ctx: Arc) -> Result { + let labels = pod.metadata.labels.clone().unwrap_or_else(|| Default::default()); + if labels.len() == 0 { + return Ok(Action::requeue(Duration::from_secs(180))); + } + + let annotations = labels.clone(); + let p = Pod { + metadata: ObjectMeta { + name: Some(pod.name_any()), + labels: Some(labels), + annotations: Some(annotations), + ..ObjectMeta::default() + }, + spec: pod.spec.clone(), + status: pod.status.clone(), + }; + + let pod_api = Api::::namespaced( + ctx.client.clone(), + pod.metadata + .namespace + .as_ref() + .ok_or_else(|| Error::MissingField(".metadata.name"))?, + ); + + pod_api + .patch( + &p.name_any(), + &PatchParams::apply("controller-1"), + &Patch::Apply(&p), + ) + .await + .map_err(Error::WriteFailed)?; + + Ok(Action::requeue(Duration::from_secs(300))) +} From 1657ddc6720bab2e29bb318dd5b9c98561e8cbdc Mon Sep 17 00:00:00 2001 From: Matei David Date: Fri, 23 Feb 2024 20:48:05 +0000 Subject: [PATCH 02/36] Add shared stream controller example Signed-off-by: Matei David --- examples/shared_stream_controllers.rs | 193 +++++++++++++++++++++++--- 1 file changed, 174 insertions(+), 19 deletions(-) diff --git a/examples/shared_stream_controllers.rs b/examples/shared_stream_controllers.rs index ebeade18a..1c0262ff8 100644 --- a/examples/shared_stream_controllers.rs +++ b/examples/shared_stream_controllers.rs @@ -1,7 +1,7 @@ use std::{sync::Arc, time::Duration}; use futures::StreamExt; -use k8s_openapi::api::core::v1::Pod; +use k8s_openapi::api::core::v1::{Pod, PodCondition, PodStatus}; use kube::{ api::{Patch, PatchParams}, core::ObjectMeta, @@ -12,6 +12,11 @@ use tracing::{info, warn}; use thiserror::Error; +pub mod condition { + pub static UNDOCUMENTED_TYPE: &str = "UndocumentedPort"; + pub static STATUS_TRUE: &str = "True"; +} + #[derive(Clone)] struct Data { client: Client, @@ -22,47 +27,119 @@ async fn main() -> anyhow::Result<()> { tracing_subscriber::fmt::init(); let client = Client::try_default().await?; - let pods = Api::::all(client.clone()); + let pods = Api::::namespaced(client.clone(), "default"); // ? let config = Config::default().concurrency(2); let ctx = Arc::new(Data { client }); + + // Building a controller + // + // 1. Controller uses a builder pattern to instantiate it with different + // configuration values. Each method called on it will return and consume + // Self. + // + // 2. When a controller is created (`new()` and `new_with()`) it will: + // - create a new store (and return a writeable handle). + // - create a new "trigger_selector" + // - create a "trigger_self" + // - a "trigger_self" accepts a reflector (created using a watch and + // our store handle) + // + // What are triggers: + // + // * Each controller has a trigger_selector. It is a stream union. It + // accepts a bunch of streams. + // - there are some internals to how this is done, which I guess I'll + // briefly cover, but `SelectAll` is a buffer. + // - e.g. it buffers futures. Calling `next()` on it will return + // whatever future resolves first. + // - how do we compose a buffer with streams? we turn every stream into a + // future. We turn them into a functional list, (car, cdr). + // - e.g. 
each stream is turned into a future, when polled it returns (Item, + // Tail) + // - this is a `StreamFuture`, its Poll function is easy to understand + // - and now, the buffer will drive them all into completion + // * A trigger itself is just a ReconciliationRequest. It is a stream that + // wraps an inner stream and maps the object to a ReconciliationRequest, + // something that triggers a reconciliation. + // + // + // Trigger helpers: + // * Trigger with: is an adapter that will take a stream and transform it + // according to a predicate / mapper. + // + // + // 3. When the controller is run: + // - It accepts a function that will reconcile. This is a callback that + // is called with an object that has triggered a reconciliation. + // - It will take a function that handles errors (an error callback) // - // Controller new returns Self. Each method consumes self and returns a new - // Self + // We can think of the applier as an actor. It owns the reconciliation + // state. It has a function (reconciler) and a stream, for each element + // in the stream, it applies the function. // - // new() creates a watcher, and uses it to then create a reflector. Moves it - // all in the controller's memory. + // The complexity for the applier comes from managing streams and + // requeues. It has to deal with: + // * Shutdown signals + // * Requeues // - // reconcile_all_on() takes a trigger (i.e. a stream). When the trigger - // fires, it will reconcile _all_ managed objects. For us, it means the - // trigger will be a stream element. + // Requeuing is done through a buffer structured as a channel. // - // shutdown_on_signal() is interesting to look at for research purposes + // The applier will start a "Runner". The runner is a scheduler that can + // debounce events. The runner wraps a stream that is handled by the + // applier; + // * Basically, our input stream and the rescheduled stream both get + // polled, events get sent to the runner. + // * Events are then debounced + // * Scheduler will get the relevant item from the store, and apply the + // callback to the object. + // - if it fails, it reschedules + // - if it does not, it moves on. // - // run() is the equivalent of build(). Consumes everything and yields back a - // stream. We'll need to dedicate some time to reviewing it. // - // for_each() will run a closure on each stream element effectively - // consuming the stream. - // TODO: Document this builder pattern in my notes + // So we create another stream from all streams that trigger + // reconciliations and the scheduler, and that stream of everything will + // end up being run through a debouncer. + // + // The scheduler is a bit tricky since it needs to deal with concurrent + // invocations and messages that have already been processed. 
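+    //
+    // To make the `StreamFuture` mechanics above concrete, a sketch in
+    // comment form (`into_future()` is the `futures::StreamExt` adapter;
+    // this is not part of the example's logic):
+    //
+    //     let s = futures::stream::iter([1, 2, 3]);
+    //     let (head, tail) = s.into_future().await;
+    //     assert_eq!(head, Some(1)); // `tail` yields the remaining elements
+    //
+    // `SelectAll` holds one such future per inner stream and polls them all,
+    // yielding whichever head completes first.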
+ tokio::spawn( + Controller::new(pods.clone(), watcher::Config::default()) + .with_config(config.clone()) + .shutdown_on_signal() + .run( + reconcile_metadata, + |_, _, _| Action::requeue(Duration::from_secs(1)), + ctx.clone(), + ) + .for_each(|res| async move { + match res { + Ok(v) => info!("reconciled {v:?}"), + Err(error) => warn!(%error, "failed to reconcile object"), + } + }), + ); + + Controller::new(pods, watcher::Config::default()) .with_config(config) .shutdown_on_signal() .run( - reconcile_metadata, + reconcile_status, |_, _, _| Action::requeue(Duration::from_secs(1)), - ctx.clone(), + ctx, ) .for_each(|res| async move { match res { - Ok(v) => info!("reconciled {v:?}"), - Err(error) => warn!(%error, "failed to reconcile object"), + Ok(v) => info!("reconcile status for {v:?}"), + Err(error) => warn!(%error, "failed to reconcile status for object"), } }) .await; + Ok(()) } @@ -78,6 +155,9 @@ enum Error { /// Controller will trigger this whenever our main pod has changed. The function /// reconciles a pod by copying over the labels to the annotations async fn reconcile_metadata(pod: Arc, ctx: Arc) -> Result { + if pod.name_any() == "kube-system" { + return Ok(Action::requeue(Duration::from_secs(300))); + } let labels = pod.metadata.labels.clone().unwrap_or_else(|| Default::default()); if labels.len() == 0 { return Ok(Action::requeue(Duration::from_secs(180))); @@ -114,3 +194,78 @@ async fn reconcile_metadata(pod: Arc, ctx: Arc) -> Result, ctx: Arc) -> Result { + let mut conditions = pod + .status + .clone() + .unwrap_or_default() + .conditions + .unwrap_or_default(); + + // If the condition already exists, exit + for cond in conditions.iter() { + if cond.type_ == condition::UNDOCUMENTED_TYPE { + return Ok(Action::requeue(Duration::from_secs(300))); + } + } + + pod.spec + .clone() + .unwrap_or_default() + .containers + .iter() + .for_each(|c| { + if c.ports.clone().unwrap_or_default().len() == 0 { + conditions.push(PodCondition { + type_: condition::UNDOCUMENTED_TYPE.into(), + status: condition::STATUS_TRUE.into(), + ..Default::default() + }) + } + }); + + let mut current_conds = pod + .status + .clone() + .unwrap_or_default() + .conditions + .unwrap_or_default() + .into_iter() + .filter(|c| c.type_ != condition::UNDOCUMENTED_TYPE && c.status != condition::STATUS_TRUE) + .collect::>(); + + for condition in conditions { + current_conds.push(condition); + } + + let status = PodStatus { + conditions: Some(current_conds), + ..Default::default() + }; + let pod_api = Api::::namespaced( + ctx.client.clone(), + pod.metadata + .namespace + .as_ref() + .ok_or_else(|| Error::MissingField(".metadata.name"))?, + ); + + let name = pod.name_any(); + let value = serde_json::json!({ + "apiVersion": "v1", + "kind": "Pod", + "name": name, + "status": status, + }); + let p = Patch::Merge(value); + pod_api + .patch_status(&pod.name_any(), &PatchParams::apply("controller-2"), &p) + .await + .map_err(Error::WriteFailed)?; + + Ok(Action::requeue(Duration::from_secs(300))) +} From 98255dc3daacd506981030b91247ce5c5692a989 Mon Sep 17 00:00:00 2001 From: Matei David Date: Fri, 23 Feb 2024 21:30:43 +0000 Subject: [PATCH 03/36] Try to get something working Signed-off-by: Matei David --- examples/shared_stream_controllers.rs | 23 +++++++++++-- kube-runtime/src/controller/mod.rs | 42 ++++++++++++++++++++++++ kube-runtime/src/reflector/mod.rs | 14 +++++++- kube-runtime/src/reflector/object_ref.rs | 14 ++++++++ kube-runtime/src/reflector/store.rs | 38 +++++++++++++++++++++ kube-runtime/src/utils/watch_ext.rs | 2 ++ 
6 files changed, 129 insertions(+), 4 deletions(-) diff --git a/examples/shared_stream_controllers.rs b/examples/shared_stream_controllers.rs index 1c0262ff8..52d82c152 100644 --- a/examples/shared_stream_controllers.rs +++ b/examples/shared_stream_controllers.rs @@ -5,7 +5,11 @@ use k8s_openapi::api::core::v1::{Pod, PodCondition, PodStatus}; use kube::{ api::{Patch, PatchParams}, core::ObjectMeta, - runtime::{controller::Action, watcher, Config, Controller}, + runtime::{ + controller::Action, + reflector::{shared_reflector, store::Writer}, + watcher, Config, Controller, WatchStreamExt, + }, Api, Client, ResourceExt, }; use tracing::{info, warn}; @@ -33,6 +37,11 @@ async fn main() -> anyhow::Result<()> { let config = Config::default().concurrency(2); let ctx = Arc::new(Data { client }); + // (1): Create a store & have it transform the stream to return arcs + let writer = Writer::::new(Default::default()); + let reader = writer.as_reader(); + let reflector = shared_reflector(writer, watcher(pods.clone(), Default::default())); + // Building a controller // @@ -105,8 +114,10 @@ async fn main() -> anyhow::Result<()> { // // The scheduler is a bit tricky since it needs to deal with concurrent // invocations and messages that have already been processed. + // + tokio::spawn( - Controller::new(pods.clone(), watcher::Config::default()) + Controller::for_shared_stream(reflector.applied_objects(), reader, ()) .with_config(config.clone()) .shutdown_on_signal() .run( @@ -122,8 +133,12 @@ async fn main() -> anyhow::Result<()> { }), ); + // (2): we can't share streams yet so we just use the same primitives + let writer2 = Writer::::new(Default::default()); + let reader2 = writer2.as_reader(); + let reflector2 = shared_reflector(writer2, watcher(pods.clone(), Default::default())); - Controller::new(pods, watcher::Config::default()) + Controller::for_shared_stream(reflector2.applied_objects(), reader2, ()) .with_config(config) .shutdown_on_signal() .run( @@ -139,6 +154,8 @@ async fn main() -> anyhow::Result<()> { }) .await; + // (3): Figure out how to use the same store and create a shared stream from + // the shared reflector :) Ok(()) } diff --git a/kube-runtime/src/controller/mod.rs b/kube-runtime/src/controller/mod.rs index ef4c662a7..5b928932c 100644 --- a/kube-runtime/src/controller/mod.rs +++ b/kube-runtime/src/controller/mod.rs @@ -122,6 +122,23 @@ where }) } +pub fn trigger_self_shared( + stream: S, + dyntype: K::DynamicType, +) -> impl Stream, S::Error>> +where + S: TryStream>, + K: Resource, + K::DynamicType: Clone, +{ + trigger_with(stream, move |obj| { + Some(ReconcileRequest { + obj_ref: ObjectRef::from_shared_obj_with(obj.clone(), dyntype.clone()), + reason: ReconcileReason::ObjectUpdated, + }) + }) +} + /// Enqueues any mapper returned `K` types for reconciliation fn trigger_others( stream: S, @@ -701,6 +718,31 @@ where } } + pub fn for_shared_stream( + trigger: impl Stream, watcher::Error>> + Send + 'static, + reader: Store, + dyntype: K::DynamicType, + ) -> Self { + let mut trigger_selector = stream::SelectAll::new(); + let self_watcher = trigger_self_shared(trigger, dyntype.clone()).boxed(); + trigger_selector.push(self_watcher); + Self { + trigger_selector, + trigger_backoff: Box::::default(), + graceful_shutdown_selector: vec![ + // Fallback future, ensuring that we never terminate if no additional futures are added to the selector + future::pending().boxed(), + ], + forceful_shutdown_selector: vec![ + // Fallback future, ensuring that we never terminate if no additional futures are 
added to the selector + future::pending().boxed(), + ], + dyntype, + reader, + config: Default::default(), + } + } + /// Specify the configuration for the controller's behavior. #[must_use] pub fn with_config(mut self, config: Config) -> Self { diff --git a/kube-runtime/src/reflector/mod.rs b/kube-runtime/src/reflector/mod.rs index d0a724b53..7c6514062 100644 --- a/kube-runtime/src/reflector/mod.rs +++ b/kube-runtime/src/reflector/mod.rs @@ -7,7 +7,7 @@ pub use self::object_ref::{Extra as ObjectRefExtra, ObjectRef}; use crate::watcher; use futures::{Stream, TryStreamExt}; use kube_client::Resource; -use std::hash::Hash; +use std::{hash::Hash, sync::Arc}; pub use store::{store, Store}; /// Cache objects from a [`watcher()`] stream into a local [`Store`] @@ -98,6 +98,18 @@ where stream.inspect_ok(move |event| writer.apply_watcher_event(event)) } +pub fn shared_reflector( + mut writer: store::Writer, + stream: W, +) -> impl Stream>>> +where + K: Resource + Clone, + K::DynamicType: Eq + Hash + Clone, + W: Stream>>, +{ + stream.map_ok(move |event| writer.apply_with_arc(event)) +} + #[cfg(test)] mod tests { use super::{reflector, store, ObjectRef}; diff --git a/kube-runtime/src/reflector/object_ref.rs b/kube-runtime/src/reflector/object_ref.rs index cc2049ca1..f38952126 100644 --- a/kube-runtime/src/reflector/object_ref.rs +++ b/kube-runtime/src/reflector/object_ref.rs @@ -8,6 +8,7 @@ use kube_client::{ use std::{ fmt::{Debug, Display}, hash::Hash, + sync::Arc, }; #[derive(Derivative)] @@ -119,6 +120,19 @@ impl ObjectRef { } } + pub fn from_shared_obj_with(obj: Arc, dyntype: K::DynamicType) -> Self + where + K: Resource, + { + let meta = obj.as_ref().meta(); + Self { + dyntype, + name: obj.name_unchecked(), + namespace: meta.namespace.clone(), + extra: Extra::from_obj_meta(meta), + } + } + /// Create an `ObjectRef` from an `OwnerReference` /// /// Returns `None` if the types do not match. 
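The `from_shared_obj_with` constructor above mirrors `from_obj_with`, but takes
the object behind an `Arc` so shared streams can build store keys without
cloning the whole resource. A minimal sketch of the intended call pattern (the
`key_for` helper is hypothetical; `()` is the dynamic type for static resources
such as `Pod`):

    use k8s_openapi::api::core::v1::Pod;
    use kube::runtime::reflector::ObjectRef;
    use std::sync::Arc;

    fn key_for(pod: Arc<Pod>) -> ObjectRef<Pod> {
        ObjectRef::from_shared_obj_with(pod, ())
    }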
diff --git a/kube-runtime/src/reflector/store.rs b/kube-runtime/src/reflector/store.rs index 9085ab69b..c44eb9c0f 100644 --- a/kube-runtime/src/reflector/store.rs +++ b/kube-runtime/src/reflector/store.rs @@ -88,6 +88,44 @@ where ready_tx.init(()) } } + + /// Applies a single watcher event to the store + pub fn apply_with_arc(&mut self, event: watcher::Event) -> watcher::Event> { + let ev = match event { + watcher::Event::Applied(obj) => { + let key = ObjectRef::from_obj_with(&obj, self.dyntype.clone()); + let obj = Arc::new(obj.clone()); + self.store.write().insert(key, obj.clone()); + watcher::Event::Applied(obj) + } + watcher::Event::Deleted(obj) => { + let key = ObjectRef::from_obj_with(&obj, self.dyntype.clone()); + self.store.write().remove(&key); + watcher::Event::Deleted(Arc::new(obj)) + } + watcher::Event::Restarted(new_objs) => { + let new_objs = new_objs + .iter() + .map(|obj| { + ( + ObjectRef::from_obj_with(obj, self.dyntype.clone()), + Arc::new(obj.clone()), + ) + }) + .collect::>(); + let objs_arced = new_objs.values().map(|obj| obj.to_owned()).collect(); + *self.store.write() = new_objs; + watcher::Event::Restarted(objs_arced) + } + }; + + // Mark as ready after the first event, "releasing" any calls to Store::wait_until_ready() + if let Some(ready_tx) = self.ready_tx.take() { + ready_tx.init(()) + } + + ev + } } impl Default for Writer where diff --git a/kube-runtime/src/utils/watch_ext.rs b/kube-runtime/src/utils/watch_ext.rs index 6f9994586..77c4c16f1 100644 --- a/kube-runtime/src/utils/watch_ext.rs +++ b/kube-runtime/src/utils/watch_ext.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + #[cfg(feature = "unstable-runtime-predicates")] use crate::utils::predicate::{Predicate, PredicateFilter}; #[cfg(feature = "unstable-runtime-subscribe")] From 3685b9e703dea32d9c47036fbf7774c977837371 Mon Sep 17 00:00:00 2001 From: Matei David Date: Fri, 23 Feb 2024 21:35:51 +0000 Subject: [PATCH 04/36] Rm my notes Signed-off-by: Matei David --- examples/shared_stream_controllers.rs | 73 --------------------------- 1 file changed, 73 deletions(-) diff --git a/examples/shared_stream_controllers.rs b/examples/shared_stream_controllers.rs index 52d82c152..dbb48729a 100644 --- a/examples/shared_stream_controllers.rs +++ b/examples/shared_stream_controllers.rs @@ -43,79 +43,6 @@ async fn main() -> anyhow::Result<()> { let reflector = shared_reflector(writer, watcher(pods.clone(), Default::default())); - // Building a controller - // - // 1. Controller uses a builder pattern to instantiate it with different - // configuration values. Each method called on it will return and consume - // Self. - // - // 2. When a controller is created (`new()` and `new_with()`) it will: - // - create a new store (and return a writeable handle). - // - create a new "trigger_selector" - // - create a "trigger_self" - // - a "trigger_self" accepts a reflector (created using a watch and - // our store handle) - // - // What are triggers: - // - // * Each controller has a trigger_selector. It is a stream union. It - // accepts a bunch of streams. - // - there are some internals to how this is done, which I guess I'll - // briefly cover, but `SelectAll` is a buffer. - // - e.g. it buffers futures. Calling `next()` on it will return - // whatever future resolves first. - // - how do we compose a buffer with streams? we turn every stream into a - // future. We turn them into a functional list, (car, cdr). - // - e.g. 
each stream is turned into a future, when polled it returns (Item, - // Tail) - // - this is a `StreamFuture`, its Poll function is easy to understand - // - and now, the buffer will drive them all into completion - // * A trigger itself is just a ReconciliationRequest. It is a stream that - // wraps an inner stream and maps the object to a ReconciliationRequest, - // something that triggers a reconciliation. - // - // - // Trigger helpers: - // * Trigger with: is an adapter that will take a stream and transform it - // according to a predicate / mapper. - // - // - // 3. When the controller is run: - // - It accepts a function that will reconcile. This is a callback that - // is called with an object that has triggered a reconciliation. - // - It will take a function that handles errors (an error callback) - // - // We can think of the applier as an actor. It owns the reconciliation - // state. It has a function (reconciler) and a stream, for each element - // in the stream, it applies the function. - // - // The complexity for the applier comes from managing streams and - // requeues. It has to deal with: - // * Shutdown signals - // * Requeues - // - // Requeuing is done through a buffer structured as a channel. - // - // The applier will start a "Runner". The runner is a scheduler that can - // debounce events. The runner wraps a stream that is handled by the - // applier; - // * Basically, our input stream and the rescheduled stream both get - // polled, events get sent to the runner. - // * Events are then debounced - // * Scheduler will get the relevant item from the store, and apply the - // callback to the object. - // - if it fails, it reschedules - // - if it does not, it moves on. - // - // - // So we create another stream from all streams that trigger - // reconciliations and the scheduler, and that stream of everything will - // end up being run through a debouncer. - // - // The scheduler is a bit tricky since it needs to deal with concurrent - // invocations and messages that have already been processed. 
- // - tokio::spawn( Controller::for_shared_stream(reflector.applied_objects(), reader, ()) .with_config(config.clone()) From 683e77da04be87cee0e1f37c1ba9c034ac14b082 Mon Sep 17 00:00:00 2001 From: Matei David Date: Thu, 29 Feb 2024 10:14:39 +0000 Subject: [PATCH 05/36] Results or objectefs Signed-off-by: Matei David --- examples/Cargo.toml | 4 ++ examples/shared_stream_controllers.rs | 12 +++-- kube-runtime/src/controller/mod.rs | 4 +- kube-runtime/src/utils/reflect.rs | 77 +++++++++++++++++++++++++-- kube-runtime/src/utils/watch_ext.rs | 14 ++++- 5 files changed, 99 insertions(+), 12 deletions(-) diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 36c9d125f..d1b11886b 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -218,6 +218,10 @@ path = "custom_client_trace.rs" name = "secret_syncer" path = "secret_syncer.rs" +[[example]] +name = "node_del" +path = "node_del.rs" + [[example]] name = "pod_shell_crossterm" path = "pod_shell_crossterm.rs" diff --git a/examples/shared_stream_controllers.rs b/examples/shared_stream_controllers.rs index dbb48729a..2d4eda7bb 100644 --- a/examples/shared_stream_controllers.rs +++ b/examples/shared_stream_controllers.rs @@ -1,6 +1,6 @@ use std::{sync::Arc, time::Duration}; -use futures::StreamExt; +use futures::{Stream, StreamExt, TryStream, TryStreamExt}; use k8s_openapi::api::core::v1::{Pod, PodCondition, PodStatus}; use kube::{ api::{Patch, PatchParams}, @@ -40,11 +40,13 @@ async fn main() -> anyhow::Result<()> { // (1): Create a store & have it transform the stream to return arcs let writer = Writer::::new(Default::default()); let reader = writer.as_reader(); - let reflector = shared_reflector(writer, watcher(pods.clone(), Default::default())); - + let root = watcher(pods.clone(), Default::default()) + .default_backoff() + .reflect_shared(writer); + let dup = root.subscribe().map(|obj| Ok(obj)).applied_objects(); tokio::spawn( - Controller::for_shared_stream(reflector.applied_objects(), reader, ()) + Controller::for_shared_stream(dup, reader, ()) .with_config(config.clone()) .shutdown_on_signal() .run( @@ -65,7 +67,7 @@ async fn main() -> anyhow::Result<()> { let reader2 = writer2.as_reader(); let reflector2 = shared_reflector(writer2, watcher(pods.clone(), Default::default())); - Controller::for_shared_stream(reflector2.applied_objects(), reader2, ()) + Controller::for_shared_stream(root, reader2, ()) .with_config(config) .shutdown_on_signal() .run( diff --git a/kube-runtime/src/controller/mod.rs b/kube-runtime/src/controller/mod.rs index 5b928932c..92a4d2660 100644 --- a/kube-runtime/src/controller/mod.rs +++ b/kube-runtime/src/controller/mod.rs @@ -719,12 +719,12 @@ where } pub fn for_shared_stream( - trigger: impl Stream, watcher::Error>> + Send + 'static, + trigger: impl Stream> + Send + 'static, reader: Store, dyntype: K::DynamicType, ) -> Self { let mut trigger_selector = stream::SelectAll::new(); - let self_watcher = trigger_self_shared(trigger, dyntype.clone()).boxed(); + let self_watcher = trigger_self_shared(trigger.map(|obj| Ok(obj)), dyntype.clone()).boxed(); trigger_selector.push(self_watcher); Self { trigger_selector, diff --git a/kube-runtime/src/utils/reflect.rs b/kube-runtime/src/utils/reflect.rs index 43fa65c2a..f0f528aab 100644 --- a/kube-runtime/src/utils/reflect.rs +++ b/kube-runtime/src/utils/reflect.rs @@ -2,13 +2,15 @@ use core::{ pin::Pin, task::{Context, Poll}, }; +use std::sync::Arc; -use futures::{Stream, TryStream}; +use futures::{poll, ready, stream, Stream, TryStream}; use pin_project::pin_project; 
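+// A broadcast channel fans each event out to every active receiver; a
+// receiver that falls more than the channel capacity behind observes
+// `RecvError::Lagged(n)` instead of blocking the sender, which is why
+// `subscribe()` below logs and skips on lag rather than bailing out.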
+use tokio::sync::broadcast; use crate::{ - reflector::store::Writer, - watcher::{Error, Event}, + reflector::{store::Writer, Store}, + watcher::{self, Error, Event}, }; use kube_client::Resource; @@ -52,6 +54,75 @@ where } } +#[pin_project] +#[must_use = "subscribers will not get events unless this stream is polled"] +pub struct ReflectShared +where + St: Stream, + K: Resource + Clone + 'static, + K::DynamicType: Eq + std::hash::Hash + Clone, +{ + #[pin] + stream: St, + writer: Writer, + + tx: broadcast::Sender>>>, +} + +impl ReflectShared +where + St: TryStream>, + K: Resource + Clone, + K::DynamicType: Eq + std::hash::Hash + Clone, +{ + pub(super) fn new(stream: St, writer: Writer) -> ReflectShared { + let (tx, _) = broadcast::channel(10); + Self { stream, writer, tx } + } + + pub fn subscribe(&self) -> impl Stream>> { + stream::unfold(self.tx.subscribe(), |mut rx| async { + loop { + match rx.recv().await { + Ok(Some(ev)) => return Some((ev, rx)), + Err(broadcast::error::RecvError::Lagged(count)) => { + tracing::error!("stream lagged, skipped {count} events"); + continue; + } + _ => return None, + } + } + }) + } +} + +impl Stream for ReflectShared +where + K: Resource + Clone, + K::DynamicType: Eq + std::hash::Hash + Clone, + St: Stream, Error>>, +{ + type Item = Event>; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut me = self.project(); + match me.stream.as_mut().poll_next(cx) { + Poll::Ready(Some(Ok(ev))) => { + let ev = me.writer.apply_with_arc(ev); + me.tx.send(Some(ev.clone())).ok(); + Poll::Ready(Some(ev)) + } + Poll::Ready(Some(Err(error))) => Poll::Pending, + Poll::Ready(None) => { + me.tx.send(None).ok(); + Poll::Ready(None) + } + Poll::Pending => Poll::Pending, + } + } +} + + #[cfg(test)] pub(crate) mod test { use std::{task::Poll, vec}; diff --git a/kube-runtime/src/utils/watch_ext.rs b/kube-runtime/src/utils/watch_ext.rs index 77c4c16f1..987b85365 100644 --- a/kube-runtime/src/utils/watch_ext.rs +++ b/kube-runtime/src/utils/watch_ext.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - #[cfg(feature = "unstable-runtime-predicates")] use crate::utils::predicate::{Predicate, PredicateFilter}; #[cfg(feature = "unstable-runtime-subscribe")] @@ -16,6 +14,8 @@ use crate::watcher::DefaultBackoff; use backoff::backoff::Backoff; use futures::{Stream, TryStream}; +use super::reflect::ReflectShared; + /// Extension trait for streams returned by [`watcher`](watcher()) or [`reflector`](crate::reflector::reflector) pub trait WatchStreamExt: Stream { /// Apply the [`DefaultBackoff`] watcher [`Backoff`] policy @@ -191,6 +191,7 @@ pub trait WatchStreamExt: Stream { fn stream_subscribe(self) -> StreamSubscribe where Self: Stream, watcher::Error>> + Send + Sized + 'static, + K: Clone, { StreamSubscribe::new(self) } @@ -249,6 +250,15 @@ pub trait WatchStreamExt: Stream { { Reflect::new(self, writer) } + + fn reflect_shared(self, writer: Writer) -> ReflectShared + where + Self: Stream, watcher::Error>> + Sized, + K: Resource + Clone + 'static, + K::DynamicType: Eq + std::hash::Hash + Clone, + { + ReflectShared::new(self, writer) + } } impl WatchStreamExt for St where St: Stream {} From af7a3093c76fb9da3a00d447b6d2419e23a35cda Mon Sep 17 00:00:00 2001 From: Matei David Date: Thu, 29 Feb 2024 11:33:02 +0000 Subject: [PATCH 06/36] Working shared stream Signed-off-by: Matei David --- examples/shared_stream_controllers.rs | 10 +++----- kube-runtime/src/controller/mod.rs | 34 ++++++++++++++++++++++++ kube-runtime/src/utils/reflect.rs | 37 ++++++++++++++------------- 3 files 
changed, 56 insertions(+), 25 deletions(-) diff --git a/examples/shared_stream_controllers.rs b/examples/shared_stream_controllers.rs index 2d4eda7bb..419204e2c 100644 --- a/examples/shared_stream_controllers.rs +++ b/examples/shared_stream_controllers.rs @@ -43,10 +43,10 @@ async fn main() -> anyhow::Result<()> { let root = watcher(pods.clone(), Default::default()) .default_backoff() .reflect_shared(writer); - let dup = root.subscribe().map(|obj| Ok(obj)).applied_objects(); + let dup = root.subscribe(); tokio::spawn( - Controller::for_shared_stream(dup, reader, ()) + Controller::for_stream(root.applied_objects(), reader.clone()) .with_config(config.clone()) .shutdown_on_signal() .run( @@ -63,11 +63,7 @@ async fn main() -> anyhow::Result<()> { ); // (2): we can't share streams yet so we just use the same primitives - let writer2 = Writer::::new(Default::default()); - let reader2 = writer2.as_reader(); - let reflector2 = shared_reflector(writer2, watcher(pods.clone(), Default::default())); - - Controller::for_shared_stream(root, reader2, ()) + Controller::for_shared_ref(dup, reader, ()) .with_config(config) .shutdown_on_signal() .run( diff --git a/kube-runtime/src/controller/mod.rs b/kube-runtime/src/controller/mod.rs index 92a4d2660..8984e747c 100644 --- a/kube-runtime/src/controller/mod.rs +++ b/kube-runtime/src/controller/mod.rs @@ -718,6 +718,40 @@ where } } + pub fn for_shared_ref( + trigger: impl Stream> + Send + 'static, + reader: Store, + dyntype: K::DynamicType, + ) -> Self { + let mut trigger_selector = stream::SelectAll::new(); + trigger_selector.push( + trigger + .map(move |obj| { + Ok(ReconcileRequest { + obj_ref: obj, + reason: ReconcileReason::Unknown, + }) + }) + .boxed(), + ); + + Self { + trigger_selector, + trigger_backoff: Box::::default(), + graceful_shutdown_selector: vec![ + // Fallback future, ensuring that we never terminate if no additional futures are added to the selector + future::pending().boxed(), + ], + forceful_shutdown_selector: vec![ + // Fallback future, ensuring that we never terminate if no additional futures are added to the selector + future::pending().boxed(), + ], + dyntype, + reader, + config: Default::default(), + } + } + pub fn for_shared_stream( trigger: impl Stream> + Send + 'static, reader: Store, diff --git a/kube-runtime/src/utils/reflect.rs b/kube-runtime/src/utils/reflect.rs index f0f528aab..44d59d6c8 100644 --- a/kube-runtime/src/utils/reflect.rs +++ b/kube-runtime/src/utils/reflect.rs @@ -9,7 +9,7 @@ use pin_project::pin_project; use tokio::sync::broadcast; use crate::{ - reflector::{store::Writer, Store}, + reflector::{store::Writer, ObjectRef, Store}, watcher::{self, Error, Event}, }; use kube_client::Resource; @@ -66,7 +66,7 @@ where stream: St, writer: Writer, - tx: broadcast::Sender>>>, + tx: broadcast::Sender>>, } impl ReflectShared @@ -80,7 +80,7 @@ where Self { stream, writer, tx } } - pub fn subscribe(&self) -> impl Stream>> { + pub fn subscribe(&self) -> impl Stream> { stream::unfold(self.tx.subscribe(), |mut rx| async { loop { match rx.recv().await { @@ -99,26 +99,27 @@ where impl Stream for ReflectShared where K: Resource + Clone, - K::DynamicType: Eq + std::hash::Hash + Clone, + K::DynamicType: Eq + std::hash::Hash + Clone + Default, St: Stream, Error>>, { - type Item = Event>; + type Item = St::Item; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut me = self.project(); - match me.stream.as_mut().poll_next(cx) { - Poll::Ready(Some(Ok(ev))) => { - let ev = me.writer.apply_with_arc(ev); - 
me.tx.send(Some(ev.clone())).ok(); - Poll::Ready(Some(ev)) - } - Poll::Ready(Some(Err(error))) => Poll::Pending, - Poll::Ready(None) => { - me.tx.send(None).ok(); - Poll::Ready(None) - } - Poll::Pending => Poll::Pending, - } + me.stream.as_mut().poll_next(cx).map_ok(move |event| { + me.writer.apply_watcher_event(&event); + match &event { + Event::Applied(obj) | Event::Deleted(obj) => { + me.tx.send(Some(ObjectRef::from_obj(obj))).ok(); + } + Event::Restarted(obj_list) => { + for obj in obj_list.iter().map(ObjectRef::from_obj) { + me.tx.send(Some(obj)).ok(); + } + } + }; + event + }) } } From 8d4d6945f2eeb4d46fd3b89a13607d448bfd83f6 Mon Sep 17 00:00:00 2001 From: Matei David Date: Thu, 29 Feb 2024 12:20:03 +0000 Subject: [PATCH 07/36] Different way of doing it Signed-off-by: Matei David --- examples/shared_stream_controllers.rs | 8 +-- kube-runtime/src/controller/mod.rs | 2 +- kube-runtime/src/reflector/store.rs | 20 ++++++++ kube-runtime/src/utils/reflect.rs | 71 +++++++++++---------------- kube-runtime/src/utils/watch_ext.rs | 10 ++-- 5 files changed, 61 insertions(+), 50 deletions(-) diff --git a/examples/shared_stream_controllers.rs b/examples/shared_stream_controllers.rs index 419204e2c..0fa5e3674 100644 --- a/examples/shared_stream_controllers.rs +++ b/examples/shared_stream_controllers.rs @@ -42,11 +42,11 @@ async fn main() -> anyhow::Result<()> { let reader = writer.as_reader(); let root = watcher(pods.clone(), Default::default()) .default_backoff() - .reflect_shared(writer); - let dup = root.subscribe(); + .reflect(writer); + let dup = root.subscribe().reflect_shared(reader.clone()); tokio::spawn( - Controller::for_stream(root.applied_objects(), reader.clone()) + Controller::for_stream(root.applied_objects(), reader) .with_config(config.clone()) .shutdown_on_signal() .run( @@ -63,7 +63,7 @@ async fn main() -> anyhow::Result<()> { ); // (2): we can't share streams yet so we just use the same primitives - Controller::for_shared_ref(dup, reader, ()) + Controller::for_shared_stream(dup.reader(), dup, ()) .with_config(config) .shutdown_on_signal() .run( diff --git a/kube-runtime/src/controller/mod.rs b/kube-runtime/src/controller/mod.rs index 8984e747c..0d5781eb7 100644 --- a/kube-runtime/src/controller/mod.rs +++ b/kube-runtime/src/controller/mod.rs @@ -753,8 +753,8 @@ where } pub fn for_shared_stream( - trigger: impl Stream> + Send + 'static, reader: Store, + trigger: impl Stream> + Send + 'static, dyntype: K::DynamicType, ) -> Self { let mut trigger_selector = stream::SelectAll::new(); diff --git a/kube-runtime/src/reflector/store.rs b/kube-runtime/src/reflector/store.rs index c44eb9c0f..6cc86cda0 100644 --- a/kube-runtime/src/reflector/store.rs +++ b/kube-runtime/src/reflector/store.rs @@ -5,10 +5,12 @@ use crate::{ }; use ahash::AHashMap; use derivative::Derivative; +use futures::{stream, Stream}; use kube_client::Resource; use parking_lot::RwLock; use std::{fmt::Debug, hash::Hash, sync::Arc}; use thiserror::Error; +use tokio::sync::broadcast; type Cache = Arc, Arc>>>; @@ -25,6 +27,7 @@ where dyntype: K::DynamicType, ready_tx: Option>, ready_rx: Arc>, + pub(crate) broadcast_tx: broadcast::Sender>>, } impl Writer @@ -37,11 +40,13 @@ where /// `k8s_openapi` types) you can use `Default` instead. 
pub fn new(dyntype: K::DynamicType) -> Self { let (ready_tx, ready_rx) = DelayedInit::new(); + let (broadcast_tx, _) = broadcast::channel(10); Writer { store: Default::default(), dyntype, ready_tx: Some(ready_tx), ready_rx: Arc::new(ready_rx), + broadcast_tx, } } @@ -57,6 +62,21 @@ where } } + pub(crate) fn subscribe(&self) -> impl Stream> { + stream::unfold(self.broadcast_tx.subscribe(), |mut rx| async { + loop { + match rx.recv().await { + Ok(Some(ev)) => return Some((ev, rx)), + Err(broadcast::error::RecvError::Lagged(count)) => { + tracing::error!("stream lagged, skipped {count} events"); + continue; + } + _ => return None, + } + } + }) + } + /// Applies a single watcher event to the store pub fn apply_watcher_event(&mut self, event: &watcher::Event) { match event { diff --git a/kube-runtime/src/utils/reflect.rs b/kube-runtime/src/utils/reflect.rs index 44d59d6c8..7ff4e13f2 100644 --- a/kube-runtime/src/utils/reflect.rs +++ b/kube-runtime/src/utils/reflect.rs @@ -35,12 +35,16 @@ where pub(super) fn new(stream: St, writer: Writer) -> Reflect { Self { stream, writer } } + + pub fn subscribe(&self) -> impl Stream> { + self.writer.subscribe() + } } impl Stream for Reflect where K: Resource + Clone, - K::DynamicType: Eq + std::hash::Hash + Clone, + K::DynamicType: Eq + std::hash::Hash + Clone + Default, St: Stream, Error>>, { type Item = Result, Error>; @@ -49,14 +53,23 @@ where let mut me = self.project(); me.stream.as_mut().poll_next(cx).map_ok(move |event| { me.writer.apply_watcher_event(&event); + match &event { + Event::Applied(obj) | Event::Deleted(obj) => { + me.writer.broadcast_tx.send(Some(ObjectRef::from_obj(obj))).ok(); + } + Event::Restarted(obj_list) => { + for obj in obj_list.iter().map(ObjectRef::from_obj) { + me.writer.broadcast_tx.send(Some(obj)).ok(); + } + } + }; event }) } } #[pin_project] -#[must_use = "subscribers will not get events unless this stream is polled"] -pub struct ReflectShared +pub struct ReflectHandle where St: Stream, K: Resource + Clone + 'static, @@ -64,62 +77,38 @@ where { #[pin] stream: St, - writer: Writer, - - tx: broadcast::Sender>>, + reader: Store, } -impl ReflectShared +impl ReflectHandle where - St: TryStream>, + St: Stream>, K: Resource + Clone, K::DynamicType: Eq + std::hash::Hash + Clone, { - pub(super) fn new(stream: St, writer: Writer) -> ReflectShared { - let (tx, _) = broadcast::channel(10); - Self { stream, writer, tx } + pub(super) fn new(stream: St, reader: Store) -> ReflectHandle { + Self { stream, reader } } - pub fn subscribe(&self) -> impl Stream> { - stream::unfold(self.tx.subscribe(), |mut rx| async { - loop { - match rx.recv().await { - Ok(Some(ev)) => return Some((ev, rx)), - Err(broadcast::error::RecvError::Lagged(count)) => { - tracing::error!("stream lagged, skipped {count} events"); - continue; - } - _ => return None, - } - } - }) + pub fn reader(&self) -> Store { + self.reader.clone() } } -impl Stream for ReflectShared +impl Stream for ReflectHandle where K: Resource + Clone, K::DynamicType: Eq + std::hash::Hash + Clone + Default, - St: Stream, Error>>, + St: Stream>, { - type Item = St::Item; + type Item = Arc; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut me = self.project(); - me.stream.as_mut().poll_next(cx).map_ok(move |event| { - me.writer.apply_watcher_event(&event); - match &event { - Event::Applied(obj) | Event::Deleted(obj) => { - me.tx.send(Some(ObjectRef::from_obj(obj))).ok(); - } - Event::Restarted(obj_list) => { - for obj in obj_list.iter().map(ObjectRef::from_obj) { - 
me.tx.send(Some(obj)).ok(); - } - } - }; - event - }) + match ready!(me.stream.as_mut().poll_next(cx)) { + Some(obj_ref) => Poll::Ready(me.reader.get(&obj_ref)), + None => Poll::Ready(None), + } } } diff --git a/kube-runtime/src/utils/watch_ext.rs b/kube-runtime/src/utils/watch_ext.rs index 987b85365..ea4952bb5 100644 --- a/kube-runtime/src/utils/watch_ext.rs +++ b/kube-runtime/src/utils/watch_ext.rs @@ -3,6 +3,7 @@ use crate::utils::predicate::{Predicate, PredicateFilter}; #[cfg(feature = "unstable-runtime-subscribe")] use crate::utils::stream_subscribe::StreamSubscribe; use crate::{ + reflector::{ObjectRef, Store}, utils::{event_flatten::EventFlatten, event_modify::EventModify, stream_backoff::StreamBackoff}, watcher, }; @@ -14,7 +15,8 @@ use crate::watcher::DefaultBackoff; use backoff::backoff::Backoff; use futures::{Stream, TryStream}; -use super::reflect::ReflectShared; +use super::reflect::ReflectHandle; + /// Extension trait for streams returned by [`watcher`](watcher()) or [`reflector`](crate::reflector::reflector) pub trait WatchStreamExt: Stream { @@ -251,13 +253,13 @@ pub trait WatchStreamExt: Stream { Reflect::new(self, writer) } - fn reflect_shared(self, writer: Writer) -> ReflectShared + fn reflect_shared(self, reader: Store) -> ReflectHandle where - Self: Stream, watcher::Error>> + Sized, + Self: Stream> + Sized, K: Resource + Clone + 'static, K::DynamicType: Eq + std::hash::Hash + Clone, { - ReflectShared::new(self, writer) + ReflectHandle::new(self, reader) } } From 85347702830bf622d041e85699be9bcfeb160d31 Mon Sep 17 00:00:00 2001 From: Matei David Date: Sat, 2 Mar 2024 12:24:36 +0000 Subject: [PATCH 08/36] Switch to async_broadcast Signed-off-by: Matei David --- examples/shared_stream_controllers.rs | 122 ++++++++++++++++-------- kube-runtime/src/controller/mod.rs | 45 +++------ kube-runtime/src/reflector/mod.rs | 14 +-- kube-runtime/src/reflector/store.rs | 20 ---- kube-runtime/src/utils/reflect.rs | 128 ++++++++++++++++++++------ kube-runtime/src/utils/watch_ext.rs | 14 ++- 6 files changed, 209 insertions(+), 134 deletions(-) diff --git a/examples/shared_stream_controllers.rs b/examples/shared_stream_controllers.rs index 0fa5e3674..7cf13ccea 100644 --- a/examples/shared_stream_controllers.rs +++ b/examples/shared_stream_controllers.rs @@ -1,18 +1,15 @@ use std::{sync::Arc, time::Duration}; -use futures::{Stream, StreamExt, TryStream, TryStreamExt}; +use futures::StreamExt; use k8s_openapi::api::core::v1::{Pod, PodCondition, PodStatus}; use kube::{ api::{Patch, PatchParams}, core::ObjectMeta, - runtime::{ - controller::Action, - reflector::{shared_reflector, store::Writer}, - watcher, Config, Controller, WatchStreamExt, - }, + runtime::{controller::Action, reflector::store::Writer, watcher, Config, Controller, WatchStreamExt}, Api, Client, ResourceExt, }; -use tracing::{info, warn}; +use tokio::signal::unix::{signal, SignalKind}; +use tracing::{info, info_span, warn, Instrument}; use thiserror::Error; @@ -29,41 +26,76 @@ struct Data { #[tokio::main] async fn main() -> anyhow::Result<()> { tracing_subscriber::fmt::init(); - let client = Client::try_default().await?; + let client = Client::try_default().await?; let pods = Api::::namespaced(client.clone(), "default"); - - // ? 
let config = Config::default().concurrency(2); let ctx = Arc::new(Data { client }); - // (1): Create a store & have it transform the stream to return arcs + // (1): create a store let writer = Writer::::new(Default::default()); - let reader = writer.as_reader(); - let root = watcher(pods.clone(), Default::default()) - .default_backoff() - .reflect(writer); - let dup = root.subscribe().reflect_shared(reader.clone()); + // (2): split the stream: + // - create a handle that can be cloned to get more readers + // - pass through events from root stream through a reflector + // + // Note: if we wanted to, we could apply a backoff _before_ we spill into the reflector + let (subscriber, reflector) = watcher(pods.clone(), Default::default()).reflect_shared(writer, 256); + + // (3): schedule the root stream with the runtime + // - apply a backoff to the root stream + // - poll it to handle errors + // scheduling with the runtime ensures the stream will be polled continously and allow + // readers to make progress. tokio::spawn( - Controller::for_stream(root.applied_objects(), reader) - .with_config(config.clone()) - .shutdown_on_signal() - .run( - reconcile_metadata, - |_, _, _| Action::requeue(Duration::from_secs(1)), - ctx.clone(), - ) - .for_each(|res| async move { - match res { - Ok(v) => info!("reconciled {v:?}"), - Err(error) => warn!(%error, "failed to reconcile object"), + async move { + // Pin on the heap so we don't overflow our stack + // Put a backoff on it. + // - Depending on how we want to handle backpressure, the backoff could help to relax + // the flow of data + // i.e. the root stream has a buffer that objects get put into. When an object is in the + // buffer, it is cloned and sent to all readers. Once all readers have acked their copy, + // the item is removed from the buffer. + // + // A backoff here could ensure that when the buffer is full, we backpressure in the root + // stream by not consuming watcher output. We give clients enough time to make progress and + // ensure the next time the root stream is polled it can make progress by pushing into the + // buffer. 
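+            //
+            // A sketch of that buffering behavior, assuming `async_broadcast`
+            // semantics (comment only, not executed here):
+            //
+            //     let (tx, rx1) = async_broadcast::broadcast(1);
+            //     let rx2 = rx1.clone();
+            //     tx.broadcast("a").await.unwrap(); // buffer is now full
+            //     // A second `tx.broadcast("b").await` only resolves after
+            //     // both rx1 and rx2 have received "a": slow readers apply
+            //     // backpressure to the producer instead of losing events.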
+ let mut reflector = reflector.default_backoff().boxed(); + tracing::info!("Polling root"); + while let Some(next) = reflector.next().await { + match next { + Err(error) => tracing::error!(%error, "Received error from main watcher stream"), + _ => {} } - }), + } + } + .instrument(info_span!("root_stream")), ); - // (2): we can't share streams yet so we just use the same primitives - Controller::for_shared_stream(dup.reader(), dup, ()) + + // Create metadata controller to edit annotations + let reader = subscriber.reader(); + let metadata_controller = Controller::for_shared_stream(subscriber.clone(), reader) + .with_config(config.clone()) + .shutdown_on_signal() + .run( + reconcile_metadata, + |_, _, _| Action::requeue(Duration::from_secs(1)), + ctx.clone(), + ) + .for_each(|res| async move { + match res { + Ok(v) => info!("Reconciled {v:?}"), + Err(error) => warn!(%error, "Failed to reconcile object"), + } + }) + .instrument(info_span!("metadata_controller")); + tokio::spawn(metadata_controller); + + // Create status controller + let reader = subscriber.reader(); + let status_controller = Controller::for_shared_stream(subscriber, reader) .with_config(config) .shutdown_on_signal() .run( @@ -73,14 +105,32 @@ async fn main() -> anyhow::Result<()> { ) .for_each(|res| async move { match res { - Ok(v) => info!("reconcile status for {v:?}"), - Err(error) => warn!(%error, "failed to reconcile status for object"), + Ok(v) => info!("Reconciled {v:?}"), + Err(error) => warn!(%error, "Failed to reconcile object"), } }) - .await; + .instrument(info_span!("status_controller")); + tokio::spawn(status_controller); - // (3): Figure out how to use the same store and create a shared stream from - // the shared reflector :) + // Handle shutdown + // + // In a more nicely put together example we'd want to actually drain everything + // instead of having controllers manage signals on their own + // + // The lack of a drain abstraction atm made me skip it but when the example is ready we should + // consider handling shutdowns well to help users out + let mut interrupt = signal(SignalKind::interrupt())?; + let mut terminate = signal(SignalKind::terminate())?; + tokio::select! { + _ = interrupt.recv() => { + info!("Received SIGINT; terminating..."); + }, + + _ = terminate.recv() => { + info!("Received SIGTERM; terminating..."); + } + + } Ok(()) } diff --git a/kube-runtime/src/controller/mod.rs b/kube-runtime/src/controller/mod.rs index 0d5781eb7..52de72544 100644 --- a/kube-runtime/src/controller/mod.rs +++ b/kube-runtime/src/controller/mod.rs @@ -718,43 +718,20 @@ where } } - pub fn for_shared_ref( - trigger: impl Stream> + Send + 'static, - reader: Store, - dyntype: K::DynamicType, - ) -> Self { - let mut trigger_selector = stream::SelectAll::new(); - trigger_selector.push( - trigger - .map(move |obj| { - Ok(ReconcileRequest { - obj_ref: obj, - reason: ReconcileReason::Unknown, - }) - }) - .boxed(), - ); - - Self { - trigger_selector, - trigger_backoff: Box::::default(), - graceful_shutdown_selector: vec![ - // Fallback future, ensuring that we never terminate if no additional futures are added to the selector - future::pending().boxed(), - ], - forceful_shutdown_selector: vec![ - // Fallback future, ensuring that we never terminate if no additional futures are added to the selector - future::pending().boxed(), - ], - dyntype, - reader, - config: Default::default(), - } + // TODO: do an entrypoint for shared streams of owned objects + // + // Is it better to use a concrete type (i.e. 
a SubscribeHandle as a trigger) + // or to pass in the reader out-of-band? + pub fn for_shared_stream(trigger: impl Stream> + Send + 'static, reader: Store) -> Self + where + K::DynamicType: Default, + { + Self::for_shared_stream_with(trigger, reader, Default::default()) } - pub fn for_shared_stream( - reader: Store, + pub fn for_shared_stream_with( trigger: impl Stream> + Send + 'static, + reader: Store, dyntype: K::DynamicType, ) -> Self { let mut trigger_selector = stream::SelectAll::new(); diff --git a/kube-runtime/src/reflector/mod.rs b/kube-runtime/src/reflector/mod.rs index 7c6514062..d0a724b53 100644 --- a/kube-runtime/src/reflector/mod.rs +++ b/kube-runtime/src/reflector/mod.rs @@ -7,7 +7,7 @@ pub use self::object_ref::{Extra as ObjectRefExtra, ObjectRef}; use crate::watcher; use futures::{Stream, TryStreamExt}; use kube_client::Resource; -use std::{hash::Hash, sync::Arc}; +use std::hash::Hash; pub use store::{store, Store}; /// Cache objects from a [`watcher()`] stream into a local [`Store`] @@ -98,18 +98,6 @@ where stream.inspect_ok(move |event| writer.apply_watcher_event(event)) } -pub fn shared_reflector( - mut writer: store::Writer, - stream: W, -) -> impl Stream>>> -where - K: Resource + Clone, - K::DynamicType: Eq + Hash + Clone, - W: Stream>>, -{ - stream.map_ok(move |event| writer.apply_with_arc(event)) -} - #[cfg(test)] mod tests { use super::{reflector, store, ObjectRef}; diff --git a/kube-runtime/src/reflector/store.rs b/kube-runtime/src/reflector/store.rs index 6cc86cda0..c44eb9c0f 100644 --- a/kube-runtime/src/reflector/store.rs +++ b/kube-runtime/src/reflector/store.rs @@ -5,12 +5,10 @@ use crate::{ }; use ahash::AHashMap; use derivative::Derivative; -use futures::{stream, Stream}; use kube_client::Resource; use parking_lot::RwLock; use std::{fmt::Debug, hash::Hash, sync::Arc}; use thiserror::Error; -use tokio::sync::broadcast; type Cache = Arc, Arc>>>; @@ -27,7 +25,6 @@ where dyntype: K::DynamicType, ready_tx: Option>, ready_rx: Arc>, - pub(crate) broadcast_tx: broadcast::Sender>>, } impl Writer @@ -40,13 +37,11 @@ where /// `k8s_openapi` types) you can use `Default` instead. 
pub fn new(dyntype: K::DynamicType) -> Self { let (ready_tx, ready_rx) = DelayedInit::new(); - let (broadcast_tx, _) = broadcast::channel(10); Writer { store: Default::default(), dyntype, ready_tx: Some(ready_tx), ready_rx: Arc::new(ready_rx), - broadcast_tx, } } @@ -62,21 +57,6 @@ where } } - pub(crate) fn subscribe(&self) -> impl Stream> { - stream::unfold(self.broadcast_tx.subscribe(), |mut rx| async { - loop { - match rx.recv().await { - Ok(Some(ev)) => return Some((ev, rx)), - Err(broadcast::error::RecvError::Lagged(count)) => { - tracing::error!("stream lagged, skipped {count} events"); - continue; - } - _ => return None, - } - } - }) - } - /// Applies a single watcher event to the store pub fn apply_watcher_event(&mut self, event: &watcher::Event) { match event { diff --git a/kube-runtime/src/utils/reflect.rs b/kube-runtime/src/utils/reflect.rs index 7ff4e13f2..6d44f48dd 100644 --- a/kube-runtime/src/utils/reflect.rs +++ b/kube-runtime/src/utils/reflect.rs @@ -4,14 +4,14 @@ use core::{ }; use std::sync::Arc; -use futures::{poll, ready, stream, Stream, TryStream}; +use futures::{ready, Future, Stream, TryStream}; use pin_project::pin_project; -use tokio::sync::broadcast; use crate::{ reflector::{store::Writer, ObjectRef, Store}, - watcher::{self, Error, Event}, + watcher::{Error, Event}, }; +use async_broadcast::{InactiveReceiver, Receiver, Sender}; use kube_client::Resource; /// Stream returned by the [`reflect`](super::WatchStreamExt::reflect) method @@ -35,10 +35,6 @@ where pub(super) fn new(stream: St, writer: Writer) -> Reflect { Self { stream, writer } } - - pub fn subscribe(&self) -> impl Stream> { - self.writer.subscribe() - } } impl Stream for Reflect @@ -53,41 +49,120 @@ where let mut me = self.project(); me.stream.as_mut().poll_next(cx).map_ok(move |event| { me.writer.apply_watcher_event(&event); - match &event { - Event::Applied(obj) | Event::Deleted(obj) => { - me.writer.broadcast_tx.send(Some(ObjectRef::from_obj(obj))).ok(); - } - Event::Restarted(obj_list) => { - for obj in obj_list.iter().map(ObjectRef::from_obj) { - me.writer.broadcast_tx.send(Some(obj)).ok(); - } - } - }; event }) } } +/// Stream returned by the [`reflect`](super::WatchStreamExt::reflect) method #[pin_project] -pub struct ReflectHandle +pub struct SharedReflect where - St: Stream, K: Resource + Clone + 'static, K::DynamicType: Eq + std::hash::Hash + Clone, { #[pin] stream: St, + writer: Writer, + tx: Sender>, + rx: InactiveReceiver>, +} + +impl SharedReflect +where + St: TryStream>, + K: Resource + Clone, + K::DynamicType: Eq + std::hash::Hash + Clone, +{ + pub(super) fn new(stream: St, writer: Writer, buf_size: usize) -> SharedReflect { + let (tx, rx) = async_broadcast::broadcast(buf_size); + Self { + stream, + writer, + tx, + rx: rx.deactivate(), + } + } + + pub fn subscribe(&self) -> SubscribeHandle { + // Note: broadcast::Sender::new_receiver() will return a new receiver + // that _will not_ replay any messages in the channel, effectively + // starting from the latest message. + // + // Since we create a reader and a writer when calling reflect_shared() + // this should be fine. All subsequent clones should go through + // SubscribeHandle::clone() to get a receiver that replays all of the + // messages in the channel. 
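+        //
+        // In short (assumed semantics; `shared` stands for a `SharedReflect`):
+        //
+        //     let first = shared.subscribe(); // starts from the latest message
+        //     let second = first.clone();     // keeps `first`'s position and
+        //                                     // replays what is still buffered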
+ SubscribeHandle::new(self.writer.as_reader(), self.tx.new_receiver()) + } +} + +impl Stream for SharedReflect +where + K: Resource + Clone, + K::DynamicType: Eq + std::hash::Hash + Clone + Default, + St: Stream, Error>>, +{ + type Item = Result, Error>; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut me = self.project(); + let next = me.stream.as_mut().poll_next(cx).map_ok(move |event| { + me.writer.apply_watcher_event(&event); + event + }); + let ev = match ready!(next) { + Some(Ok(event)) => event, + None => return Poll::Ready(None), + Some(Err(error)) => return Poll::Ready(Some(Err(error))), + }; + + match &ev { + Event::Applied(obj) | Event::Deleted(obj) => { + // No error handling for now + // Future resolves to a Result> if explicitly marked + // as non-blocking + let _ = ready!(me.tx.broadcast(ObjectRef::from_obj(obj)).as_mut().poll(cx)); + } + Event::Restarted(obj_list) => { + for obj in obj_list.iter().map(ObjectRef::from_obj) { + let _ = ready!(me.tx.broadcast(obj).as_mut().poll(cx)); + } + } + } + + Poll::Ready(Some(Ok(ev))) + } +} + +#[pin_project] +pub struct SubscribeHandle +where + K: Resource + Clone + 'static, + K::DynamicType: Eq + std::hash::Hash + Clone, +{ + #[pin] + rx: Receiver>, reader: Store, } -impl ReflectHandle +impl Clone for SubscribeHandle +where + K: Resource + Clone + 'static, + K::DynamicType: Eq + std::hash::Hash + Clone, +{ + fn clone(&self) -> Self { + SubscribeHandle::new(self.reader.clone(), self.rx.clone()) + } +} + +impl SubscribeHandle where - St: Stream>, K: Resource + Clone, K::DynamicType: Eq + std::hash::Hash + Clone, { - pub(super) fn new(stream: St, reader: Store) -> ReflectHandle { - Self { stream, reader } + pub(super) fn new(reader: Store, rx: Receiver>) -> SubscribeHandle { + Self { reader, rx } } pub fn reader(&self) -> Store { @@ -95,17 +170,18 @@ where } } -impl Stream for ReflectHandle +impl Stream for SubscribeHandle where K: Resource + Clone, K::DynamicType: Eq + std::hash::Hash + Clone + Default, - St: Stream>, { type Item = Arc; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut me = self.project(); - match ready!(me.stream.as_mut().poll_next(cx)) { + // If we use try_recv() here we could return Poll::Ready(Error) and let + // the controller's trigger_backoff come into play (?) 
+ match ready!(me.rx.as_mut().poll_next(cx)) { Some(obj_ref) => Poll::Ready(me.reader.get(&obj_ref)), None => Poll::Ready(None), } diff --git a/kube-runtime/src/utils/watch_ext.rs b/kube-runtime/src/utils/watch_ext.rs index ea4952bb5..b485d2092 100644 --- a/kube-runtime/src/utils/watch_ext.rs +++ b/kube-runtime/src/utils/watch_ext.rs @@ -3,7 +3,6 @@ use crate::utils::predicate::{Predicate, PredicateFilter}; #[cfg(feature = "unstable-runtime-subscribe")] use crate::utils::stream_subscribe::StreamSubscribe; use crate::{ - reflector::{ObjectRef, Store}, utils::{event_flatten::EventFlatten, event_modify::EventModify, stream_backoff::StreamBackoff}, watcher, }; @@ -15,7 +14,7 @@ use crate::watcher::DefaultBackoff; use backoff::backoff::Backoff; use futures::{Stream, TryStream}; -use super::reflect::ReflectHandle; +use super::reflect::{SharedReflect, SubscribeHandle}; /// Extension trait for streams returned by [`watcher`](watcher()) or [`reflector`](crate::reflector::reflector) @@ -253,13 +252,18 @@ pub trait WatchStreamExt: Stream { Reflect::new(self, writer) } - fn reflect_shared(self, reader: Store) -> ReflectHandle + fn reflect_shared( + self, + writer: Writer, + buf_size: usize, + ) -> (SubscribeHandle, SharedReflect) where - Self: Stream> + Sized, + Self: Stream>> + Sized, K: Resource + Clone + 'static, K::DynamicType: Eq + std::hash::Hash + Clone, { - ReflectHandle::new(self, reader) + let reflect = SharedReflect::new(self, writer, buf_size); + (reflect.subscribe(), reflect) } } From 9bbe8e12ded2605c9c38844a038d2f7175e7da7a Mon Sep 17 00:00:00 2001 From: Matei David Date: Sat, 2 Mar 2024 12:27:56 +0000 Subject: [PATCH 09/36] Remove old, unused code Signed-off-by: Matei David --- kube-runtime/Cargo.toml | 1 + kube-runtime/src/reflector/store.rs | 38 ----------------------------- 2 files changed, 1 insertion(+), 38 deletions(-) diff --git a/kube-runtime/Cargo.toml b/kube-runtime/Cargo.toml index fb60411e5..3784f2773 100644 --- a/kube-runtime/Cargo.toml +++ b/kube-runtime/Cargo.toml @@ -44,6 +44,7 @@ thiserror = "1.0.29" backoff = "0.4.0" async-trait = "0.1.64" hashbrown = "0.14.0" +async-broadcast = "0.7.0" [dependencies.k8s-openapi] version = "0.21.0" diff --git a/kube-runtime/src/reflector/store.rs b/kube-runtime/src/reflector/store.rs index c44eb9c0f..9085ab69b 100644 --- a/kube-runtime/src/reflector/store.rs +++ b/kube-runtime/src/reflector/store.rs @@ -88,44 +88,6 @@ where ready_tx.init(()) } } - - /// Applies a single watcher event to the store - pub fn apply_with_arc(&mut self, event: watcher::Event) -> watcher::Event> { - let ev = match event { - watcher::Event::Applied(obj) => { - let key = ObjectRef::from_obj_with(&obj, self.dyntype.clone()); - let obj = Arc::new(obj.clone()); - self.store.write().insert(key, obj.clone()); - watcher::Event::Applied(obj) - } - watcher::Event::Deleted(obj) => { - let key = ObjectRef::from_obj_with(&obj, self.dyntype.clone()); - self.store.write().remove(&key); - watcher::Event::Deleted(Arc::new(obj)) - } - watcher::Event::Restarted(new_objs) => { - let new_objs = new_objs - .iter() - .map(|obj| { - ( - ObjectRef::from_obj_with(obj, self.dyntype.clone()), - Arc::new(obj.clone()), - ) - }) - .collect::>(); - let objs_arced = new_objs.values().map(|obj| obj.to_owned()).collect(); - *self.store.write() = new_objs; - watcher::Event::Restarted(objs_arced) - } - }; - - // Mark as ready after the first event, "releasing" any calls to Store::wait_until_ready() - if let Some(ready_tx) = self.ready_tx.take() { - ready_tx.init(()) - } - - ev - } } impl Default 
for Writer where From 3f874ce60cfecce3eca24b3d19e1d61d7582acd0 Mon Sep 17 00:00:00 2001 From: Matei David Date: Sat, 2 Mar 2024 12:28:47 +0000 Subject: [PATCH 10/36] Remove unused examples Signed-off-by: Matei David --- examples/Cargo.toml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/examples/Cargo.toml b/examples/Cargo.toml index d1b11886b..36c9d125f 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -218,10 +218,6 @@ path = "custom_client_trace.rs" name = "secret_syncer" path = "secret_syncer.rs" -[[example]] -name = "node_del" -path = "node_del.rs" - [[example]] name = "pod_shell_crossterm" path = "pod_shell_crossterm.rs" From 1e1e3477fc2e2681ec5130dc64df17eceff108df Mon Sep 17 00:00:00 2001 From: Matei David Date: Thu, 7 Mar 2024 20:01:09 +0000 Subject: [PATCH 11/36] Gotta state machine this stuff Signed-off-by: Matei David --- kube-runtime/src/utils/mod.rs | 2 + kube-runtime/src/utils/reflect.rs | 38 ++- kube-runtime/src/utils/reflect_subscribe.rs | 247 ++++++++++++++++++++ 3 files changed, 285 insertions(+), 2 deletions(-) create mode 100644 kube-runtime/src/utils/reflect_subscribe.rs diff --git a/kube-runtime/src/utils/mod.rs b/kube-runtime/src/utils/mod.rs index bdf85b227..4fdc3cec4 100644 --- a/kube-runtime/src/utils/mod.rs +++ b/kube-runtime/src/utils/mod.rs @@ -6,6 +6,7 @@ mod event_flatten; mod event_modify; #[cfg(feature = "unstable-runtime-predicates")] mod predicate; mod reflect; +mod reflect_subscribe; mod stream_backoff; #[cfg(feature = "unstable-runtime-subscribe")] pub mod stream_subscribe; mod watch_ext; @@ -16,6 +17,7 @@ pub use event_modify::EventModify; #[cfg(feature = "unstable-runtime-predicates")] pub use predicate::{predicates, Predicate, PredicateFilter}; pub use reflect::Reflect; +pub use reflect_subscribe::{SharedReflect, SubscribeHandle}; pub use stream_backoff::StreamBackoff; #[cfg(feature = "unstable-runtime-subscribe")] pub use stream_subscribe::StreamSubscribe; diff --git a/kube-runtime/src/utils/reflect.rs b/kube-runtime/src/utils/reflect.rs index 6d44f48dd..cd92fa58b 100644 --- a/kube-runtime/src/utils/reflect.rs +++ b/kube-runtime/src/utils/reflect.rs @@ -6,6 +6,7 @@ use std::sync::Arc; use futures::{ready, Future, Stream, TryStream}; use pin_project::pin_project; +use tokio::time; use crate::{ reflector::{store::Writer, ObjectRef, Store}, @@ -66,6 +67,10 @@ where writer: Writer, tx: Sender>, rx: InactiveReceiver>, + + #[pin] + sleep: time::Sleep, + deadline: time::Duration, } impl SharedReflect @@ -81,6 +86,8 @@ where writer, tx, rx: rx.deactivate(), + sleep: time::sleep(time::Duration::ZERO), + deadline: time::Duration::from_secs(2), } } @@ -117,20 +124,47 @@ where Some(Err(error)) => return Poll::Ready(Some(Err(error))), }; + let mut futures = Vec::new(); match &ev { Event::Applied(obj) | Event::Deleted(obj) => { // No error handling for now // Future resolves to a Result> if explicitly marked // as non-blocking - let _ = ready!(me.tx.broadcast(ObjectRef::from_obj(obj)).as_mut().poll(cx)); + futures.push(( + ObjectRef::from_obj(obj), + me.tx.broadcast(ObjectRef::from_obj(obj)), + )); + //let _ = ready!(me.tx.broadcast(ObjectRef::from_obj(obj)).as_mut().poll(cx)); } Event::Restarted(obj_list) => { for obj in obj_list.iter().map(ObjectRef::from_obj) { - let _ = ready!(me.tx.broadcast(obj).as_mut().poll(cx)); + futures.push((obj.clone(), me.tx.broadcast(obj))); + + //let _ = ready!(me.tx.broadcast(obj).as_mut().poll(cx)); + } + } + } + + tracing::info!("WE POLLING AGAIN?"); + me.sleep.as_mut().reset(time::Instant::now() + 
*me.deadline); + for (i, f) in futures.into_iter().enumerate() { + let obj = f.0; + let mut fut = f.1; + let name = obj.name; + tracing::info!("PROCESSING {name}, iteration {i}"); + match fut.as_mut().poll(cx) { + Poll::Ready(_) => { + tracing::info!("READY"); + } + Poll::Pending => { + tracing::info!("GOT POLLED"); + ready!(me.sleep.as_mut().poll(cx)); + tracing::info!("BACKING OFF..."); } } } + tracing::info!("Returned"); Poll::Ready(Some(Ok(ev))) } } diff --git a/kube-runtime/src/utils/reflect_subscribe.rs b/kube-runtime/src/utils/reflect_subscribe.rs new file mode 100644 index 000000000..7fdc84fa7 --- /dev/null +++ b/kube-runtime/src/utils/reflect_subscribe.rs @@ -0,0 +1,247 @@ +use core::{ + pin::Pin, + task::{Context, Poll}, +}; +use std::{collections::VecDeque, sync::Arc}; + +use futures::{ready, Future, Stream, TryStream}; +use pin_project::pin_project; +use tokio::time; + +use crate::{ + reflector::{store::Writer, ObjectRef, Store}, + watcher::{Error, Event}, +}; +use async_broadcast::{InactiveReceiver, Receiver, Sender}; +use kube_client::Resource; + + +/// Stream returned by the [`reflect`](super::WatchStreamExt::reflect) method +#[pin_project] +pub struct SharedReflect<'a, St, K> +where + K: Resource + Clone + 'static, + K::DynamicType: Eq + std::hash::Hash + Clone, +{ + #[pin] + stream: St, + writer: Writer, + tx: Sender>, + rx: InactiveReceiver>, + + #[pin] + state: BroadcastState<'a, K>, + deadline: time::Duration, +} + +#[pin_project(project = BroadcastStateProj)] +enum BroadcastState<'a, K> +where + K: Resource + Clone + 'static, + K::DynamicType: Eq + std::hash::Hash + Clone, +{ + Reading, + Writing { + #[pin] + sleep: time::Sleep, + #[pin] + send_fut: async_broadcast::Send<'a, ObjectRef>, + event: Event, + }, + WritingBuffered { + #[pin] + sleep: time::Sleep, + #[pin] + send_fut: async_broadcast::Send<'a, ObjectRef>, + items: VecDeque>, + event: Event, + }, +} + +impl<'a, St, K> SharedReflect<'a, St, K> +where + St: TryStream>, + K: Resource + Clone, + K::DynamicType: Eq + std::hash::Hash + Clone, +{ + pub(super) fn new(stream: St, writer: Writer, buf_size: usize) -> SharedReflect<'a, St, K> { + let (tx, rx) = async_broadcast::broadcast(buf_size); + Self { + stream, + writer, + tx, + rx: rx.deactivate(), + state: BroadcastState::Reading, + deadline: time::Duration::from_secs(2), + } + } + + pub fn subscribe(&self) -> SubscribeHandle { + // Note: broadcast::Sender::new_receiver() will return a new receiver + // that _will not_ replay any messages in the channel, effectively + // starting from the latest message. + // + // Since we create a reader and a writer when calling reflect_shared() + // this should be fine. All subsequent clones should go through + // SubscribeHandle::clone() to get a receiver that replays all of the + // messages in the channel. 
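+        //
+        // (The `InactiveReceiver` stored on `Self` keeps the broadcast
+        // channel open without consuming buffer space, so new handles can
+        // be created for as long as this stream is alive.)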
+ SubscribeHandle::new(self.writer.as_reader(), self.tx.new_receiver()) + } +} + +impl Stream for SharedReflect<'_, St, K> +where + K: Resource + Clone, + K::DynamicType: Eq + std::hash::Hash + Clone + Default, + St: Stream, Error>>, +{ + type Item = Result, Error>; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut this = self.project(); + loop { + match this.state.as_mut().project() { + // Continue reading + BroadcastStateProj::Reading => {} + BroadcastStateProj::Writing { + mut sleep, + mut send_fut, + event, + } => match send_fut.as_mut().poll(cx) { + Poll::Ready(_) => { + tracing::trace!("Broadcast value"); + this.state.set(BroadcastState::Reading); + return Poll::Ready(Some(Ok(event.clone()))); + } + Poll::Pending => { + ready!(sleep.as_mut().poll(cx)); + tracing::debug!("Stream is stuck, clear your buffer"); + sleep.as_mut().reset(time::Instant::now() + *this.deadline); + } + }, + BroadcastStateProj::WritingBuffered { + mut sleep, + mut send_fut, + items, + event, + } => match send_fut.as_mut().poll(cx) { + Poll::Ready(_) => { + let next = items.pop_front().map(|obj| this.tx.broadcast_direct(obj)); + let left = items.len(); + tracing::trace!(items_left = %left, "Broadcast buffered value"); + match next { + Some(next) => this.state.set(BroadcastState::WritingBuffered { + sleep: time::sleep(*this.deadline), + send_fut: next, + items: *items, + event: *event, + }), + Some(next) if items.is_empty() => this.state.set(BroadcastState::Writing { + sleep: time::sleep(*this.deadline), + send_fut: next, + event: *event, + }), + + None => {} + } + return Poll::Pending; + } + Poll::Pending => { + ready!(sleep.as_mut().poll(cx)); + tracing::debug!("Stream is stuck, clear your buffer"); + sleep.as_mut().reset(time::Instant::now() + *this.deadline); + } + }, + } + + + let next = this.stream.as_mut().poll_next(cx).map_ok(move |event| { + this.writer.apply_watcher_event(&event); + event + }); + + let ev = match ready!(next) { + Some(Ok(event)) => event, + None => return Poll::Ready(None), + Some(Err(error)) => return Poll::Ready(Some(Err(error))), + }; + + + match &ev { + Event::Applied(obj) | Event::Deleted(obj) => this.state.set(BroadcastState::Writing { + sleep: time::sleep(*this.deadline), + send_fut: this.tx.broadcast_direct(ObjectRef::from_obj(obj)), + event: ev, + }), + Event::Restarted(obj_list) => { + let mut obj_list = obj_list + .iter() + .map(ObjectRef::from_obj) + .collect::>>(); + let next = obj_list.pop_front().map(|obj| this.tx.broadcast_direct(obj)); + if let Some(next) = next { + this.state.set(BroadcastState::WritingBuffered { + sleep: time::sleep(*this.deadline), + send_fut: next, + items: obj_list, + event: ev, + }) + } + } + } + } + } +} + +#[pin_project] +pub struct SubscribeHandle +where + K: Resource + Clone + 'static, + K::DynamicType: Eq + std::hash::Hash + Clone, +{ + #[pin] + rx: Receiver>, + reader: Store, +} + +impl Clone for SubscribeHandle +where + K: Resource + Clone + 'static, + K::DynamicType: Eq + std::hash::Hash + Clone, +{ + fn clone(&self) -> Self { + SubscribeHandle::new(self.reader.clone(), self.rx.clone()) + } +} + +impl SubscribeHandle +where + K: Resource + Clone, + K::DynamicType: Eq + std::hash::Hash + Clone, +{ + pub(super) fn new(reader: Store, rx: Receiver>) -> SubscribeHandle { + Self { reader, rx } + } + + pub fn reader(&self) -> Store { + self.reader.clone() + } +} + +impl Stream for SubscribeHandle +where + K: Resource + Clone, + K::DynamicType: Eq + std::hash::Hash + Clone + Default, +{ + type Item = Arc; + + fn 
poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut me = self.project(); + // If we use try_recv() here we could return Poll::Ready(Error) and let + // the controller's trigger_backoff come into play (?) + match ready!(me.rx.as_mut().poll_next(cx)) { + Some(obj_ref) => Poll::Ready(me.reader.get(&obj_ref)), + None => Poll::Ready(None), + } + } +} From 15f6e1dba7e2355caf45610731b5275bef005cf4 Mon Sep 17 00:00:00 2001 From: Matei David Date: Fri, 8 Mar 2024 11:08:49 +0000 Subject: [PATCH 12/36] Take 1 with try_recv Signed-off-by: Matei David --- examples/shared_stream_controllers.rs | 2 +- kube-runtime/src/utils/reflect.rs | 174 +------------------- kube-runtime/src/utils/reflect_subscribe.rs | 165 ++++++++++--------- kube-runtime/src/utils/watch_ext.rs | 2 +- 4 files changed, 90 insertions(+), 253 deletions(-) diff --git a/examples/shared_stream_controllers.rs b/examples/shared_stream_controllers.rs index 7cf13ccea..0537708a5 100644 --- a/examples/shared_stream_controllers.rs +++ b/examples/shared_stream_controllers.rs @@ -40,7 +40,7 @@ async fn main() -> anyhow::Result<()> { // - pass through events from root stream through a reflector // // Note: if we wanted to, we could apply a backoff _before_ we spill into the reflector - let (subscriber, reflector) = watcher(pods.clone(), Default::default()).reflect_shared(writer, 256); + let (subscriber, reflector) = watcher(pods.clone(), Default::default()).reflect_shared(writer, 1); // (3): schedule the root stream with the runtime // - apply a backoff to the root stream diff --git a/kube-runtime/src/utils/reflect.rs b/kube-runtime/src/utils/reflect.rs index cd92fa58b..babf63e9a 100644 --- a/kube-runtime/src/utils/reflect.rs +++ b/kube-runtime/src/utils/reflect.rs @@ -2,17 +2,14 @@ use core::{ pin::Pin, task::{Context, Poll}, }; -use std::sync::Arc; -use futures::{ready, Future, Stream, TryStream}; +use futures::{Stream, TryStream}; use pin_project::pin_project; -use tokio::time; use crate::{ - reflector::{store::Writer, ObjectRef, Store}, + reflector::store::Writer, watcher::{Error, Event}, }; -use async_broadcast::{InactiveReceiver, Receiver, Sender}; use kube_client::Resource; /// Stream returned by the [`reflect`](super::WatchStreamExt::reflect) method @@ -55,173 +52,6 @@ where } } -/// Stream returned by the [`reflect`](super::WatchStreamExt::reflect) method -#[pin_project] -pub struct SharedReflect -where - K: Resource + Clone + 'static, - K::DynamicType: Eq + std::hash::Hash + Clone, -{ - #[pin] - stream: St, - writer: Writer, - tx: Sender>, - rx: InactiveReceiver>, - - #[pin] - sleep: time::Sleep, - deadline: time::Duration, -} - -impl SharedReflect -where - St: TryStream>, - K: Resource + Clone, - K::DynamicType: Eq + std::hash::Hash + Clone, -{ - pub(super) fn new(stream: St, writer: Writer, buf_size: usize) -> SharedReflect { - let (tx, rx) = async_broadcast::broadcast(buf_size); - Self { - stream, - writer, - tx, - rx: rx.deactivate(), - sleep: time::sleep(time::Duration::ZERO), - deadline: time::Duration::from_secs(2), - } - } - - pub fn subscribe(&self) -> SubscribeHandle { - // Note: broadcast::Sender::new_receiver() will return a new receiver - // that _will not_ replay any messages in the channel, effectively - // starting from the latest message. - // - // Since we create a reader and a writer when calling reflect_shared() - // this should be fine. All subsequent clones should go through - // SubscribeHandle::clone() to get a receiver that replays all of the - // messages in the channel. 
- SubscribeHandle::new(self.writer.as_reader(), self.tx.new_receiver()) - } -} - -impl Stream for SharedReflect -where - K: Resource + Clone, - K::DynamicType: Eq + std::hash::Hash + Clone + Default, - St: Stream, Error>>, -{ - type Item = Result, Error>; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut me = self.project(); - let next = me.stream.as_mut().poll_next(cx).map_ok(move |event| { - me.writer.apply_watcher_event(&event); - event - }); - let ev = match ready!(next) { - Some(Ok(event)) => event, - None => return Poll::Ready(None), - Some(Err(error)) => return Poll::Ready(Some(Err(error))), - }; - - let mut futures = Vec::new(); - match &ev { - Event::Applied(obj) | Event::Deleted(obj) => { - // No error handling for now - // Future resolves to a Result> if explicitly marked - // as non-blocking - futures.push(( - ObjectRef::from_obj(obj), - me.tx.broadcast(ObjectRef::from_obj(obj)), - )); - //let _ = ready!(me.tx.broadcast(ObjectRef::from_obj(obj)).as_mut().poll(cx)); - } - Event::Restarted(obj_list) => { - for obj in obj_list.iter().map(ObjectRef::from_obj) { - futures.push((obj.clone(), me.tx.broadcast(obj))); - - //let _ = ready!(me.tx.broadcast(obj).as_mut().poll(cx)); - } - } - } - - tracing::info!("WE POLLING AGAIN?"); - me.sleep.as_mut().reset(time::Instant::now() + *me.deadline); - for (i, f) in futures.into_iter().enumerate() { - let obj = f.0; - let mut fut = f.1; - let name = obj.name; - tracing::info!("PROCESSING {name}, iteration {i}"); - match fut.as_mut().poll(cx) { - Poll::Ready(_) => { - tracing::info!("READY"); - } - Poll::Pending => { - tracing::info!("GOT POLLED"); - ready!(me.sleep.as_mut().poll(cx)); - tracing::info!("BACKING OFF..."); - } - } - } - - tracing::info!("Returned"); - Poll::Ready(Some(Ok(ev))) - } -} - -#[pin_project] -pub struct SubscribeHandle -where - K: Resource + Clone + 'static, - K::DynamicType: Eq + std::hash::Hash + Clone, -{ - #[pin] - rx: Receiver>, - reader: Store, -} - -impl Clone for SubscribeHandle -where - K: Resource + Clone + 'static, - K::DynamicType: Eq + std::hash::Hash + Clone, -{ - fn clone(&self) -> Self { - SubscribeHandle::new(self.reader.clone(), self.rx.clone()) - } -} - -impl SubscribeHandle -where - K: Resource + Clone, - K::DynamicType: Eq + std::hash::Hash + Clone, -{ - pub(super) fn new(reader: Store, rx: Receiver>) -> SubscribeHandle { - Self { reader, rx } - } - - pub fn reader(&self) -> Store { - self.reader.clone() - } -} - -impl Stream for SubscribeHandle -where - K: Resource + Clone, - K::DynamicType: Eq + std::hash::Hash + Clone + Default, -{ - type Item = Arc; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut me = self.project(); - // If we use try_recv() here we could return Poll::Ready(Error) and let - // the controller's trigger_backoff come into play (?) 
- match ready!(me.rx.as_mut().poll_next(cx)) { - Some(obj_ref) => Poll::Ready(me.reader.get(&obj_ref)), - None => Poll::Ready(None), - } - } -} - #[cfg(test)] pub(crate) mod test { diff --git a/kube-runtime/src/utils/reflect_subscribe.rs b/kube-runtime/src/utils/reflect_subscribe.rs index 7fdc84fa7..65327f293 100644 --- a/kube-runtime/src/utils/reflect_subscribe.rs +++ b/kube-runtime/src/utils/reflect_subscribe.rs @@ -7,6 +7,7 @@ use std::{collections::VecDeque, sync::Arc}; use futures::{ready, Future, Stream, TryStream}; use pin_project::pin_project; use tokio::time; +use tracing::info; use crate::{ reflector::{store::Writer, ObjectRef, Store}, @@ -18,7 +19,7 @@ use kube_client::Resource; /// Stream returned by the [`reflect`](super::WatchStreamExt::reflect) method #[pin_project] -pub struct SharedReflect<'a, St, K> +pub struct SharedReflect where K: Resource + Clone + 'static, K::DynamicType: Eq + std::hash::Hash + Clone, @@ -30,41 +31,32 @@ where rx: InactiveReceiver>, #[pin] - state: BroadcastState<'a, K>, + state: BroadcastState, deadline: time::Duration, } #[pin_project(project = BroadcastStateProj)] -enum BroadcastState<'a, K> +enum BroadcastState where K: Resource + Clone + 'static, K::DynamicType: Eq + std::hash::Hash + Clone, { Reading, - Writing { + BlockedOnWrite { #[pin] sleep: time::Sleep, - #[pin] - send_fut: async_broadcast::Send<'a, ObjectRef>, - event: Event, - }, - WritingBuffered { - #[pin] - sleep: time::Sleep, - #[pin] - send_fut: async_broadcast::Send<'a, ObjectRef>, - items: VecDeque>, + buffer: VecDeque>, event: Event, }, } -impl<'a, St, K> SharedReflect<'a, St, K> +impl SharedReflect where St: TryStream>, K: Resource + Clone, K::DynamicType: Eq + std::hash::Hash + Clone, { - pub(super) fn new(stream: St, writer: Writer, buf_size: usize) -> SharedReflect<'a, St, K> { + pub(super) fn new(stream: St, writer: Writer, buf_size: usize) -> SharedReflect { let (tx, rx) = async_broadcast::broadcast(buf_size); Self { stream, @@ -89,7 +81,7 @@ where } } -impl Stream for SharedReflect<'_, St, K> +impl Stream for SharedReflect where K: Resource + Clone, K::DynamicType: Eq + std::hash::Hash + Clone + Default, @@ -99,63 +91,50 @@ where fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); + info!("Polling"); loop { match this.state.as_mut().project() { // Continue reading BroadcastStateProj::Reading => {} - BroadcastStateProj::Writing { + BroadcastStateProj::BlockedOnWrite { mut sleep, - mut send_fut, + buffer, event, - } => match send_fut.as_mut().poll(cx) { - Poll::Ready(_) => { - tracing::trace!("Broadcast value"); - this.state.set(BroadcastState::Reading); - return Poll::Ready(Some(Ok(event.clone()))); - } - Poll::Pending => { - ready!(sleep.as_mut().poll(cx)); - tracing::debug!("Stream is stuck, clear your buffer"); - sleep.as_mut().reset(time::Instant::now() + *this.deadline); - } - }, - BroadcastStateProj::WritingBuffered { - mut sleep, - mut send_fut, - items, - event, - } => match send_fut.as_mut().poll(cx) { - Poll::Ready(_) => { - let next = items.pop_front().map(|obj| this.tx.broadcast_direct(obj)); - let left = items.len(); - tracing::trace!(items_left = %left, "Broadcast buffered value"); - match next { - Some(next) => this.state.set(BroadcastState::WritingBuffered { - sleep: time::sleep(*this.deadline), - send_fut: next, - items: *items, - event: *event, - }), - Some(next) if items.is_empty() => this.state.set(BroadcastState::Writing { - sleep: time::sleep(*this.deadline), - send_fut: next, - event: *event, - }), - - 
None => {} + } => { + loop { + let c = buffer.len(); + info!(count = %c, "Starting loop"); + if buffer.is_empty() { + let event = event.to_owned(); + info!("Switched to Reading"); + this.state.set(BroadcastState::Reading); + return Poll::Ready(Some(Ok(event))); + } + let next = buffer.pop_front().unwrap(); + match this.tx.try_broadcast(next) { + Ok(_) => { + let c = buffer.len(); + info!(count = %c, "Sent it"); + } + Err(async_broadcast::TrySendError::Full(msg)) => { + let c = buffer.len(); + info!(count = %c, "oh nooo"); + // Enqueue value back up + buffer.push_front(msg); + tracing::info!("Getting ready to be slept"); + ready!(sleep.as_mut().poll(cx)); + tracing::info!("Stream is stuck"); + // Reset timer and re-start loop. + sleep.as_mut().reset(time::Instant::now() + *this.deadline); + return Poll::Pending; + } + _ => {} } - return Poll::Pending; - } - Poll::Pending => { - ready!(sleep.as_mut().poll(cx)); - tracing::debug!("Stream is stuck, clear your buffer"); - sleep.as_mut().reset(time::Instant::now() + *this.deadline); } - }, + } } - - let next = this.stream.as_mut().poll_next(cx).map_ok(move |event| { + let next = this.stream.as_mut().poll_next(cx).map_ok(|event| { this.writer.apply_watcher_event(&event); event }); @@ -167,28 +146,56 @@ where }; - match &ev { - Event::Applied(obj) | Event::Deleted(obj) => this.state.set(BroadcastState::Writing { - sleep: time::sleep(*this.deadline), - send_fut: this.tx.broadcast_direct(ObjectRef::from_obj(obj)), - event: ev, - }), + let buf = match &ev { + Event::Applied(obj) | Event::Deleted(obj) => { + info!("Processing Applied | Deleted event"); + let obj_ref = ObjectRef::from_obj(obj); + match this.tx.try_broadcast(obj_ref) { + Ok(_) => { + info!("First try in single event"); + return Poll::Ready(Some(Ok(ev))); + } + Err(async_broadcast::TrySendError::Full(msg)) => { + info!("oh nooo, switch states"); + let mut buf = VecDeque::new(); + buf.push_back(msg); + buf + } + _ => return Poll::Pending, + } + } Event::Restarted(obj_list) => { + info!("Processing restarted event"); let mut obj_list = obj_list .iter() .map(ObjectRef::from_obj) .collect::>>(); - let next = obj_list.pop_front().map(|obj| this.tx.broadcast_direct(obj)); - if let Some(next) = next { - this.state.set(BroadcastState::WritingBuffered { - sleep: time::sleep(*this.deadline), - send_fut: next, - items: obj_list, - event: ev, - }) + + loop { + if obj_list.is_empty() { + info!("First try very nice"); + return Poll::Ready(Some(Ok(ev))); + } + + let obj_ref = obj_list.pop_front().unwrap(); + match this.tx.try_broadcast(obj_ref) { + Ok(_) => {} + Err(async_broadcast::TrySendError::Full(msg)) => { + obj_list.push_front(msg); + break obj_list; + } + _ => return Poll::Pending, + } } } - } + }; + + info!("Switched to BlockedOnWrite"); + this.state.set(BroadcastState::BlockedOnWrite { + sleep: tokio::time::sleep(*this.deadline), + buffer: buf, + event: ev, + }); } } } diff --git a/kube-runtime/src/utils/watch_ext.rs b/kube-runtime/src/utils/watch_ext.rs index b485d2092..cf7218439 100644 --- a/kube-runtime/src/utils/watch_ext.rs +++ b/kube-runtime/src/utils/watch_ext.rs @@ -14,7 +14,7 @@ use crate::watcher::DefaultBackoff; use backoff::backoff::Backoff; use futures::{Stream, TryStream}; -use super::reflect::{SharedReflect, SubscribeHandle}; +use super::{SharedReflect, SubscribeHandle}; /// Extension trait for streams returned by [`watcher`](watcher()) or [`reflector`](crate::reflector::reflector) From 49eaf12447791bb0abd0620b59a3b6aca4550679 Mon Sep 17 00:00:00 2001 From: Matei David Date: 
Fri, 8 Mar 2024 12:01:38 +0000 Subject: [PATCH 13/36] try_recv take 2 Signed-off-by: Matei David --- kube-runtime/src/utils/reflect_subscribe.rs | 201 +++++++++----------- 1 file changed, 89 insertions(+), 112 deletions(-) diff --git a/kube-runtime/src/utils/reflect_subscribe.rs b/kube-runtime/src/utils/reflect_subscribe.rs index 65327f293..90c6ff1eb 100644 --- a/kube-runtime/src/utils/reflect_subscribe.rs +++ b/kube-runtime/src/utils/reflect_subscribe.rs @@ -7,7 +7,7 @@ use std::{collections::VecDeque, sync::Arc}; use futures::{ready, Future, Stream, TryStream}; use pin_project::pin_project; use tokio::time; -use tracing::info; +use tracing::{debug, error, instrument, trace}; use crate::{ reflector::{store::Writer, ObjectRef, Store}, @@ -31,25 +31,11 @@ where rx: InactiveReceiver>, #[pin] - state: BroadcastState, + sleep: time::Sleep, + buffer: VecDeque>, deadline: time::Duration, } -#[pin_project(project = BroadcastStateProj)] -enum BroadcastState -where - K: Resource + Clone + 'static, - K::DynamicType: Eq + std::hash::Hash + Clone, -{ - Reading, - BlockedOnWrite { - #[pin] - sleep: time::Sleep, - buffer: VecDeque>, - event: Event, - }, -} - impl SharedReflect where St: TryStream>, @@ -63,8 +49,9 @@ where writer, tx, rx: rx.deactivate(), - state: BroadcastState::Reading, - deadline: time::Duration::from_secs(2), + deadline: time::Duration::from_secs(10), + sleep: time::sleep(time::Duration::ZERO), + buffer: VecDeque::new(), } } @@ -89,114 +76,104 @@ where { type Item = Result, Error>; + #[instrument( + name = "shared_stream", + skip_all, + fields(active_readers = %self.tx.receiver_count(), + inner_queue_depth = %self.buffer.len()) + )] fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); - info!("Polling"); loop { - match this.state.as_mut().project() { - // Continue reading - BroadcastStateProj::Reading => {} - BroadcastStateProj::BlockedOnWrite { - mut sleep, - buffer, - event, - } => { - loop { - let c = buffer.len(); - info!(count = %c, "Starting loop"); - if buffer.is_empty() { - let event = event.to_owned(); - info!("Switched to Reading"); - this.state.set(BroadcastState::Reading); - return Poll::Ready(Some(Ok(event))); - } - let next = buffer.pop_front().unwrap(); - match this.tx.try_broadcast(next) { - Ok(_) => { - let c = buffer.len(); - info!(count = %c, "Sent it"); - } - Err(async_broadcast::TrySendError::Full(msg)) => { - let c = buffer.len(); - info!(count = %c, "oh nooo"); - // Enqueue value back up - buffer.push_front(msg); - tracing::info!("Getting ready to be slept"); - ready!(sleep.as_mut().poll(cx)); - tracing::info!("Stream is stuck"); - // Reset timer and re-start loop. - sleep.as_mut().reset(time::Instant::now() + *this.deadline); - return Poll::Pending; - } - _ => {} - } + if let Some(msg) = this.buffer.pop_front() { + match this.tx.try_broadcast(msg) { + Ok(_) => { + trace!("Broadcast value"); + } + Err(async_broadcast::TrySendError::Full(msg)) => { + // When the broadcast buffer is full, retry with a + // deadline. + // + // First, push the msg back to the front of the buffer + // so ordering is preserved. 
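+                        //
+                        // Roughly, the cycle under sustained backpressure is:
+                        //   try_broadcast(msg) -> Err(Full)  // subscriber buffers are full
+                        //   poll(sleep)        -> Pending    // yield until the deadline
+                        //   deadline fires     -> log, reset the timer, retry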
+                        this.buffer.push_front(msg);
+                        trace!(deadline_ms = %this.deadline.as_millis(), "Root stream's buffer is full, retrying with a deadline");
+                        ready!(this.sleep.as_mut().poll(cx));
+                        error!("Shared stream cannot make progress; ensure subscribers are being driven");
+                        // Reset timer
+                        this.sleep.as_mut().reset(time::Instant::now() + *this.deadline);
+                    }
+                    Err(error) if error.is_disconnected() => {
+                        // When the broadcast channel is disconnected, we have
+                        // no active receivers. We should clear the buffer and
+                        // avoid writing to the channel.
+                        this.buffer.clear();
+                        debug!("No active readers subscribed to shared stream");
+                    }
+                    _ => {
+                        // The only other possible error is a closed channel.
+                        // We should never hit this since we are holding a
+                        // writer and an inactive reader.
+                    }
+                }
+            } else {
+                break;
+            }
+        }
-            let next = this.stream.as_mut().poll_next(cx).map_ok(|event| {
-                this.writer.apply_watcher_event(&event);
-                event
-            });
-
-            let ev = match ready!(next) {
-                Some(Ok(event)) => event,
-                None => return Poll::Ready(None),
-                Some(Err(error)) => return Poll::Ready(Some(Err(error))),
-            };
-
-
-            let buf = match &ev {
-                Event::Applied(obj) | Event::Deleted(obj) => {
-                    info!("Processing Applied | Deleted event");
-                    let obj_ref = ObjectRef::from_obj(obj);
-                    match this.tx.try_broadcast(obj_ref) {
-                        Ok(_) => {
-                            info!("First try in single event");
-                            return Poll::Ready(Some(Ok(ev)));
-                        }
-                        Err(async_broadcast::TrySendError::Full(msg)) => {
-                            info!("oh nooo, switch states");
-                            let mut buf = VecDeque::new();
-                            buf.push_back(msg);
-                            buf
-                        }
-                        _ => return Poll::Pending,
-                    }
-                }
+        let next = this.stream.as_mut().poll_next(cx).map_ok(|event| {
+            this.writer.apply_watcher_event(&event);
+            event
+        });
+
+        let event = match ready!(next) {
+            Some(Ok(event)) => event,
+            None => return Poll::Ready(None),
+            Some(Err(error)) => return Poll::Ready(Some(Err(error))),
+        };
+
+        match &event {
+            Event::Applied(obj) | Event::Deleted(obj) => {
+                let obj_ref = ObjectRef::from_obj(obj);
+                match this.tx.try_broadcast(obj_ref) {
+                    Ok(_) => {}
+                    Err(async_broadcast::TrySendError::Full(msg)) => {
+                        debug!(
+                            "Attempted to write to subscribers with no buffer space; applying backpressure"
+                        );
+                        this.buffer.push_back(msg);
+                    }
+                    // Channel is closed or we have no active readers.
+                    // In both cases there's not much we can do, so drive the
+                    // watch stream.
+ _ => {} } - Event::Restarted(obj_list) => { - info!("Processing restarted event"); - let mut obj_list = obj_list - .iter() - .map(ObjectRef::from_obj) - .collect::>>(); - - loop { - if obj_list.is_empty() { - info!("First try very nice"); - return Poll::Ready(Some(Ok(ev))); - } - - let obj_ref = obj_list.pop_front().unwrap(); - match this.tx.try_broadcast(obj_ref) { + } + Event::Restarted(obj_list) => { + let obj_list = obj_list.iter().map(ObjectRef::from_obj); + this.buffer.extend(obj_list); + loop { + if let Some(msg) = this.buffer.pop_front() { + match this.tx.try_broadcast(msg) { Ok(_) => {} Err(async_broadcast::TrySendError::Full(msg)) => { - obj_list.push_front(msg); - break obj_list; + debug!( + "Attempted to write to subscribers with no buffer space; applying backpressure" + ); + this.buffer.push_front(msg); + break; } - _ => return Poll::Pending, + _ => {} } + } else { + break; } } - }; - - info!("Switched to BlockedOnWrite"); - this.state.set(BroadcastState::BlockedOnWrite { - sleep: tokio::time::sleep(*this.deadline), - buffer: buf, - event: ev, - }); - } + } + }; + + Poll::Ready(Some(Ok(event))) } } From e7aad7651998d610f467c6963c42f2e14cf27faf Mon Sep 17 00:00:00 2001 From: Matei David Date: Mon, 11 Mar 2024 19:03:34 +0000 Subject: [PATCH 14/36] Working on names next Signed-off-by: Matei David --- examples/shared_stream_controllers.rs | 99 +++++++++---------- kube-runtime/src/utils/reflect_subscribe.rs | 100 ++++++++++++++++++-- 2 files changed, 142 insertions(+), 57 deletions(-) diff --git a/examples/shared_stream_controllers.rs b/examples/shared_stream_controllers.rs index 0537708a5..c24169814 100644 --- a/examples/shared_stream_controllers.rs +++ b/examples/shared_stream_controllers.rs @@ -8,7 +8,6 @@ use kube::{ runtime::{controller::Action, reflector::store::Writer, watcher, Config, Controller, WatchStreamExt}, Api, Client, ResourceExt, }; -use tokio::signal::unix::{signal, SignalKind}; use tracing::{info, info_span, warn, Instrument}; use thiserror::Error; @@ -18,6 +17,8 @@ pub mod condition { pub static STATUS_TRUE: &str = "True"; } +const SUBSCRIBE_BUFFER_SIZE: usize = 256; + #[derive(Clone)] struct Data { client: Client, @@ -39,43 +40,45 @@ async fn main() -> anyhow::Result<()> { // - create a handle that can be cloned to get more readers // - pass through events from root stream through a reflector // - // Note: if we wanted to, we could apply a backoff _before_ we spill into the reflector - let (subscriber, reflector) = watcher(pods.clone(), Default::default()).reflect_shared(writer, 1); - - // (3): schedule the root stream with the runtime - // - apply a backoff to the root stream - // - poll it to handle errors - // scheduling with the runtime ensures the stream will be polled continously and allow - // readers to make progress. - tokio::spawn( - async move { - // Pin on the heap so we don't overflow our stack - // Put a backoff on it. - // - Depending on how we want to handle backpressure, the backoff could help to relax - // the flow of data - // i.e. the root stream has a buffer that objects get put into. When an object is in the - // buffer, it is cloned and sent to all readers. Once all readers have acked their copy, - // the item is removed from the buffer. - // - // A backoff here could ensure that when the buffer is full, we backpressure in the root - // stream by not consuming watcher output. We give clients enough time to make progress and - // ensure the next time the root stream is polled it can make progress by pushing into the - // buffer. 
-            let mut reflector = reflector.default_backoff().boxed();
-            tracing::info!("Polling root");
-            while let Some(next) = reflector.next().await {
-                match next {
-                    Err(error) => tracing::error!(%error, "Received error from main watcher stream"),
-                    _ => {}
-                }
+    // Before splitting, we apply a backoff. This is completely optional, but it
+    // allows us to ensure the APIServer won't be overwhelmed when we retry
+    // watches on errors.
+    let (subscriber, reflector) = watcher(pods.clone(), Default::default())
+        .default_backoff()
+        .reflect_shared(writer, SUBSCRIBE_BUFFER_SIZE);
+
+    // (3): schedule the root (i.e. shared) stream with the runtime.
+    //
+    // The runtime (tokio) will drive this task; the stream is polled
+    // continuously, which allows all downstream readers (i.e. subscribers)
+    // to make progress.
+    tokio::spawn(async move {
+        // Pin on the heap so we don't overflow our stack
+        let mut reflector = reflector.boxed();
+        while let Some(next) = reflector.next().await {
+            // We are not interested in the returned events here, only in
+            // handling errors.
+            match next {
+                Err(error) => tracing::error!(%error, "Received error from main watcher stream"),
+                _ => {}
            }
        }
    }
-        .instrument(info_span!("root_stream")),
-    );
+    });

-    // Create metadata controller to edit annotations
+    // (4): create a reader and the first controller. The metadata controller
+    // mirrors a pod's labels as annotations.
+    //
+    // To create a controller that operates on a shared stream, we need two
+    // handles:
+    // - A handle to the store.
+    // - A handle to a shared stream.
+    //
+    // The handle to the shared stream will be used to receive shared objects as
+    // they are applied by the reflector.
     let reader = subscriber.reader();
+    // Store readers can be created on-demand by calling `reader()` on a shared
+    // stream handle. Stream handles are cheap to clone.
     let metadata_controller = Controller::for_shared_stream(subscriber.clone(), reader)
         .with_config(config.clone())
         .shutdown_on_signal()
@@ -91,9 +94,13 @@ async fn main() -> anyhow::Result<()> {
             }
         })
         .instrument(info_span!("metadata_controller"));
-    tokio::spawn(metadata_controller);

-    // Create status controller
+    // (5): create the status controller. The status controller writes a condition
+    // whenever a pod has undocumented container ports (i.e. containers with no
+    // exposed ports).
+    //
+    // This is the last controller we will create, so we can just move the
+    // handle inside the controller.
     let reader = subscriber.reader();
     let status_controller = Controller::for_shared_stream(subscriber, reader)
         .with_config(config)
@@ -110,24 +117,20 @@ async fn main() -> anyhow::Result<()> {
             }
         })
         .instrument(info_span!("status_controller"));
-    tokio::spawn(status_controller);

-    // Handle shutdown
-    //
-    // In a more nicely put together example we'd want to actually drain everything
-    // instead of having controllers manage signals on their own
-    //
-    // The lack of a drain abstraction atm made me skip it but when the example is ready we should
-    // consider handling shutdowns well to help users out
-    let mut interrupt = signal(SignalKind::interrupt())?;
-    let mut terminate = signal(SignalKind::terminate())?;
+    let mut terminate = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())?;

+    // (6): last step, drive the controllers to readiness. Controllers are futures
+    // and need to be driven to make progress. A controller that is not driven
+    // and operates on a subscribed stream will eventually block the shared stream.
     tokio::select!
{ - _ = interrupt.recv() => { - info!("Received SIGINT; terminating..."); + _ = metadata_controller => { + }, + + _ = status_controller => { }, _ = terminate.recv() => { - info!("Received SIGTERM; terminating..."); + info!("Received term signal; shutting down...") } } diff --git a/kube-runtime/src/utils/reflect_subscribe.rs b/kube-runtime/src/utils/reflect_subscribe.rs index 90c6ff1eb..619724f7a 100644 --- a/kube-runtime/src/utils/reflect_subscribe.rs +++ b/kube-runtime/src/utils/reflect_subscribe.rs @@ -16,7 +16,6 @@ use crate::{ use async_broadcast::{InactiveReceiver, Receiver, Sender}; use kube_client::Resource; - /// Stream returned by the [`reflect`](super::WatchStreamExt::reflect) method #[pin_project] pub struct SharedReflect @@ -128,13 +127,17 @@ where let event = match ready!(next) { Some(Ok(event)) => event, - None => return Poll::Ready(None), + None => { + tracing::info!("Stream terminated"); + return Poll::Ready(None); + }, Some(Err(error)) => return Poll::Ready(Some(Err(error))), }; match &event { - Event::Applied(obj) | Event::Deleted(obj) => { + // Only deal with Deleted events + Event::Applied(obj) => { let obj_ref = ObjectRef::from_obj(obj); match this.tx.try_broadcast(obj_ref) { Ok(_) => {} @@ -171,6 +174,10 @@ where } } } + // Delete events should refresh the store. There is no need to propagate + // them to subscribers since we have already updated the store by this + // point. + _ => {} }; Poll::Ready(Some(Ok(event))) @@ -220,12 +227,87 @@ where type Item = Arc; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut me = self.project(); - // If we use try_recv() here we could return Poll::Ready(Error) and let - // the controller's trigger_backoff come into play (?) - match ready!(me.rx.as_mut().poll_next(cx)) { - Some(obj_ref) => Poll::Ready(me.reader.get(&obj_ref)), - None => Poll::Ready(None), + let mut this = self.project(); + match ready!(this.rx.as_mut().poll_next(cx)) { + Some(obj_ref) => this.reader + .get(&obj_ref) + .map(|obj| Poll::Ready(Some(obj))) + .unwrap_or(Poll::Pending), + None => Poll::Ready(None) } } } + + +#[cfg(test)] +pub(crate) mod test { + use std::{task::Poll, vec}; + + use super::{Error, Event}; + use crate::{reflector, utils::SharedReflect}; + use futures::{pin_mut, poll, stream, StreamExt}; + use k8s_openapi::api::core::v1::Pod; + + const TEST_BUFFER_SIZE: usize = 10; + + fn testpod(name: &str) -> Pod { + let mut pod = Pod::default(); + pod.metadata.name = Some(name.to_string()); + pod + } + + /* + * A list of tests: + * Happy Path: + * - events are passed through (including errors); + * - And on None it all works well + * - objects are shared through N subscribers; + * - objects are shared through N subscribers but deletes don't do anything; + * - when main stream shuts down readers can still receive + * Pathological cases + * - events are passed through on overflow and readers recover after delay; + * ( any chance to see how many times `sleep` has been called?) + * - when main stream shuts down readers can still receive after + * backpressure is applied (?) + * + * Integration tests: + * - Three controllers. Can we get an integration test set-up with owned streams? 
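+ *
+ * (Sketch for the overflow case, assuming the helpers above: build the
+ *  stream with a buffer size of 1, subscribe twice, poll the root stream
+ *  until `try_broadcast` reports `Full`, then drive one subscriber and
+ *  assert the root stream unblocks on its next poll.)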
*/ + + #[tokio::test] + async fn shared_reflect_passes_events_through() { + + } + async fn reflect_passes_events_through() { + let foo = testpod("foo"); + let bar = testpod("bar"); + let st = stream::iter([ + Ok(Event::Applied(foo.clone())), + Err(Error::TooManyObjects), + Ok(Event::Restarted(vec![foo, bar])), + ]); + let (reader, writer) = reflector::store(); + + let reflect = SharedReflect::new(st, writer, TEST_BUFFER_SIZE); + pin_mut!(reflect); + assert_eq!(reader.len(), 0); + + assert!(matches!( + poll!(reflect.next()), + Poll::Ready(Some(Ok(Event::Applied(_)))) + )); + assert_eq!(reader.len(), 1); + + assert!(matches!( + poll!(reflect.next()), + Poll::Ready(Some(Err(Error::TooManyObjects))) + )); + assert_eq!(reader.len(), 1); + + let restarted = poll!(reflect.next()); + assert!(matches!(restarted, Poll::Ready(Some(Ok(Event::Restarted(_)))))); + assert_eq!(reader.len(), 2); + + assert!(matches!(poll!(reflect.next()), Poll::Ready(None))); + assert_eq!(reader.len(), 2); + } +} From b6ff97f7a8aac19536a7f68f064c0a6c08645e00 Mon Sep 17 00:00:00 2001 From: Matei David Date: Wed, 13 Mar 2024 20:17:13 +0000 Subject: [PATCH 15/36] Ok surprising this worked Signed-off-by: Matei David --- kube-runtime/src/utils/mod.rs | 4 +- kube-runtime/src/utils/reflect_dispatch.rs | 0 kube-runtime/src/utils/reflect_subscribe.rs | 313 -------------------- kube-runtime/src/utils/watch_ext.rs | 13 +- 4 files changed, 9 insertions(+), 321 deletions(-) create mode 100644 kube-runtime/src/utils/reflect_dispatch.rs delete mode 100644 kube-runtime/src/utils/reflect_subscribe.rs diff --git a/kube-runtime/src/utils/mod.rs b/kube-runtime/src/utils/mod.rs index 4fdc3cec4..0fb7dd409 100644 --- a/kube-runtime/src/utils/mod.rs +++ b/kube-runtime/src/utils/mod.rs @@ -6,7 +6,7 @@ mod event_flatten; mod event_modify; #[cfg(feature = "unstable-runtime-predicates")] mod predicate; mod reflect; -mod reflect_subscribe; +mod reflect_dispatch; mod stream_backoff; #[cfg(feature = "unstable-runtime-subscribe")] pub mod stream_subscribe; mod watch_ext; @@ -17,7 +17,7 @@ pub use event_modify::EventModify; #[cfg(feature = "unstable-runtime-predicates")] pub use predicate::{predicates, Predicate, PredicateFilter}; pub use reflect::Reflect; -pub use reflect_subscribe::{SharedReflect, SubscribeHandle}; +pub use reflect_dispatch::{ReflectDispatcher, ReflectHandle}; pub use stream_backoff::StreamBackoff; #[cfg(feature = "unstable-runtime-subscribe")] pub use stream_subscribe::StreamSubscribe; diff --git a/kube-runtime/src/utils/reflect_dispatch.rs b/kube-runtime/src/utils/reflect_dispatch.rs new file mode 100644 index 000000000..e69de29bb diff --git a/kube-runtime/src/utils/reflect_subscribe.rs b/kube-runtime/src/utils/reflect_subscribe.rs deleted file mode 100644 index 619724f7a..000000000 --- a/kube-runtime/src/utils/reflect_subscribe.rs +++ /dev/null @@ -1,313 +0,0 @@ -use core::{ - pin::Pin, - task::{Context, Poll}, -}; -use std::{collections::VecDeque, sync::Arc}; - -use futures::{ready, Future, Stream, TryStream}; -use pin_project::pin_project; -use tokio::time; -use tracing::{debug, error, instrument, trace}; - -use crate::{ - reflector::{store::Writer, ObjectRef, Store}, - watcher::{Error, Event}, -}; -use async_broadcast::{InactiveReceiver, Receiver, Sender}; -use kube_client::Resource; - -/// Stream returned by the [`reflect`](super::WatchStreamExt::reflect) method -#[pin_project] -pub struct SharedReflect -where - K: Resource + Clone + 'static, - K::DynamicType: Eq + std::hash::Hash + Clone, -{ - #[pin] - stream: St, - writer: 
Writer, - tx: Sender>, - rx: InactiveReceiver>, - - #[pin] - sleep: time::Sleep, - buffer: VecDeque>, - deadline: time::Duration, -} - -impl SharedReflect -where - St: TryStream>, - K: Resource + Clone, - K::DynamicType: Eq + std::hash::Hash + Clone, -{ - pub(super) fn new(stream: St, writer: Writer, buf_size: usize) -> SharedReflect { - let (tx, rx) = async_broadcast::broadcast(buf_size); - Self { - stream, - writer, - tx, - rx: rx.deactivate(), - deadline: time::Duration::from_secs(10), - sleep: time::sleep(time::Duration::ZERO), - buffer: VecDeque::new(), - } - } - - pub fn subscribe(&self) -> SubscribeHandle { - // Note: broadcast::Sender::new_receiver() will return a new receiver - // that _will not_ replay any messages in the channel, effectively - // starting from the latest message. - // - // Since we create a reader and a writer when calling reflect_shared() - // this should be fine. All subsequent clones should go through - // SubscribeHandle::clone() to get a receiver that replays all of the - // messages in the channel. - SubscribeHandle::new(self.writer.as_reader(), self.tx.new_receiver()) - } -} - -impl Stream for SharedReflect -where - K: Resource + Clone, - K::DynamicType: Eq + std::hash::Hash + Clone + Default, - St: Stream, Error>>, -{ - type Item = Result, Error>; - - #[instrument( - name = "shared_stream", - skip_all, - fields(active_readers = %self.tx.receiver_count(), - inner_queue_depth = %self.buffer.len()) - )] - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - loop { - if let Some(msg) = this.buffer.pop_front() { - match this.tx.try_broadcast(msg) { - Ok(_) => { - trace!("Broadcast value"); - } - Err(async_broadcast::TrySendError::Full(msg)) => { - // When the broadcast buffer is full, retry with a - // deadline. - // - // First, push the msg back to the front of the buffer - // so ordering is preserved. - this.buffer.push_front(msg); - trace!(deadline_ms = %this.deadline.as_millis(), "Root stream's buffer is full, retrying with a deadline"); - ready!(this.sleep.as_mut().poll(cx)); - error!("Shared stream cannot make progress; ensure subscribers are being driven"); - // Reset timer - this.sleep.as_mut().reset(time::Instant::now() + *this.deadline); - } - Err(error) if error.is_disconnected() => { - // When the broadcast channel is disconnected, we have - // no active receivers. We should clear the buffer and - // avoid writing to the channel. - this.buffer.clear(); - debug!("No active readers subscribed to shared stream"); - } - _ => { - // Other possible error is a closed channel. - // We should never hit this since we are holding a - // writer and an inactive reader. - } - } - } else { - break; - } - } - - let next = this.stream.as_mut().poll_next(cx).map_ok(|event| { - this.writer.apply_watcher_event(&event); - event - }); - - let event = match ready!(next) { - Some(Ok(event)) => event, - None => { - tracing::info!("Stream terminated"); - return Poll::Ready(None); - }, - Some(Err(error)) => return Poll::Ready(Some(Err(error))), - }; - - - match &event { - // Only deal with Deleted events - Event::Applied(obj) => { - let obj_ref = ObjectRef::from_obj(obj); - match this.tx.try_broadcast(obj_ref) { - Ok(_) => {} - Err(async_broadcast::TrySendError::Full(msg)) => { - debug!( - "Attempted to write to subscribers with no buffer space; applying backpressure" - ); - this.buffer.push_back(msg); - } - // Channel is closed or we have no active readers. 
- // In both cases there's not much we can do, so drive the - // watch strem. - _ => {} - } - } - Event::Restarted(obj_list) => { - let obj_list = obj_list.iter().map(ObjectRef::from_obj); - this.buffer.extend(obj_list); - loop { - if let Some(msg) = this.buffer.pop_front() { - match this.tx.try_broadcast(msg) { - Ok(_) => {} - Err(async_broadcast::TrySendError::Full(msg)) => { - debug!( - "Attempted to write to subscribers with no buffer space; applying backpressure" - ); - this.buffer.push_front(msg); - break; - } - _ => {} - } - } else { - break; - } - } - } - // Delete events should refresh the store. There is no need to propagate - // them to subscribers since we have already updated the store by this - // point. - _ => {} - }; - - Poll::Ready(Some(Ok(event))) - } -} - -#[pin_project] -pub struct SubscribeHandle -where - K: Resource + Clone + 'static, - K::DynamicType: Eq + std::hash::Hash + Clone, -{ - #[pin] - rx: Receiver>, - reader: Store, -} - -impl Clone for SubscribeHandle -where - K: Resource + Clone + 'static, - K::DynamicType: Eq + std::hash::Hash + Clone, -{ - fn clone(&self) -> Self { - SubscribeHandle::new(self.reader.clone(), self.rx.clone()) - } -} - -impl SubscribeHandle -where - K: Resource + Clone, - K::DynamicType: Eq + std::hash::Hash + Clone, -{ - pub(super) fn new(reader: Store, rx: Receiver>) -> SubscribeHandle { - Self { reader, rx } - } - - pub fn reader(&self) -> Store { - self.reader.clone() - } -} - -impl Stream for SubscribeHandle -where - K: Resource + Clone, - K::DynamicType: Eq + std::hash::Hash + Clone + Default, -{ - type Item = Arc; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - match ready!(this.rx.as_mut().poll_next(cx)) { - Some(obj_ref) => this.reader - .get(&obj_ref) - .map(|obj| Poll::Ready(Some(obj))) - .unwrap_or(Poll::Pending), - None => Poll::Ready(None) - } - } -} - - -#[cfg(test)] -pub(crate) mod test { - use std::{task::Poll, vec}; - - use super::{Error, Event}; - use crate::{reflector, utils::SharedReflect}; - use futures::{pin_mut, poll, stream, StreamExt}; - use k8s_openapi::api::core::v1::Pod; - - const TEST_BUFFER_SIZE: usize = 10; - - fn testpod(name: &str) -> Pod { - let mut pod = Pod::default(); - pod.metadata.name = Some(name.to_string()); - pod - } - - /* - * A list of tests: - * Happy Path: - * - events are passed through (including errors); - * - And on None it all works well - * - objects are shared through N subscribers; - * - objects are shared through N subscribers but deletes don't do anything; - * - when main stream shuts down readers can still receive - * Pathological cases - * - events are passed through on overflow and readers recover after delay; - * ( any chance to see how many times `sleep` has been called?) - * - when main stream shuts down readers can still receive after - * backpressure is applied (?) - * - * Integration tests: - * - Three controllers. Can we get an integration test set-up with owned streams? 
*/ - - #[tokio::test] - async fn shared_reflect_passes_events_through() { - - } - async fn reflect_passes_events_through() { - let foo = testpod("foo"); - let bar = testpod("bar"); - let st = stream::iter([ - Ok(Event::Applied(foo.clone())), - Err(Error::TooManyObjects), - Ok(Event::Restarted(vec![foo, bar])), - ]); - let (reader, writer) = reflector::store(); - - let reflect = SharedReflect::new(st, writer, TEST_BUFFER_SIZE); - pin_mut!(reflect); - assert_eq!(reader.len(), 0); - - assert!(matches!( - poll!(reflect.next()), - Poll::Ready(Some(Ok(Event::Applied(_)))) - )); - assert_eq!(reader.len(), 1); - - assert!(matches!( - poll!(reflect.next()), - Poll::Ready(Some(Err(Error::TooManyObjects))) - )); - assert_eq!(reader.len(), 1); - - let restarted = poll!(reflect.next()); - assert!(matches!(restarted, Poll::Ready(Some(Ok(Event::Restarted(_)))))); - assert_eq!(reader.len(), 2); - - assert!(matches!(poll!(reflect.next()), Poll::Ready(None))); - assert_eq!(reader.len(), 2); - } -} diff --git a/kube-runtime/src/utils/watch_ext.rs b/kube-runtime/src/utils/watch_ext.rs index cf7218439..a86620864 100644 --- a/kube-runtime/src/utils/watch_ext.rs +++ b/kube-runtime/src/utils/watch_ext.rs @@ -8,14 +8,15 @@ use crate::{ }; use kube_client::Resource; -use crate::{reflector::store::Writer, utils::Reflect}; +use crate::{ + reflector::store::Writer, + utils::{Reflect, ReflectDispatcher, ReflectHandle}, +}; use crate::watcher::DefaultBackoff; use backoff::backoff::Backoff; use futures::{Stream, TryStream}; -use super::{SharedReflect, SubscribeHandle}; - /// Extension trait for streams returned by [`watcher`](watcher()) or [`reflector`](crate::reflector::reflector) pub trait WatchStreamExt: Stream { @@ -252,17 +253,17 @@ pub trait WatchStreamExt: Stream { Reflect::new(self, writer) } - fn reflect_shared( + fn reflect_dispatch( self, writer: Writer, buf_size: usize, - ) -> (SubscribeHandle, SharedReflect) + ) -> (ReflectHandle, ReflectDispatcher) where Self: Stream>> + Sized, K: Resource + Clone + 'static, K::DynamicType: Eq + std::hash::Hash + Clone, { - let reflect = SharedReflect::new(self, writer, buf_size); + let reflect = ReflectDispatcher::new(self, writer, buf_size); (reflect.subscribe(), reflect) } } From 7a570fd8b881ef448c92361605abb55df6100885 Mon Sep 17 00:00:00 2001 From: Matei David Date: Mon, 25 Mar 2024 19:17:03 +0000 Subject: [PATCH 16/36] Write tests and rename file to reflect dispatch Signed-off-by: Matei David --- kube-runtime/src/utils/mod.rs | 6 +- kube-runtime/src/utils/reflect_dispatch.rs | 449 +++++++++++++++++++++ 2 files changed, 453 insertions(+), 2 deletions(-) diff --git a/kube-runtime/src/utils/mod.rs b/kube-runtime/src/utils/mod.rs index 0fb7dd409..a35068ce4 100644 --- a/kube-runtime/src/utils/mod.rs +++ b/kube-runtime/src/utils/mod.rs @@ -4,11 +4,13 @@ mod backoff_reset_timer; pub(crate) mod delayed_init; mod event_flatten; mod event_modify; -#[cfg(feature = "unstable-runtime-predicates")] mod predicate; +#[cfg(feature = "unstable-runtime-predicates")] +mod predicate; mod reflect; mod reflect_dispatch; mod stream_backoff; -#[cfg(feature = "unstable-runtime-subscribe")] pub mod stream_subscribe; +#[cfg(feature = "unstable-runtime-subscribe")] +pub mod stream_subscribe; mod watch_ext; pub use backoff_reset_timer::ResetTimerBackoff; diff --git a/kube-runtime/src/utils/reflect_dispatch.rs b/kube-runtime/src/utils/reflect_dispatch.rs index e69de29bb..1bc991dd0 100644 --- a/kube-runtime/src/utils/reflect_dispatch.rs +++ b/kube-runtime/src/utils/reflect_dispatch.rs @@ -0,0 +1,449 
@@
+use core::{
+    pin::Pin,
+    task::{Context, Poll},
+};
+use std::{collections::VecDeque, sync::Arc};
+
+use futures::{ready, Future, Stream, TryStream};
+use pin_project::pin_project;
+use tokio::time;
+use tracing::{debug, error, instrument, trace};
+
+use crate::{
+    reflector::{store::Writer, ObjectRef, Store},
+    watcher::{Error, Event},
+};
+use async_broadcast::{InactiveReceiver, Receiver, Sender};
+use kube_client::Resource;
+
+/// Stream returned by the [`reflect_dispatch`](super::WatchStreamExt::reflect_dispatch) method
+#[pin_project]
+pub struct ReflectDispatcher<St, K>
+where
+    K: Resource + Clone + 'static,
+    K::DynamicType: Eq + std::hash::Hash + Clone,
+{
+    #[pin]
+    stream: St,
+    writer: Writer<K>,
+    tx: Sender<ObjectRef<K>>,
+    rx: InactiveReceiver<ObjectRef<K>>,
+
+    #[pin]
+    sleep: time::Sleep,
+    buffer: VecDeque<ObjectRef<K>>,
+    deadline: time::Duration,
+}
+
+impl<St, K> ReflectDispatcher<St, K>
+where
+    St: TryStream<Ok = Event<K>>,
+    K: Resource + Clone,
+    K::DynamicType: Eq + std::hash::Hash + Clone,
+{
+    pub(super) fn new(stream: St, writer: Writer<K>, buf_size: usize) -> ReflectDispatcher<St, K> {
+        let (tx, rx) = async_broadcast::broadcast(buf_size);
+        Self {
+            stream,
+            writer,
+            tx,
+            rx: rx.deactivate(),
+            deadline: time::Duration::from_secs(10),
+            sleep: time::sleep(time::Duration::ZERO),
+            buffer: VecDeque::new(),
+        }
+    }
+
+    pub fn subscribe(&self) -> ReflectHandle<K> {
+        // Note: broadcast::Sender::new_receiver() will return a new receiver
+        // that _will not_ replay any messages in the channel, effectively
+        // starting from the latest message.
+        //
+        // Since we create a reader and a writer when calling reflect_dispatch()
+        // this should be fine. All subsequent clones should go through
+        // ReflectHandle::clone() to get a receiver that replays all of the
+        // messages in the channel.
+        ReflectHandle::new(self.writer.as_reader(), self.tx.new_receiver())
+    }
+}
+
+impl<St, K> Stream for ReflectDispatcher<St, K>
+where
+    K: Resource + Clone,
+    K::DynamicType: Eq + std::hash::Hash + Clone + Default,
+    St: Stream<Item = Result<Event<K>, Error>>,
+{
+    type Item = Result<Event<K>, Error>;
+
+    #[instrument(
+        name = "shared_stream",
+        skip_all,
+        fields(active_readers = %self.tx.receiver_count(),
+               inner_queue_depth = %self.buffer.len())
+    )]
+    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        let mut this = self.project();
+        loop {
+            if let Some(msg) = this.buffer.pop_front() {
+                match this.tx.try_broadcast(msg) {
+                    Ok(_) => {
+                        trace!("Broadcast value");
+                    }
+                    Err(async_broadcast::TrySendError::Full(msg)) => {
+                        // When the broadcast buffer is full, retry with a
+                        // deadline.
+                        //
+                        // First, push the msg back to the front of the buffer
+                        // so ordering is preserved.
+                        this.buffer.push_front(msg);
+                        trace!(
+                            deadline_ms = %this.deadline.as_millis(),
+                            queue_depth = %this.buffer.len(),
+                            active_readers = %this.tx.receiver_count(),
+                            "Root stream's buffer is full, retrying with a deadline"
+                        );
+                        ready!(this.sleep.as_mut().poll(cx));
+                        error!("Shared stream cannot make progress; ensure subscribers are being driven");
+                        // Reset timer
+                        this.sleep.as_mut().reset(time::Instant::now() + *this.deadline);
+                    }
+                    Err(error) if error.is_disconnected() => {
+                        // When the broadcast channel is disconnected, we have
+                        // no active receivers. We should clear the buffer and
+                        // avoid writing to the channel.
+                        this.buffer.clear();
+                        debug!("No active readers subscribed to shared stream");
+                    }
+                    _ => {
+                        // The only other possible error is a closed channel.
+                        // We should never hit this since we are holding a
+                        // writer and an inactive reader.
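+                        // (Illustrative note: `new()` stores `rx.deactivate()`,
+                        // so the channel stays open while this stream is alive
+                        // and a closed channel should be unobservable here.)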
+                    }
+                }
+            } else {
+                break;
+            }
+        }
+
+        let next = this.stream.as_mut().poll_next(cx).map_ok(|event| {
+            this.writer.apply_watcher_event(&event);
+            event
+        });
+
+        let event = match ready!(next) {
+            Some(Ok(event)) => event,
+            None => {
+                tracing::info!("Stream terminated");
+                this.tx.close();
+                return Poll::Ready(None);
+            },
+            Some(Err(error)) => return Poll::Ready(Some(Err(error))),
+        };
+
+
+        match &event {
+            // Broadcast object refs to subscribers for all event types except Deleted
+            Event::Applied(obj) => {
+                let obj_ref = ObjectRef::from_obj(obj);
+                match this.tx.try_broadcast(obj_ref) {
+                    Ok(_) => {}
+                    Err(async_broadcast::TrySendError::Full(msg)) => {
+                        debug!(
+                            "Attempted to write to subscribers with no buffer space; applying backpressure"
+                        );
+                        this.buffer.push_back(msg);
+                    }
+                    // Channel is closed or we have no active readers.
+                    // In both cases there's not much we can do, so drive the
+                    // watch stream.
+                    _ => {}
+                }
+            }
+            Event::Restarted(obj_list) => {
+                let obj_list = obj_list.iter().map(ObjectRef::from_obj);
+                this.buffer.extend(obj_list);
+                loop {
+                    if let Some(msg) = this.buffer.pop_front() {
+                        match this.tx.try_broadcast(msg) {
+                            Ok(_) => {}
+                            Err(async_broadcast::TrySendError::Full(msg)) => {
+                                debug!(
+                                    "Attempted to write to subscribers with no buffer space; applying backpressure"
+                                );
+                                this.buffer.push_front(msg);
+                                break;
+                            }
+                            _ => {}
+                        }
+                    } else {
+                        break;
+                    }
+                }
+            }
+            // Delete events should refresh the store. There is no need to propagate
+            // them to subscribers since we have already updated the store by this
+            // point.
+            _ => {}
+        };
+
+        Poll::Ready(Some(Ok(event)))
+    }
+}
+
+#[pin_project]
+pub struct ReflectHandle<K>
+where
+    K: Resource + Clone + 'static,
+    K::DynamicType: Eq + std::hash::Hash + Clone,
+{
+    #[pin]
+    rx: Receiver<ObjectRef<K>>,
+    reader: Store<K>,
+}
+
+impl<K> Clone for ReflectHandle<K>
+where
+    K: Resource + Clone + 'static,
+    K::DynamicType: Eq + std::hash::Hash + Clone,
+{
+    fn clone(&self) -> Self {
+        ReflectHandle::new(self.reader.clone(), self.rx.clone())
+    }
+}
+
+impl<K> ReflectHandle<K>
+where
+    K: Resource + Clone,
+    K::DynamicType: Eq + std::hash::Hash + Clone,
+{
+    pub(super) fn new(reader: Store<K>, rx: Receiver<ObjectRef<K>>) -> ReflectHandle<K> {
+        Self { reader, rx }
+    }
+
+    pub fn reader(&self) -> Store<K> {
+        self.reader.clone()
+    }
+}
+
+impl<K> Stream for ReflectHandle<K>
+where
+    K: Resource + Clone,
+    K::DynamicType: Eq + std::hash::Hash + Clone + Default,
+{
+    type Item = Arc<K>;
+
+    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        let mut this = self.project();
+        match ready!(this.rx.as_mut().poll_next(cx)) {
+            Some(obj_ref) => this.reader
+                .get(&obj_ref)
+                .map(|obj| Poll::Ready(Some(obj)))
+                .unwrap_or(Poll::Pending),
+            None => Poll::Ready(None)
+        }
+    }
+}
+
+
+#[cfg(test)]
+pub(crate) mod test {
+    use std::{sync::Arc, task::Poll, vec};
+
+    use super::{Error, Event};
+    use crate::{reflector, utils::ReflectDispatcher};
+    use futures::{pin_mut, poll, stream, StreamExt};
+    use k8s_openapi::api::core::v1::Pod;
+
+    fn testpod(name: &str) -> Pod {
+        let mut pod = Pod::default();
+        pod.metadata.name = Some(name.to_string());
+        pod
+    }
+
+    #[tokio::test]
+    async fn events_are_passed_through() {
+        let foo = testpod("foo");
+        let bar = testpod("bar");
+        let st = stream::iter([
+            Ok(Event::Applied(foo.clone())),
+            Err(Error::TooManyObjects),
+            Ok(Event::Restarted(vec![foo, bar])),
+        ]);
+
+        let (reader, writer) = reflector::store();
+        let reflect = ReflectDispatcher::new(st, writer, 10);
+        pin_mut!(reflect);
+
+        // Prior to any polls, we should have an empty store.
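+        // (Events are only applied to the writer as the dispatcher itself is
+        // polled, so the reader stays empty until we drive the stream below.)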
+ assert_eq!(reader.len(), 0); + assert!(matches!( + poll!(reflect.next()), + Poll::Ready(Some(Ok(Event::Applied(_)))) + )); + + // Make progress and assert all events are seen + assert_eq!(reader.len(), 1); + assert!(matches!( + poll!(reflect.next()), + Poll::Ready(Some(Err(Error::TooManyObjects))) + )); + assert_eq!(reader.len(), 1); + + let restarted = poll!(reflect.next()); + assert!(matches!(restarted, Poll::Ready(Some(Ok(Event::Restarted(_)))))); + assert_eq!(reader.len(), 2); + + assert!(matches!(poll!(reflect.next()), Poll::Ready(None))); + assert_eq!(reader.len(), 2); + } + + #[tokio::test] + async fn readers_yield_touched_objects() { + // Readers should yield touched objects they receive from Stream events. + // + // NOTE: a Delete(_) event will be ignored if the item does not exist in + // the cache. Same with a Restarted(vec![delete_item]) + let foo = testpod("foo"); + let bar = testpod("bar"); + let st = stream::iter([ + Ok(Event::Deleted(foo.clone())), + Ok(Event::Applied(foo.clone())), + Err(Error::TooManyObjects), + Ok(Event::Restarted(vec![foo.clone(), bar.clone()])), + ]); + + let foo = Arc::new(foo); + let bar = Arc::new(bar); + + let (_, writer) = reflector::store(); + let reflect = ReflectDispatcher::new(st, writer, 10); + pin_mut!(reflect); + let subscriber = reflect.subscribe(); + pin_mut!(subscriber); + + // Deleted events should be skipped by subscriber. + assert!(matches!(poll!(reflect.next()), Poll::Ready(Some(Ok(Event::Deleted(_)))))); + assert!(matches!(poll!(subscriber.next()), Poll::Pending)); + + assert!(matches!(poll!(reflect.next()), Poll::Ready(Some(Ok(Event::Applied(_)))))); + assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone()))); + + // Errors are not propagated to subscribers. + assert!(matches!(poll!(reflect.next()), Poll::Ready(Some(Err(Error::TooManyObjects))))); + assert!(matches!(poll!(subscriber.next()), Poll::Pending)); + + // Restart event will yield all objects in the list + assert!(matches!(poll!(reflect.next()), Poll::Ready(Some(Ok(Event::Restarted(_)))))); + assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone()))); + assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(bar.clone()))); + + // When main channel is closed, it is propagated to subscribers + assert!(matches!(poll!(reflect.next()), Poll::Ready(None))); + assert_eq!(poll!(subscriber.next()), Poll::Ready(None)); + } + + #[tokio::test] + async fn readers_yield_when_tx_drops() { + // Once the main stream is dropped, readers should continue to make + // progress and read values that have been sent on the channel. + let foo = testpod("foo"); + let bar = testpod("bar"); + let st = stream::iter([ + Ok(Event::Applied(foo.clone())), + Ok(Event::Restarted(vec![foo.clone(), bar.clone()])), + ]); + + let foo = Arc::new(foo); + let bar = Arc::new(bar); + + let (_, writer) = reflector::store(); + let reflect = ReflectDispatcher::new(st, writer, 10); + + // We pin the reflector on the heap to make it easier to drop it. + let mut reflect = Box::pin(reflect); + let subscriber = reflect.subscribe(); + pin_mut!(subscriber); + + assert!(matches!(poll!(reflect.next()), Poll::Ready(Some(Ok(Event::Applied(_)))))); + assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone()))); + + + // Restart event will yield all objects in the list. Broadcast values + // without polling and then drop. + // + // First, subscribers should be pending. 
+ assert_eq!(poll!(subscriber.next()), Poll::Pending); + assert!(matches!(poll!(reflect.next()), Poll::Ready(Some(Ok(Event::Restarted(_)))))); + drop(reflect); + + assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone()))); + assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(bar.clone()))); + assert_eq!(poll!(subscriber.next()), Poll::Ready(None)); + } + + #[tokio::test] + async fn reflect_applies_backpressure() { + // When the channel is full, we should observe backpressure applied. + // + // This will be manifested by receiving Poll::Pending on the reflector + // stream while the reader stream is not polled. Once we unblock the + // buffer, the reflector will make progress. + let foo = testpod("foo"); + let bar = testpod("bar"); + let st = stream::iter([ + Ok(Event::Applied(foo.clone())), + Ok(Event::Restarted(vec![foo.clone(), bar.clone()])), + ]); + + let foo = Arc::new(foo); + let bar = Arc::new(bar); + + let (_, writer) = reflector::store(); + let reflect = ReflectDispatcher::new(st, writer, 1); + pin_mut!(reflect); + let subscriber = reflect.subscribe(); + pin_mut!(subscriber); + let subscriber_slow = reflect.subscribe(); + pin_mut!(subscriber_slow); + + assert_eq!(poll!(subscriber.next()), Poll::Pending); + assert_eq!(poll!(subscriber_slow.next()), Poll::Pending); + + // Poll first subscriber, but not the second. + assert!(matches!(poll!(reflect.next()), Poll::Ready(Some(Ok(Event::Applied(_)))))); + assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone()))); + // One subscriber is not reading, so we need to apply backpressure until + // channel has capacity. + // + // At this point, the buffer is full. Polling again will trigger the + // backpressure logic. This means, next event will be returned, but no + // more progress will be made after that until subscriber_slow catches + // up. + assert!(matches!(poll!(reflect.next()), Poll::Ready(Some(Ok(Event::Restarted(_)))))); + assert!(matches!(poll!(reflect.next()), Poll::Pending)); + // Our "fast" subscriber will also have nothing else to poll until the + // slower subscriber advances its pointer in the buffer. + assert_eq!(poll!(subscriber.next()), Poll::Pending); + + // Advance slow reader + assert_eq!(poll!(subscriber_slow.next()), Poll::Ready(Some(foo.clone()))); + + // We now have room for only one more item. In total, the previous event + // had two. We repeat the same pattern. + assert!(matches!(poll!(reflect.next()), Poll::Pending)); + assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone()))); + assert!(matches!(poll!(reflect.next()), Poll::Pending)); + assert_eq!(poll!(subscriber_slow.next()), Poll::Ready(Some(foo.clone()))); + // Poll again to drain the queue. + assert!(matches!(poll!(reflect.next()), Poll::Ready(None))); + assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(bar.clone()))); + assert_eq!(poll!(subscriber_slow.next()), Poll::Ready(Some(bar.clone()))); + + assert_eq!(poll!(subscriber.next()), Poll::Ready(None)); + assert_eq!(poll!(subscriber_slow.next()), Poll::Ready(None)); + } + + // TODO (matei): tests around cloning subscribers once a watch stream has already + // been established. This will depend on the interfaces & impl so are left + // out for now. 
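+
+    // A minimal runnable sketch of the async_broadcast (0.7) replay semantics
+    // the dispatcher relies on above: `Sender::new_receiver()` starts at the
+    // channel head, while cloning a `Receiver` inherits that receiver's
+    // position.
+    #[tokio::test]
+    async fn broadcast_replay_semantics_sketch() {
+        let (tx, mut rx) = async_broadcast::broadcast::<u32>(8);
+        assert!(tx.try_broadcast(1).is_ok());
+
+        // Inherits `rx`'s position, so it still observes the buffered `1`.
+        let mut replaying = rx.clone();
+        // Starts at the head, so it only observes values sent from now on.
+        let mut fresh = tx.new_receiver();
+        assert!(tx.try_broadcast(2).is_ok());
+
+        assert_eq!(rx.recv().await.unwrap(), 1);
+        assert_eq!(replaying.recv().await.unwrap(), 1);
+        assert_eq!(fresh.recv().await.unwrap(), 2);
+    }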
+} From 0256cb0fa26c8291d3b98b0b037585b0e9adefa9 Mon Sep 17 00:00:00 2001 From: Matei David Date: Mon, 25 Mar 2024 22:15:18 +0000 Subject: [PATCH 17/36] WIP Signed-off-by: Matei David --- kube-runtime/Cargo.toml | 1 + kube-runtime/src/utils/reflect_dispatch.rs | 121 ++++++++++++++------- 2 files changed, 83 insertions(+), 39 deletions(-) diff --git a/kube-runtime/Cargo.toml b/kube-runtime/Cargo.toml index 3784f2773..55c5be0a1 100644 --- a/kube-runtime/Cargo.toml +++ b/kube-runtime/Cargo.toml @@ -45,6 +45,7 @@ backoff = "0.4.0" async-trait = "0.1.64" hashbrown = "0.14.0" async-broadcast = "0.7.0" +async-stream = "0.3.5" [dependencies.k8s-openapi] version = "0.21.0" diff --git a/kube-runtime/src/utils/reflect_dispatch.rs b/kube-runtime/src/utils/reflect_dispatch.rs index 1bc991dd0..406081a8e 100644 --- a/kube-runtime/src/utils/reflect_dispatch.rs +++ b/kube-runtime/src/utils/reflect_dispatch.rs @@ -4,10 +4,11 @@ use core::{ }; use std::{collections::VecDeque, sync::Arc}; -use futures::{ready, Future, Stream, TryStream}; +use async_stream::stream; +use futures::{pin_mut, ready, Future, Stream, StreamExt, TryStream}; use pin_project::pin_project; use tokio::time; -use tracing::{debug, error, instrument, trace}; +use tracing::{debug, error, trace}; use crate::{ reflector::{store::Writer, ObjectRef, Store}, @@ -21,7 +22,7 @@ use kube_client::Resource; pub struct ReflectDispatcher where K: Resource + Clone + 'static, - K::DynamicType: Eq + std::hash::Hash + Clone, + K::DynamicType: Eq + std::hash::Hash + Clone + Default, { #[pin] stream: St, @@ -37,9 +38,9 @@ where impl ReflectDispatcher where - St: TryStream>, + St: Stream, Error>> + 'static, K: Resource + Clone, - K::DynamicType: Eq + std::hash::Hash + Clone, + K::DynamicType: Eq + std::hash::Hash + Clone + Default, { pub(super) fn new(stream: St, writer: Writer, buf_size: usize) -> ReflectDispatcher { let (tx, rx) = async_broadcast::broadcast(buf_size); @@ -65,6 +66,32 @@ where // messages in the channel. ReflectHandle::new(self.writer.as_reader(), self.tx.new_receiver()) } + + // Hm, not the right interface for this... + pub fn into_stream(mut self) -> impl Stream, Error>> { + stream! { + let stream = self.stream; + pin_mut!(stream); + while let Some(event) = stream.next().await { + if let Ok(ev) = &event { + self.writer.apply_watcher_event(&ev); + match ev { + Event::Applied(obj) => { + let obj_ref = ObjectRef::from_obj(obj); + tokio::select!{ + _ = self.tx.broadcast(obj_ref) => {}, + } + }, + Event::Restarted(obj_refs) => { + }, + _ => {} + } + } + + yield event; + } + } + } } impl Stream for ReflectDispatcher @@ -75,12 +102,6 @@ where { type Item = Result, Error>; - #[instrument( - name = "shared_stream", - skip_all, - fields(active_readers = %self.tx.receiver_count(), - inner_queue_depth = %self.buffer.len()) - )] fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); loop { @@ -97,11 +118,11 @@ where // so ordering is preserved. 
this.buffer.push_front(msg); trace!( - deadline_ms = %this.deadline.as_millis(), - queue_depth = %this.buffer.len(), - active_readers = %this.tx.receiver_count(), - "Root stream's buffer is full, retrying with a deadline" - ); + deadline_ms = %this.deadline.as_millis(), + queue_depth = %this.buffer.len(), + active_readers = %this.tx.receiver_count(), + "Root stream's buffer is full, retrying with a deadline" + ); ready!(this.sleep.as_mut().poll(cx)); error!("Shared stream cannot make progress; ensure subscribers are being driven"); // Reset timer @@ -136,11 +157,10 @@ where tracing::info!("Stream terminated"); this.tx.close(); return Poll::Ready(None); - }, + } Some(Err(error)) => return Poll::Ready(Some(Err(error))), }; - match &event { // Only deal with Deleted events Event::Applied(obj) => { @@ -180,10 +200,10 @@ where } } } - // Delete events should refresh the store. There is no need to propagate - // them to subscribers since we have already updated the store by this - // point. - _ => {} + // Delete events should refresh the store. There is no need to propagate + // them to subscribers since we have already updated the store by this + // point. + _ => {} }; Poll::Ready(Some(Ok(event))) @@ -235,16 +255,16 @@ where fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); match ready!(this.rx.as_mut().poll_next(cx)) { - Some(obj_ref) => this.reader - .get(&obj_ref) - .map(|obj| Poll::Ready(Some(obj))) - .unwrap_or(Poll::Pending), - None => Poll::Ready(None) + Some(obj_ref) => this + .reader + .get(&obj_ref) + .map(|obj| Poll::Ready(Some(obj))) + .unwrap_or(Poll::Pending), + None => Poll::Ready(None), } } } - #[cfg(test)] pub(crate) mod test { use std::{sync::Arc, task::Poll, vec}; @@ -322,18 +342,30 @@ pub(crate) mod test { pin_mut!(subscriber); // Deleted events should be skipped by subscriber. - assert!(matches!(poll!(reflect.next()), Poll::Ready(Some(Ok(Event::Deleted(_)))))); + assert!(matches!( + poll!(reflect.next()), + Poll::Ready(Some(Ok(Event::Deleted(_)))) + )); assert!(matches!(poll!(subscriber.next()), Poll::Pending)); - assert!(matches!(poll!(reflect.next()), Poll::Ready(Some(Ok(Event::Applied(_)))))); + assert!(matches!( + poll!(reflect.next()), + Poll::Ready(Some(Ok(Event::Applied(_)))) + )); assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone()))); // Errors are not propagated to subscribers. - assert!(matches!(poll!(reflect.next()), Poll::Ready(Some(Err(Error::TooManyObjects))))); + assert!(matches!( + poll!(reflect.next()), + Poll::Ready(Some(Err(Error::TooManyObjects))) + )); assert!(matches!(poll!(subscriber.next()), Poll::Pending)); // Restart event will yield all objects in the list - assert!(matches!(poll!(reflect.next()), Poll::Ready(Some(Ok(Event::Restarted(_)))))); + assert!(matches!( + poll!(reflect.next()), + Poll::Ready(Some(Ok(Event::Restarted(_)))) + )); assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone()))); assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(bar.clone()))); @@ -364,16 +396,21 @@ pub(crate) mod test { let subscriber = reflect.subscribe(); pin_mut!(subscriber); - assert!(matches!(poll!(reflect.next()), Poll::Ready(Some(Ok(Event::Applied(_)))))); + assert!(matches!( + poll!(reflect.next()), + Poll::Ready(Some(Ok(Event::Applied(_)))) + )); assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone()))); - // Restart event will yield all objects in the list. Broadcast values // without polling and then drop. // // First, subscribers should be pending. 
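         // (Nothing reaches the channel until the root stream is polled below;
         // once broadcast, buffered values stay readable even after the
         // dispatcher is dropped.)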
assert_eq!(poll!(subscriber.next()), Poll::Pending); - assert!(matches!(poll!(reflect.next()), Poll::Ready(Some(Ok(Event::Restarted(_)))))); + assert!(matches!( + poll!(reflect.next()), + Poll::Ready(Some(Ok(Event::Restarted(_)))) + )); drop(reflect); assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone()))); @@ -405,21 +442,27 @@ pub(crate) mod test { pin_mut!(subscriber); let subscriber_slow = reflect.subscribe(); pin_mut!(subscriber_slow); - + assert_eq!(poll!(subscriber.next()), Poll::Pending); assert_eq!(poll!(subscriber_slow.next()), Poll::Pending); // Poll first subscriber, but not the second. - assert!(matches!(poll!(reflect.next()), Poll::Ready(Some(Ok(Event::Applied(_)))))); + assert!(matches!( + poll!(reflect.next()), + Poll::Ready(Some(Ok(Event::Applied(_)))) + )); assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone()))); // One subscriber is not reading, so we need to apply backpressure until - // channel has capacity. + // channel has capacity. // // At this point, the buffer is full. Polling again will trigger the // backpressure logic. This means, next event will be returned, but no // more progress will be made after that until subscriber_slow catches // up. - assert!(matches!(poll!(reflect.next()), Poll::Ready(Some(Ok(Event::Restarted(_)))))); + assert!(matches!( + poll!(reflect.next()), + Poll::Ready(Some(Ok(Event::Restarted(_)))) + )); assert!(matches!(poll!(reflect.next()), Poll::Pending)); // Our "fast" subscriber will also have nothing else to poll until the // slower subscriber advances its pointer in the buffer. From 74f09f79ee9825590b55b82d019345fb33177df1 Mon Sep 17 00:00:00 2001 From: Matei David Date: Tue, 26 Mar 2024 16:55:47 +0000 Subject: [PATCH 18/36] WIP 2 Signed-off-by: Matei David --- kube-runtime/src/utils/reflect_dispatch.rs | 15 +++------------ kube-runtime/src/utils/watch_ext.rs | 6 +++--- 2 files changed, 6 insertions(+), 15 deletions(-) diff --git a/kube-runtime/src/utils/reflect_dispatch.rs b/kube-runtime/src/utils/reflect_dispatch.rs index 406081a8e..adecc02ea 100644 --- a/kube-runtime/src/utils/reflect_dispatch.rs +++ b/kube-runtime/src/utils/reflect_dispatch.rs @@ -2,7 +2,7 @@ use core::{ pin::Pin, task::{Context, Poll}, }; -use std::{collections::VecDeque, sync::Arc}; +use std::{collections::VecDeque, sync::Arc, time::Duration}; use async_stream::stream; use futures::{pin_mut, ready, Future, Stream, StreamExt, TryStream}; @@ -24,15 +24,10 @@ where K: Resource + Clone + 'static, K::DynamicType: Eq + std::hash::Hash + Clone + Default, { - #[pin] stream: St, writer: Writer, tx: Sender>, - rx: InactiveReceiver>, - #[pin] - sleep: time::Sleep, - buffer: VecDeque>, deadline: time::Duration, } @@ -42,16 +37,12 @@ where K: Resource + Clone, K::DynamicType: Eq + std::hash::Hash + Clone + Default, { - pub(super) fn new(stream: St, writer: Writer, buf_size: usize) -> ReflectDispatcher { - let (tx, rx) = async_broadcast::broadcast(buf_size); + pub(super) fn new(stream: St, writer: Writer, tx: Sender>) -> ReflectDispatcher { Self { stream, writer, tx, - rx: rx.deactivate(), - deadline: time::Duration::from_secs(10), - sleep: time::sleep(time::Duration::ZERO), - buffer: VecDeque::new(), + deadline: Duration::from_millis(10), } } diff --git a/kube-runtime/src/utils/watch_ext.rs b/kube-runtime/src/utils/watch_ext.rs index a86620864..5109a1669 100644 --- a/kube-runtime/src/utils/watch_ext.rs +++ b/kube-runtime/src/utils/watch_ext.rs @@ -17,7 +17,6 @@ use crate::watcher::DefaultBackoff; use backoff::backoff::Backoff; use futures::{Stream, 
TryStream}; - /// Extension trait for streams returned by [`watcher`](watcher()) or [`reflector`](crate::reflector::reflector) pub trait WatchStreamExt: Stream { /// Apply the [`DefaultBackoff`] watcher [`Backoff`] policy @@ -257,13 +256,14 @@ pub trait WatchStreamExt: Stream { self, writer: Writer, buf_size: usize, - ) -> (ReflectHandle, ReflectDispatcher) + ) -> (ReflectHandle, impl Stream) where Self: Stream>> + Sized, K: Resource + Clone + 'static, K::DynamicType: Eq + std::hash::Hash + Clone, { - let reflect = ReflectDispatcher::new(self, writer, buf_size); + let (tx, rx) = async_broadcast::broadcast(buf_size); + let handle = ReflectHandle::new(writer.as_reader(), rx); (reflect.subscribe(), reflect) } } From 2d5a3b0c9d9e97bf3f8c3f164c1f5d486554e935 Mon Sep 17 00:00:00 2001 From: Matei David Date: Tue, 26 Mar 2024 17:37:05 +0000 Subject: [PATCH 19/36] Start working on store side Signed-off-by: Matei David --- kube-runtime/src/reflector/dispatcher.rs | 91 ++++++++++++++++++++++ kube-runtime/src/reflector/mod.rs | 2 + kube-runtime/src/utils/reflect_dispatch.rs | 45 ++++------- 3 files changed, 107 insertions(+), 31 deletions(-) create mode 100644 kube-runtime/src/reflector/dispatcher.rs diff --git a/kube-runtime/src/reflector/dispatcher.rs b/kube-runtime/src/reflector/dispatcher.rs new file mode 100644 index 000000000..4a97eb2d1 --- /dev/null +++ b/kube-runtime/src/reflector/dispatcher.rs @@ -0,0 +1,91 @@ +use core::{ + pin::Pin, + task::{Context, Poll}, +}; +use std::sync::Arc; + +use futures::{ready, Stream}; +use pin_project::pin_project; +use tokio::time; + +use crate::reflector::{ObjectRef, Store}; +use async_broadcast::{Receiver, Sender}; +use kube_client::Resource; + +pub(crate) struct Dispatcher +where + K: Resource + Clone, + K::DynamicType: Eq + std::hash::Hash + Clone + Default, +{ + tx: Sender>, + deadline: time::Duration, +} + +impl Dispatcher +where + K: Resource + Clone, + K::DynamicType: Eq + std::hash::Hash + Clone + Default, +{ + pub(crate) fn new(tx: Sender>, deadline: time::Duration) -> Dispatcher { + Self { tx, deadline } + } + + pub(crate) async fn send(&mut self, value: ObjectRef) { + self.tx.broadcast_direct(value).await; + } +} + +#[pin_project] +pub struct ReflectHandle +where + K: Resource + Clone + 'static, + K::DynamicType: Eq + std::hash::Hash + Clone, +{ + #[pin] + rx: Receiver>, + reader: Store, +} + +impl Clone for ReflectHandle +where + K: Resource + Clone + 'static, + K::DynamicType: Eq + std::hash::Hash + Clone, +{ + fn clone(&self) -> Self { + ReflectHandle::new(self.reader.clone(), self.rx.clone()) + } +} + +impl ReflectHandle +where + K: Resource + Clone, + K::DynamicType: Eq + std::hash::Hash + Clone, +{ + pub(super) fn new(reader: Store, rx: Receiver>) -> ReflectHandle { + Self { reader, rx } + } + + pub fn reader(&self) -> Store { + self.reader.clone() + } +} + +impl Stream for ReflectHandle +where + K: Resource + Clone, + K::DynamicType: Eq + std::hash::Hash + Clone + Default, +{ + type Item = Arc; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut this = self.project(); + match ready!(this.rx.as_mut().poll_next(cx)) { + Some(obj_ref) => this + .reader + .get(&obj_ref) + .map(|obj| Poll::Ready(Some(obj))) + .unwrap_or(Poll::Pending), + None => Poll::Ready(None), + } + } +} diff --git a/kube-runtime/src/reflector/mod.rs b/kube-runtime/src/reflector/mod.rs index d0a724b53..41de22af6 100644 --- a/kube-runtime/src/reflector/mod.rs +++ b/kube-runtime/src/reflector/mod.rs @@ -1,8 +1,10 @@ //! 
Caches objects in memory +mod dispatcher; mod object_ref; pub mod store; +pub use self::dispatcher::ReflectHandle; pub use self::object_ref::{Extra as ObjectRefExtra, ObjectRef}; use crate::watcher; use futures::{Stream, TryStreamExt}; diff --git a/kube-runtime/src/utils/reflect_dispatch.rs b/kube-runtime/src/utils/reflect_dispatch.rs index adecc02ea..2e7f5582e 100644 --- a/kube-runtime/src/utils/reflect_dispatch.rs +++ b/kube-runtime/src/utils/reflect_dispatch.rs @@ -2,7 +2,7 @@ use core::{ pin::Pin, task::{Context, Poll}, }; -use std::{collections::VecDeque, sync::Arc, time::Duration}; +use std::{collections::VecDeque, sync::Arc}; use async_stream::stream; use futures::{pin_mut, ready, Future, Stream, StreamExt, TryStream}; @@ -22,12 +22,17 @@ use kube_client::Resource; pub struct ReflectDispatcher where K: Resource + Clone + 'static, - K::DynamicType: Eq + std::hash::Hash + Clone + Default, + K::DynamicType: Eq + std::hash::Hash + Clone, { + #[pin] stream: St, writer: Writer, tx: Sender>, + rx: InactiveReceiver>, + #[pin] + sleep: time::Sleep, + buffer: VecDeque>, deadline: time::Duration, } @@ -35,14 +40,18 @@ impl ReflectDispatcher where St: Stream, Error>> + 'static, K: Resource + Clone, - K::DynamicType: Eq + std::hash::Hash + Clone + Default, + K::DynamicType: Eq + std::hash::Hash + Clone, { - pub(super) fn new(stream: St, writer: Writer, tx: Sender>) -> ReflectDispatcher { + pub(super) fn new(stream: St, writer: Writer, buf_size: usize) -> ReflectDispatcher { + let (tx, rx) = async_broadcast::broadcast(buf_size); Self { stream, writer, tx, - deadline: Duration::from_millis(10), + rx: rx.deactivate(), + deadline: time::Duration::from_secs(10), + sleep: time::sleep(time::Duration::ZERO), + buffer: VecDeque::new(), } } @@ -57,32 +66,6 @@ where // messages in the channel. ReflectHandle::new(self.writer.as_reader(), self.tx.new_receiver()) } - - // Hm, not the right interface for this... - pub fn into_stream(mut self) -> impl Stream, Error>> { - stream! 
{ - let stream = self.stream; - pin_mut!(stream); - while let Some(event) = stream.next().await { - if let Ok(ev) = &event { - self.writer.apply_watcher_event(&ev); - match ev { - Event::Applied(obj) => { - let obj_ref = ObjectRef::from_obj(obj); - tokio::select!{ - _ = self.tx.broadcast(obj_ref) => {}, - } - }, - Event::Restarted(obj_refs) => { - }, - _ => {} - } - } - - yield event; - } - } - } } impl Stream for ReflectDispatcher From 9bf111c4122794ec9b20489d0a42947482cdaac3 Mon Sep 17 00:00:00 2001 From: Matei David Date: Tue, 26 Mar 2024 19:22:54 +0000 Subject: [PATCH 20/36] Tests are green Signed-off-by: Matei David --- kube-runtime/src/reflector/dispatcher.rs | 228 +++++++++++++++++++++++ kube-runtime/src/reflector/mod.rs | 19 +- kube-runtime/src/reflector/store.rs | 37 +++- 3 files changed, 273 insertions(+), 11 deletions(-) diff --git a/kube-runtime/src/reflector/dispatcher.rs b/kube-runtime/src/reflector/dispatcher.rs index 5cf414ee8..f493e58bb 100644 --- a/kube-runtime/src/reflector/dispatcher.rs +++ b/kube-runtime/src/reflector/dispatcher.rs @@ -66,3 +66,231 @@ where } } } + +#[cfg(test)] +pub(crate) mod test { + use crate::{ + watcher::{Error, Event}, + WatchStreamExt, + }; + use std::{sync::Arc, task::Poll, vec}; + + use crate::reflector; + use futures::{pin_mut, poll, stream, StreamExt}; + use k8s_openapi::api::core::v1::Pod; + + fn testpod(name: &str) -> Pod { + let mut pod = Pod::default(); + pod.metadata.name = Some(name.to_string()); + pod + } + + #[tokio::test] + async fn events_are_passed_through() { + let foo = testpod("foo"); + let bar = testpod("bar"); + let st = stream::iter([ + Ok(Event::Applied(foo.clone())), + Err(Error::TooManyObjects), + Ok(Event::Restarted(vec![foo, bar])), + ]); + + let (reader, writer) = reflector::store_with_dispatch(10, Default::default()); + let reflect = st.reflect_dispatch(writer); + pin_mut!(reflect); + + // Prior to any polls, we should have an empty store. + assert_eq!(reader.len(), 0); + assert!(matches!( + poll!(reflect.next()), + Poll::Ready(Some(Ok(Event::Applied(_)))) + )); + + // Make progress and assert all events are seen + assert_eq!(reader.len(), 1); + assert!(matches!( + poll!(reflect.next()), + Poll::Ready(Some(Err(Error::TooManyObjects))) + )); + assert_eq!(reader.len(), 1); + + let restarted = poll!(reflect.next()); + assert!(matches!(restarted, Poll::Ready(Some(Ok(Event::Restarted(_)))))); + assert_eq!(reader.len(), 2); + + assert!(matches!(poll!(reflect.next()), Poll::Ready(None))); + assert_eq!(reader.len(), 2); + } + + #[tokio::test] + async fn readers_yield_touched_objects() { + // Readers should yield touched objects they receive from Stream events. + // + // NOTE: a Delete(_) event will be ignored if the item does not exist in + // the cache. Same with a Restarted(vec![delete_item]) + let foo = testpod("foo"); + let bar = testpod("bar"); + let st = stream::iter([ + Ok(Event::Deleted(foo.clone())), + Ok(Event::Applied(foo.clone())), + Err(Error::TooManyObjects), + Ok(Event::Restarted(vec![foo.clone(), bar.clone()])), + ]); + + let foo = Arc::new(foo); + let bar = Arc::new(bar); + + let (_, writer) = reflector::store_with_dispatch(10, Default::default()); + let subscriber = writer.subscribe(); + let reflect = st.reflect_dispatch(writer); + pin_mut!(reflect); + pin_mut!(subscriber); + + // Deleted events should be skipped by subscriber. 
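+        // (Deleted objects are never broadcast: `dispatch_event` only forwards
+        // refs for Applied and Restarted events, and a deleted object would no
+        // longer resolve through the store anyway.)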
+ assert!(matches!( + poll!(reflect.next()), + Poll::Ready(Some(Ok(Event::Deleted(_)))) + )); + assert_eq!(poll!(subscriber.next()), Poll::Pending); + + assert!(matches!( + poll!(reflect.next()), + Poll::Ready(Some(Ok(Event::Applied(_)))) + )); + assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone()))); + + // Errors are not propagated to subscribers. + assert!(matches!( + poll!(reflect.next()), + Poll::Ready(Some(Err(Error::TooManyObjects))) + )); + assert!(matches!(poll!(subscriber.next()), Poll::Pending)); + + // Restart event will yield all objects in the list + assert!(matches!( + poll!(reflect.next()), + Poll::Ready(Some(Ok(Event::Restarted(_)))) + )); + assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone()))); + assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(bar.clone()))); + + // When main channel is closed, it is propagated to subscribers + assert!(matches!(poll!(reflect.next()), Poll::Ready(None))); + assert_eq!(poll!(subscriber.next()), Poll::Ready(None)); + } + + #[tokio::test] + async fn readers_yield_when_tx_drops() { + // Once the main stream is dropped, readers should continue to make + // progress and read values that have been sent on the channel. + let foo = testpod("foo"); + let bar = testpod("bar"); + let st = stream::iter([ + Ok(Event::Applied(foo.clone())), + Ok(Event::Restarted(vec![foo.clone(), bar.clone()])), + ]); + + let foo = Arc::new(foo); + let bar = Arc::new(bar); + + let (_, writer) = reflector::store_with_dispatch(10, Default::default()); + let subscriber = writer.subscribe(); + let mut reflect = Box::pin(st.reflect_dispatch(writer)); + pin_mut!(subscriber); + + assert!(matches!( + poll!(reflect.next()), + Poll::Ready(Some(Ok(Event::Applied(_)))) + )); + assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone()))); + + // Restart event will yield all objects in the list. Broadcast values + // without polling and then drop. + // + // First, subscribers should be pending. + assert_eq!(poll!(subscriber.next()), Poll::Pending); + assert!(matches!( + poll!(reflect.next()), + Poll::Ready(Some(Ok(Event::Restarted(_)))) + )); + drop(reflect); + + assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone()))); + assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(bar.clone()))); + assert_eq!(poll!(subscriber.next()), Poll::Ready(None)); + } + + #[tokio::test] + async fn reflect_applies_backpressure() { + // When the channel is full, we should observe backpressure applied. + // + // This will be manifested by receiving Poll::Pending on the reflector + // stream while the reader stream is not polled. Once we unblock the + // buffer, the reflector will make progress. + let foo = testpod("foo"); + let bar = testpod("bar"); + let st = stream::iter([ + Ok(Event::Applied(foo.clone())), + Ok(Event::Restarted(vec![foo.clone(), bar.clone()])), + ]); + + let foo = Arc::new(foo); + let bar = Arc::new(bar); + + let (_, writer) = reflector::store_with_dispatch(1, Default::default()); + let subscriber = writer.subscribe(); + let subscriber_slow = writer.subscribe(); + let reflect = st.reflect_dispatch(writer); + pin_mut!(reflect); + pin_mut!(subscriber); + pin_mut!(subscriber_slow); + + assert_eq!(poll!(subscriber.next()), Poll::Pending); + assert_eq!(poll!(subscriber_slow.next()), Poll::Pending); + + // Poll first subscriber, but not the second. + // + // The buffer can hold one value, so even if we have a slow subscriber, + // we will still get an event from the root. 
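+        // (A slot in the broadcast buffer is only freed once every active
+        // subscriber has read it, so a single slow reader is enough to block
+        // further sends.)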
+ assert!(matches!( + poll!(reflect.next()), + Poll::Ready(Some(Ok(Event::Applied(_)))) + )); + assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone()))); + // One subscriber is not reading, so we need to apply backpressure until + // channel has capacity. + // + // At this point, the buffer is full. Polling again will trigger the + // backpressure logic. + assert!(matches!(poll!(reflect.next()), Poll::Pending)); + + // Our "fast" subscriber will also have nothing else to poll until the + // slower subscriber advances its pointer in the buffer. + assert_eq!(poll!(subscriber.next()), Poll::Pending); + + // Advance slow reader + assert_eq!(poll!(subscriber_slow.next()), Poll::Ready(Some(foo.clone()))); + + // We now have room for only one more item. In total, the previous event + // had two. We repeat the same pattern. + assert!(matches!(poll!(reflect.next()), Poll::Pending)); + assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone()))); + assert!(matches!(poll!(reflect.next()), Poll::Pending)); + assert_eq!(poll!(subscriber_slow.next()), Poll::Ready(Some(foo.clone()))); + assert!(matches!( + poll!(reflect.next()), + Poll::Ready(Some(Ok(Event::Restarted(_)))) + )); + // Poll again to drain the queue. + assert!(matches!(poll!(reflect.next()), Poll::Ready(None))); + assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(bar.clone()))); + assert_eq!(poll!(subscriber_slow.next()), Poll::Ready(Some(bar.clone()))); + + assert_eq!(poll!(subscriber.next()), Poll::Ready(None)); + assert_eq!(poll!(subscriber_slow.next()), Poll::Ready(None)); + } + + // TODO (matei): tests around cloning subscribers once a watch stream has already + // been established. This will depend on the interfaces & impl so are left + // out for now. +} diff --git a/kube-runtime/src/reflector/mod.rs b/kube-runtime/src/reflector/mod.rs index c890ba1af..d16850a8b 100644 --- a/kube-runtime/src/reflector/mod.rs +++ b/kube-runtime/src/reflector/mod.rs @@ -7,9 +7,10 @@ pub mod store; pub use self::dispatcher::ReflectHandle; pub use self::object_ref::{Extra as ObjectRefExtra, Lookup, ObjectRef}; use crate::watcher; -use futures::{Stream, TryStreamExt}; +use async_stream::stream; +use futures::{Stream, StreamExt}; use std::hash::Hash; -pub use store::{store, Store}; +pub use store::{store, store_with_dispatch, Store}; /// Cache objects from a [`watcher()`] stream into a local [`Store`] /// @@ -99,7 +100,19 @@ where K::DynamicType: Eq + Hash + Clone, W: Stream>>, { - stream.inspect_ok(move |event| writer.apply_watcher_event(event)) + let mut stream = Box::pin(stream); + stream! 
{
+        while let Some(event) = stream.next().await {
+            match event {
+                Ok(ev) => {
+                    writer.apply_watcher_event(&ev);
+                    writer.dispatch_event(&ev).await;
+                    yield Ok(ev);
+                },
+                Err(ev) => yield Err(ev)
+            }
+        }
+    }
 }
 
 #[cfg(test)]
diff --git a/kube-runtime/src/reflector/store.rs b/kube-runtime/src/reflector/store.rs
index ff3eea3a8..8c5679fc3 100644
--- a/kube-runtime/src/reflector/store.rs
+++ b/kube-runtime/src/reflector/store.rs
@@ -4,7 +4,7 @@ use crate::{
     watcher,
 };
 use ahash::AHashMap;
-use async_broadcast::Sender;
+use async_broadcast::{InactiveReceiver, Sender};
 use derivative::Derivative;
 use parking_lot::RwLock;
 use std::{fmt::Debug, hash::Hash, sync::Arc};
@@ -19,18 +19,20 @@ type Cache<K> = Arc<RwLock<AHashMap<ObjectRef<K>, Arc<K>>>>;
 #[derive(Debug)]
 pub struct Writer<K: 'static + Lookup + Clone>
 where
-    K::DynamicType: Eq + Hash + Default + Clone,
+    K::DynamicType: Eq + Hash + Clone,
 {
     store: Cache<K>,
     dyntype: K::DynamicType,
     ready_tx: Option<delayed_init::Initializer<()>>,
     ready_rx: Arc<DelayedInit<()>>,
     dispatch_tx: Sender<ObjectRef<K>>,
+
+    _dispatch_rx: InactiveReceiver<ObjectRef<K>>,
 }
 
 impl<K: 'static + Lookup + Clone> Writer<K>
 where
-    K::DynamicType: Eq + Hash + Default + Clone,
+    K::DynamicType: Eq + Hash + Clone,
 {
     /// Creates a new Writer with the specified dynamic type.
     ///
     /// If the dynamic type is default-able (for example when writer is used with
     /// `k8s_openapi` types) you can use `Default` instead.
     pub fn new(dyntype: K::DynamicType) -> Self {
         let (ready_tx, ready_rx) = DelayedInit::new();
-        let (dispatch_tx, _) = async_broadcast::broadcast(1);
+        let (dispatch_tx, dispatch_rx) = async_broadcast::broadcast(1);
+        dispatch_tx.close();
         Writer {
             store: Default::default(),
             dyntype,
             ready_tx: Some(ready_tx),
             ready_rx: Arc::new(ready_rx),
             dispatch_tx,
+            _dispatch_rx: dispatch_rx.deactivate(),
         }
     }
 
     pub fn new_with_dispatch(dyntype: K::DynamicType, buf_size: usize) -> Self {
         let (ready_tx, ready_rx) = DelayedInit::new();
-        let (dispatch_tx, _) = async_broadcast::broadcast(buf_size);
+        let (mut dispatch_tx, dispatch_rx) = async_broadcast::broadcast(buf_size);
+        // don't block waiting for an active receiver
+        dispatch_tx.set_await_active(false);
         Writer {
             store: Default::default(),
             dyntype,
             ready_tx: Some(ready_tx),
             ready_rx: Arc::new(ready_rx),
             dispatch_tx,
+            _dispatch_rx: dispatch_rx.deactivate(),
         }
     }
 
@@ -73,7 +80,7 @@ where
     }
 
     pub fn subscribe(&self) -> ReflectHandle<K> {
-        ReflectHandle::new(self.as_reader(), self.dispatcher)
+        ReflectHandle::new(self.as_reader(), self.dispatch_tx.new_receiver())
     }
 
     /// Applies a single watcher event to the store
@@ -104,16 +111,20 @@ where
     }
 
     pub(crate) async fn dispatch_event(&mut self, event: &watcher::Event<K>) {
+        if self.dispatch_tx.is_closed() {
+            return;
+        }
+
         match event {
             watcher::Event::Applied(obj) => {
                 let obj_ref = obj.to_object_ref(self.dyntype.clone());
-                self.dispatch_tx.broadcast(obj_ref).await;
+                let _ = self.dispatch_tx.broadcast_direct(obj_ref).await;
             }
 
             watcher::Event::Restarted(new_objs) => {
                 let objs = new_objs.iter().map(|obj| obj.to_object_ref(self.dyntype.clone()));
                 for obj in objs {
-                    self.dispatch_tx.broadcast(obj).await;
+                    let _ = self.dispatch_tx.broadcast_direct(obj).await;
                 }
             }
             _ => {}
@@ -241,6 +252,16 @@ where
     (r, w)
 }
 
+pub fn store_with_dispatch<K>(buf_size: usize, dyntype: K::DynamicType) -> (Store<K>, Writer<K>)
+where
+    K: Lookup + Clone + 'static,
+    K::DynamicType: Eq + Hash + Clone + Default,
+{
+    let w = Writer::<K>::new_with_dispatch(dyntype, buf_size);
+    let r = w.as_reader();
+    (r, w)
+}
+
 #[cfg(test)]
 mod tests {
     use super::{store, Writer};

From 04a53d1696e2a274c920f521bc60e146223ca79a Mon Sep 17 00:00:00 2001
From: Matei David
Date: Tue, 26 Mar 2024 19:24:07 +0000
Subject: [PATCH 21/36] rm redundant trait bounds

Signed-off-by: Matei David
---
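Note: as a rough usage sketch (not part of the series' diffs), the pieces added
so far compose roughly as below. The names `store_with_dispatch`,
`Writer::subscribe` and `reflect_dispatch` are the in-branch interfaces at this
point, and shutdown/error handling is elided:

    use futures::StreamExt;
    use k8s_openapi::api::core::v1::Pod;
    use kube::{
        runtime::{reflector, watcher, WatchStreamExt},
        Api, Client,
    };

    async fn run() -> anyhow::Result<()> {
        let client = Client::try_default().await?;
        let pods: Api<Pod> = Api::default_namespaced(client);

        // The writer owns the dispatch channel; the reader serves cached lookups.
        let (reader, writer) = reflector::store_with_dispatch(16, Default::default());
        // A subscriber yields an Arc<Pod> for every Applied/Restarted object.
        let subscriber = writer.subscribe();

        // Events are applied to the store and dispatched to subscribers as a
        // side effect of polling the watch stream, so it must be driven.
        let watch = watcher(pods, Default::default()).reflect_dispatch(writer);
        tokio::spawn(watch.for_each(|_| async {}));

        let mut subscriber = Box::pin(subscriber);
        while let Some(pod) = subscriber.next().await {
            println!("saw {:?} ({} pods cached)", pod.metadata.name, reader.len());
        }
        Ok(())
    }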
kube-runtime/src/utils/reflect.rs | 3 +-- kube-runtime/src/utils/watch_ext.rs | 15 +++------------ 2 files changed, 4 insertions(+), 14 deletions(-) diff --git a/kube-runtime/src/utils/reflect.rs b/kube-runtime/src/utils/reflect.rs index babf63e9a..43fa65c2a 100644 --- a/kube-runtime/src/utils/reflect.rs +++ b/kube-runtime/src/utils/reflect.rs @@ -38,7 +38,7 @@ where impl Stream for Reflect where K: Resource + Clone, - K::DynamicType: Eq + std::hash::Hash + Clone + Default, + K::DynamicType: Eq + std::hash::Hash + Clone, St: Stream, Error>>, { type Item = Result, Error>; @@ -52,7 +52,6 @@ where } } - #[cfg(test)] pub(crate) mod test { use std::{task::Poll, vec}; diff --git a/kube-runtime/src/utils/watch_ext.rs b/kube-runtime/src/utils/watch_ext.rs index 5109a1669..8a1f4fc20 100644 --- a/kube-runtime/src/utils/watch_ext.rs +++ b/kube-runtime/src/utils/watch_ext.rs @@ -8,10 +8,7 @@ use crate::{ }; use kube_client::Resource; -use crate::{ - reflector::store::Writer, - utils::{Reflect, ReflectDispatcher, ReflectHandle}, -}; +use crate::{reflector::store::Writer, utils::Reflect}; use crate::watcher::DefaultBackoff; use backoff::backoff::Backoff; @@ -252,19 +249,13 @@ pub trait WatchStreamExt: Stream { Reflect::new(self, writer) } - fn reflect_dispatch( - self, - writer: Writer, - buf_size: usize, - ) -> (ReflectHandle, impl Stream) + fn reflect_dispatch(self, writer: Writer) -> impl Stream where Self: Stream>> + Sized, K: Resource + Clone + 'static, K::DynamicType: Eq + std::hash::Hash + Clone, { - let (tx, rx) = async_broadcast::broadcast(buf_size); - let handle = ReflectHandle::new(writer.as_reader(), rx); - (reflect.subscribe(), reflect) + crate::reflector(writer, self) } } From 6b5bd317623365fd7b10e75611f4d419af907bc2 Mon Sep 17 00:00:00 2001 From: Matei David Date: Tue, 26 Mar 2024 19:29:15 +0000 Subject: [PATCH 22/36] Update example with new interfaces Signed-off-by: Matei David --- examples/shared_stream_controllers.rs | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/examples/shared_stream_controllers.rs b/examples/shared_stream_controllers.rs index c24169814..0e2e3f2cd 100644 --- a/examples/shared_stream_controllers.rs +++ b/examples/shared_stream_controllers.rs @@ -33,19 +33,14 @@ async fn main() -> anyhow::Result<()> { let config = Config::default().concurrency(2); let ctx = Arc::new(Data { client }); - // (1): create a store - let writer = Writer::::new(Default::default()); - - // (2): split the stream: - // - create a handle that can be cloned to get more readers - // - pass through events from root stream through a reflector - // - // Before splitting, we apply a backoff. This is completely optional, but it - // allows us to ensure the APIServer won't be overwhelmed when we retry - // watches on errors. - let (subscriber, reflector) = watcher(pods.clone(), Default::default()) + // (1): create a store (with a dispatcher) + let writer = Writer::::new_with_dispatch(Default::default(), SUBSCRIBE_BUFFER_SIZE); + // (2): create a subscriber + let subscriber = writer.subscribe(); + // (2.5): create a watch stream + let pod_watch = watcher(pods.clone(), Default::default()) .default_backoff() - .reflect_shared(writer, SUBSCRIBE_BUFFER_SIZE); + .reflect_dispatch(writer); // (3): schedule the root (i.e. shared) stream with the runtime. // @@ -54,8 +49,8 @@ async fn main() -> anyhow::Result<()> { // to make progress. 
tokio::spawn(async move { // Pin on the heap so we don't overflow our stack - let mut reflector = reflector.boxed(); - while let Some(next) = reflector.next().await { + let mut watch = pod_watch.boxed(); + while let Some(next) = watch.next().await { // We are not interested in the returned events here, only in // handling errors. match next { @@ -65,7 +60,6 @@ async fn main() -> anyhow::Result<()> { } }); - // (4): create a reader. We create a metadata controller that will mirror a // pod's labels as annotations. // From def0011f128e10d19bdffc85494b86542b53329f Mon Sep 17 00:00:00 2001 From: Matei David Date: Wed, 27 Mar 2024 16:58:41 +0000 Subject: [PATCH 23/36] Add comments and a small todo Signed-off-by: Matei David --- kube-runtime/src/reflector/store.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kube-runtime/src/reflector/store.rs b/kube-runtime/src/reflector/store.rs index 8c5679fc3..aab9583b7 100644 --- a/kube-runtime/src/reflector/store.rs +++ b/kube-runtime/src/reflector/store.rs @@ -27,6 +27,8 @@ where ready_rx: Arc>, dispatch_tx: Sender>, + // An inactive reader that prevents the channel from closing until the + // writer is dropped. _dispatch_rx: InactiveReceiver>, } @@ -118,6 +120,8 @@ where match event { watcher::Event::Applied(obj) => { let obj_ref = obj.to_object_ref(self.dyntype.clone()); + // TODO: should this take a timeout to log when backpressure has + // been applied for too long, e.g. 10s let _ = self.dispatch_tx.broadcast_direct(obj_ref).await; } From d69213af3fcdcddea8998249ebd9c9f6a9323776 Mon Sep 17 00:00:00 2001 From: Matei David Date: Wed, 27 Mar 2024 17:01:24 +0000 Subject: [PATCH 24/36] Remove dispatch mod from utils Signed-off-by: Matei David --- kube-runtime/src/utils/mod.rs | 2 - kube-runtime/src/utils/reflect_dispatch.rs | 464 --------------------- 2 files changed, 466 deletions(-) delete mode 100644 kube-runtime/src/utils/reflect_dispatch.rs diff --git a/kube-runtime/src/utils/mod.rs b/kube-runtime/src/utils/mod.rs index a35068ce4..8450061e0 100644 --- a/kube-runtime/src/utils/mod.rs +++ b/kube-runtime/src/utils/mod.rs @@ -7,7 +7,6 @@ mod event_modify; #[cfg(feature = "unstable-runtime-predicates")] mod predicate; mod reflect; -mod reflect_dispatch; mod stream_backoff; #[cfg(feature = "unstable-runtime-subscribe")] pub mod stream_subscribe; @@ -19,7 +18,6 @@ pub use event_modify::EventModify; #[cfg(feature = "unstable-runtime-predicates")] pub use predicate::{predicates, Predicate, PredicateFilter}; pub use reflect::Reflect; -pub use reflect_dispatch::{ReflectDispatcher, ReflectHandle}; pub use stream_backoff::StreamBackoff; #[cfg(feature = "unstable-runtime-subscribe")] pub use stream_subscribe::StreamSubscribe; diff --git a/kube-runtime/src/utils/reflect_dispatch.rs b/kube-runtime/src/utils/reflect_dispatch.rs deleted file mode 100644 index 0f14f2196..000000000 --- a/kube-runtime/src/utils/reflect_dispatch.rs +++ /dev/null @@ -1,464 +0,0 @@ -use core::{ - pin::Pin, - task::{Context, Poll}, -}; -use std::{collections::VecDeque, sync::Arc}; - -use futures::{ready, Future, Stream, StreamExt, TryStream}; -use pin_project::pin_project; -use tokio::time; -use tracing::{debug, error, trace}; - -use crate::{ - reflector::{store::Writer, Lookup, ObjectRef, Store}, - watcher::{Error, Event}, -}; -use async_broadcast::{InactiveReceiver, Receiver, Sender}; - -/// Stream returned by the [`reflect`](super::WatchStreamExt::reflect) method -#[pin_project] -pub struct ReflectDispatcher -where - K: Lookup + Clone + 'static, - K::DynamicType: Eq + 
std::hash::Hash + Clone + Default, -{ - #[pin] - stream: St, - writer: Writer, - tx: Sender>, - rx: InactiveReceiver>, - - #[pin] - sleep: time::Sleep, - buffer: VecDeque>, - deadline: time::Duration, -} - -impl ReflectDispatcher -where - St: Stream, Error>> + 'static, - K: Lookup + Clone, - K::DynamicType: Eq + std::hash::Hash + Clone + Default, -{ - pub(super) fn new(stream: St, writer: Writer, buf_size: usize) -> ReflectDispatcher { - let (tx, rx) = async_broadcast::broadcast(buf_size); - Self { - stream, - writer, - tx, - rx: rx.deactivate(), - deadline: time::Duration::from_secs(10), - sleep: time::sleep(time::Duration::ZERO), - buffer: VecDeque::new(), - } - } - - pub fn subscribe(&self) -> ReflectHandle { - // Note: broadcast::Sender::new_receiver() will return a new receiver - // that _will not_ replay any messages in the channel, effectively - // starting from the latest message. - // - // Since we create a reader and a writer when calling reflect_shared() - // this should be fine. All subsequent clones should go through - // ReflectHandle::clone() to get a receiver that replays all of the - // messages in the channel. - ReflectHandle::new(self.writer.as_reader(), self.tx.new_receiver()) - } -} - -impl Stream for ReflectDispatcher -where - K: Lookup + Clone, - K::DynamicType: Eq + std::hash::Hash + Clone + Default, - St: Stream, Error>>, -{ - type Item = Result, Error>; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - loop { - if let Some(msg) = this.buffer.pop_front() { - match this.tx.try_broadcast(msg) { - Ok(_) => { - trace!("Broadcast value"); - } - Err(async_broadcast::TrySendError::Full(msg)) => { - // When the broadcast buffer is full, retry with a - // deadline. - // - // First, push the msg back to the front of the buffer - // so ordering is preserved. - this.buffer.push_front(msg); - trace!( - deadline_ms = %this.deadline.as_millis(), - queue_depth = %this.buffer.len(), - active_readers = %this.tx.receiver_count(), - "Root stream's buffer is full, retrying with a deadline" - ); - ready!(this.sleep.as_mut().poll(cx)); - error!("Shared stream cannot make progress; ensure subscribers are being driven"); - // Reset timer - this.sleep.as_mut().reset(time::Instant::now() + *this.deadline); - } - Err(error) if error.is_disconnected() => { - // When the broadcast channel is disconnected, we have - // no active receivers. We should clear the buffer and - // avoid writing to the channel. - this.buffer.clear(); - debug!("No active readers subscribed to shared stream"); - } - _ => { - // Other possible error is a closed channel. - // We should never hit this since we are holding a - // writer and an inactive reader. - } - } - } else { - break; - } - } - - let next = this.stream.as_mut().poll_next(cx).map_ok(|event| { - this.writer.apply_watcher_event(&event); - event - }); - - let event = match ready!(next) { - Some(Ok(event)) => event, - None => { - tracing::info!("Stream terminated"); - this.tx.close(); - return Poll::Ready(None); - } - Some(Err(error)) => return Poll::Ready(Some(Err(error))), - }; - - match &event { - // Only deal with Deleted events - Event::Applied(obj) => { - let obj_ref = ObjectRef::from_obj(obj); - match this.tx.try_broadcast(obj_ref) { - Ok(_) => {} - Err(async_broadcast::TrySendError::Full(msg)) => { - debug!( - "Attempted to write to subscribers with no buffer space; applying backpressure" - ); - this.buffer.push_back(msg); - } - // Channel is closed or we have no active readers. 
- // In both cases there's not much we can do, so drive the - // watch strem. - _ => {} - } - } - Event::Restarted(obj_list) => { - let obj_list = obj_list.iter().map(ObjectRef::from_obj); - this.buffer.extend(obj_list); - loop { - if let Some(msg) = this.buffer.pop_front() { - match this.tx.try_broadcast(msg) { - Ok(_) => {} - Err(async_broadcast::TrySendError::Full(msg)) => { - debug!( - "Attempted to write to subscribers with no buffer space; applying backpressure" - ); - this.buffer.push_front(msg); - break; - } - _ => {} - } - } else { - break; - } - } - } - // Delete events should refresh the store. There is no need to propagate - // them to subscribers since we have already updated the store by this - // point. - _ => {} - }; - - Poll::Ready(Some(Ok(event))) - } -} - -#[pin_project] -pub struct ReflectHandle -where - K: Lookup + Clone + 'static, - K::DynamicType: Eq + std::hash::Hash + Clone, -{ - #[pin] - rx: Receiver>, - reader: Store, -} - -impl Clone for ReflectHandle -where - K: Lookup + Clone + 'static, - K::DynamicType: Eq + std::hash::Hash + Clone, -{ - fn clone(&self) -> Self { - ReflectHandle::new(self.reader.clone(), self.rx.clone()) - } -} - -impl ReflectHandle -where - K: Lookup + Clone, - K::DynamicType: Eq + std::hash::Hash + Clone, -{ - pub(super) fn new(reader: Store, rx: Receiver>) -> ReflectHandle { - Self { reader, rx } - } - - pub fn reader(&self) -> Store { - self.reader.clone() - } -} - -impl Stream for ReflectHandle -where - K: Lookup + Clone, - K::DynamicType: Eq + std::hash::Hash + Clone + Default, -{ - type Item = Arc; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - match ready!(this.rx.as_mut().poll_next(cx)) { - Some(obj_ref) => this - .reader - .get(&obj_ref) - .map(|obj| Poll::Ready(Some(obj))) - .unwrap_or(Poll::Pending), - None => Poll::Ready(None), - } - } -} - -#[cfg(test)] -pub(crate) mod test { - use std::{sync::Arc, task::Poll, vec}; - - use super::{Error, Event}; - use crate::{reflector, utils::ReflectDispatcher}; - use futures::{pin_mut, poll, stream, StreamExt}; - use k8s_openapi::api::core::v1::Pod; - - fn testpod(name: &str) -> Pod { - let mut pod = Pod::default(); - pod.metadata.name = Some(name.to_string()); - pod - } - - #[tokio::test] - async fn events_are_passed_through() { - let foo = testpod("foo"); - let bar = testpod("bar"); - let st = stream::iter([ - Ok(Event::Applied(foo.clone())), - Err(Error::TooManyObjects), - Ok(Event::Restarted(vec![foo, bar])), - ]); - - let (reader, writer) = reflector::store(); - let reflect = ReflectDispatcher::new(st, writer, 10); - pin_mut!(reflect); - - // Prior to any polls, we should have an empty store. - assert_eq!(reader.len(), 0); - assert!(matches!( - poll!(reflect.next()), - Poll::Ready(Some(Ok(Event::Applied(_)))) - )); - - // Make progress and assert all events are seen - assert_eq!(reader.len(), 1); - assert!(matches!( - poll!(reflect.next()), - Poll::Ready(Some(Err(Error::TooManyObjects))) - )); - assert_eq!(reader.len(), 1); - - let restarted = poll!(reflect.next()); - assert!(matches!(restarted, Poll::Ready(Some(Ok(Event::Restarted(_)))))); - assert_eq!(reader.len(), 2); - - assert!(matches!(poll!(reflect.next()), Poll::Ready(None))); - assert_eq!(reader.len(), 2); - } - - #[tokio::test] - async fn readers_yield_touched_objects() { - // Readers should yield touched objects they receive from Stream events. - // - // NOTE: a Delete(_) event will be ignored if the item does not exist in - // the cache. 
Same with a Restarted(vec![delete_item]) - let foo = testpod("foo"); - let bar = testpod("bar"); - let st = stream::iter([ - Ok(Event::Deleted(foo.clone())), - Ok(Event::Applied(foo.clone())), - Err(Error::TooManyObjects), - Ok(Event::Restarted(vec![foo.clone(), bar.clone()])), - ]); - - let foo = Arc::new(foo); - let bar = Arc::new(bar); - - let (_, writer) = reflector::store(); - let reflect = ReflectDispatcher::new(st, writer, 10); - pin_mut!(reflect); - let subscriber = reflect.subscribe(); - pin_mut!(subscriber); - - // Deleted events should be skipped by subscriber. - assert!(matches!( - poll!(reflect.next()), - Poll::Ready(Some(Ok(Event::Deleted(_)))) - )); - assert!(matches!(poll!(subscriber.next()), Poll::Pending)); - - assert!(matches!( - poll!(reflect.next()), - Poll::Ready(Some(Ok(Event::Applied(_)))) - )); - assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone()))); - - // Errors are not propagated to subscribers. - assert!(matches!( - poll!(reflect.next()), - Poll::Ready(Some(Err(Error::TooManyObjects))) - )); - assert!(matches!(poll!(subscriber.next()), Poll::Pending)); - - // Restart event will yield all objects in the list - assert!(matches!( - poll!(reflect.next()), - Poll::Ready(Some(Ok(Event::Restarted(_)))) - )); - assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone()))); - assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(bar.clone()))); - - // When main channel is closed, it is propagated to subscribers - assert!(matches!(poll!(reflect.next()), Poll::Ready(None))); - assert_eq!(poll!(subscriber.next()), Poll::Ready(None)); - } - - #[tokio::test] - async fn readers_yield_when_tx_drops() { - // Once the main stream is dropped, readers should continue to make - // progress and read values that have been sent on the channel. - let foo = testpod("foo"); - let bar = testpod("bar"); - let st = stream::iter([ - Ok(Event::Applied(foo.clone())), - Ok(Event::Restarted(vec![foo.clone(), bar.clone()])), - ]); - - let foo = Arc::new(foo); - let bar = Arc::new(bar); - - let (_, writer) = reflector::store(); - let reflect = ReflectDispatcher::new(st, writer, 10); - - // We pin the reflector on the heap to make it easier to drop it. - let mut reflect = Box::pin(reflect); - let subscriber = reflect.subscribe(); - pin_mut!(subscriber); - - assert!(matches!( - poll!(reflect.next()), - Poll::Ready(Some(Ok(Event::Applied(_)))) - )); - assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone()))); - - // Restart event will yield all objects in the list. Broadcast values - // without polling and then drop. - // - // First, subscribers should be pending. - assert_eq!(poll!(subscriber.next()), Poll::Pending); - assert!(matches!( - poll!(reflect.next()), - Poll::Ready(Some(Ok(Event::Restarted(_)))) - )); - drop(reflect); - - assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone()))); - assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(bar.clone()))); - assert_eq!(poll!(subscriber.next()), Poll::Ready(None)); - } - - #[tokio::test] - async fn reflect_applies_backpressure() { - // When the channel is full, we should observe backpressure applied. - // - // This will be manifested by receiving Poll::Pending on the reflector - // stream while the reader stream is not polled. Once we unblock the - // buffer, the reflector will make progress. 
- let foo = testpod("foo"); - let bar = testpod("bar"); - let st = stream::iter([ - Ok(Event::Applied(foo.clone())), - Ok(Event::Restarted(vec![foo.clone(), bar.clone()])), - ]); - - let foo = Arc::new(foo); - let bar = Arc::new(bar); - - let (_, writer) = reflector::store(); - let reflect = ReflectDispatcher::new(st, writer, 1); - pin_mut!(reflect); - let subscriber = reflect.subscribe(); - pin_mut!(subscriber); - let subscriber_slow = reflect.subscribe(); - pin_mut!(subscriber_slow); - - assert_eq!(poll!(subscriber.next()), Poll::Pending); - assert_eq!(poll!(subscriber_slow.next()), Poll::Pending); - - // Poll first subscriber, but not the second. - assert!(matches!( - poll!(reflect.next()), - Poll::Ready(Some(Ok(Event::Applied(_)))) - )); - assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone()))); - // One subscriber is not reading, so we need to apply backpressure until - // channel has capacity. - // - // At this point, the buffer is full. Polling again will trigger the - // backpressure logic. This means, next event will be returned, but no - // more progress will be made after that until subscriber_slow catches - // up. - assert!(matches!( - poll!(reflect.next()), - Poll::Ready(Some(Ok(Event::Restarted(_)))) - )); - assert!(matches!(poll!(reflect.next()), Poll::Pending)); - // Our "fast" subscriber will also have nothing else to poll until the - // slower subscriber advances its pointer in the buffer. - assert_eq!(poll!(subscriber.next()), Poll::Pending); - - // Advance slow reader - assert_eq!(poll!(subscriber_slow.next()), Poll::Ready(Some(foo.clone()))); - - // We now have room for only one more item. In total, the previous event - // had two. We repeat the same pattern. - assert!(matches!(poll!(reflect.next()), Poll::Pending)); - assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone()))); - assert!(matches!(poll!(reflect.next()), Poll::Pending)); - assert_eq!(poll!(subscriber_slow.next()), Poll::Ready(Some(foo.clone()))); - // Poll again to drain the queue. - assert!(matches!(poll!(reflect.next()), Poll::Ready(None))); - assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(bar.clone()))); - assert_eq!(poll!(subscriber_slow.next()), Poll::Ready(Some(bar.clone()))); - - assert_eq!(poll!(subscriber.next()), Poll::Ready(None)); - assert_eq!(poll!(subscriber_slow.next()), Poll::Ready(None)); - } - - // TODO (matei): tests around cloning subscribers once a watch stream has already - // been established. This will depend on the interfaces & impl so are left - // out for now. 
-} From 21dbbae8996ec9d5aa4b0e6a5bd3fdaef1564a7f Mon Sep 17 00:00:00 2001 From: Matei David Date: Wed, 3 Apr 2024 19:02:50 +0100 Subject: [PATCH 25/36] @clux's feedback Co-authored-by: Eirik A Signed-off-by: Matei David --- examples/shared_stream_controllers.rs | 2 +- kube-runtime/src/utils/watch_ext.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/shared_stream_controllers.rs b/examples/shared_stream_controllers.rs index 0e2e3f2cd..0cbbb0de3 100644 --- a/examples/shared_stream_controllers.rs +++ b/examples/shared_stream_controllers.rs @@ -147,7 +147,7 @@ async fn reconcile_metadata(pod: Arc, ctx: Arc) -> Result(self, writer: Writer) -> impl Stream + fn reflect_shared(self, writer: Writer) -> impl Stream where Self: Stream>> + Sized, K: Resource + Clone + 'static, From c7fc33345e37259bb3f4ab6f6830b041fef86cf1 Mon Sep 17 00:00:00 2001 From: Matei David Date: Wed, 3 Apr 2024 20:02:35 +0000 Subject: [PATCH 26/36] @clux's feedback Signed-off-by: Matei David --- examples/shared_stream_controllers.rs | 356 +++++++++++------------ kube-runtime/src/reflector/dispatcher.rs | 40 ++- kube-runtime/src/reflector/mod.rs | 2 +- kube-runtime/src/reflector/store.rs | 62 ++-- 4 files changed, 232 insertions(+), 228 deletions(-) diff --git a/examples/shared_stream_controllers.rs b/examples/shared_stream_controllers.rs index 0cbbb0de3..3294ca6d4 100644 --- a/examples/shared_stream_controllers.rs +++ b/examples/shared_stream_controllers.rs @@ -1,17 +1,22 @@ use std::{sync::Arc, time::Duration}; use futures::StreamExt; -use k8s_openapi::api::core::v1::{Pod, PodCondition, PodStatus}; +use k8s_openapi::api::core::v1::{Pod, PodCondition}; use kube::{ api::{Patch, PatchParams}, - core::ObjectMeta, - runtime::{controller::Action, reflector::store::Writer, watcher, Config, Controller, WatchStreamExt}, + runtime::{ + controller::Action, + reflector::{self, store::Writer}, + watcher, Config, Controller, WatchStreamExt, + }, Api, Client, ResourceExt, }; -use tracing::{info, info_span, warn, Instrument}; +use tokio::sync::mpsc; +use tracing::{info, warn}; use thiserror::Error; +// Helper module that namespaces two constants describing a Kubernetes status condition pub mod condition { pub static UNDOCUMENTED_TYPE: &str = "UndocumentedPort"; pub static STATUS_TRUE: &str = "True"; @@ -19,119 +24,6 @@ pub mod condition { const SUBSCRIBE_BUFFER_SIZE: usize = 256; -#[derive(Clone)] -struct Data { - client: Client, -} - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - tracing_subscriber::fmt::init(); - - let client = Client::try_default().await?; - let pods = Api::::namespaced(client.clone(), "default"); - let config = Config::default().concurrency(2); - let ctx = Arc::new(Data { client }); - - // (1): create a store (with a dispatcher) - let writer = Writer::::new_with_dispatch(Default::default(), SUBSCRIBE_BUFFER_SIZE); - // (2): create a subscriber - let subscriber = writer.subscribe(); - // (2.5): create a watch stream - let pod_watch = watcher(pods.clone(), Default::default()) - .default_backoff() - .reflect_dispatch(writer); - - // (3): schedule the root (i.e. shared) stream with the runtime. - // - // The runtime (tokio) will drive this task to readiness; the stream is - // polled continously and allows all downstream readers (i.e. subscribers) - // to make progress. 
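Stripped of logging and reconcilers, the sharing setup sketched in these comments comes down to a handful of calls. A condensed sketch of the end state, using the `store_shared`/`reflect_shared` names the series eventually settles on (both behind the `unstable-runtime-subscribe` feature; the buffer size and the `Pod` type are illustrative):

```rust
use futures::StreamExt;
use k8s_openapi::api::core::v1::Pod;
use kube::{
    runtime::{reflector, watcher, WatchStreamExt},
    Api, Client,
};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let client = Client::try_default().await?;
    let pods: Api<Pod> = Api::default_namespaced(client);

    // One shared store; the buffer bounds how far subscribers may lag.
    let (_reader, writer) = reflector::store_shared(256);
    let subscriber = writer.subscribe().expect("writer is shared");

    // The root stream populates the store and fans events out. It must be
    // polled, or every subscriber stalls.
    let root = watcher(pods, watcher::Config::default())
        .default_backoff()
        .reflect_shared(writer)
        .for_each(|_| async {});

    // Subscribers yield Arc<Pod> resolved through the store.
    let fanout = subscriber.for_each(|pod| async move {
        println!("saw {:?}", pod.metadata.name);
    });

    futures::join!(root, fanout);
    Ok(())
}
```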
- tokio::spawn(async move { - // Pin on the heap so we don't overflow our stack - let mut watch = pod_watch.boxed(); - while let Some(next) = watch.next().await { - // We are not interested in the returned events here, only in - // handling errors. - match next { - Err(error) => tracing::error!(%error, "Received error from main watcher stream"), - _ => {} - } - } - }); - - // (4): create a reader. We create a metadata controller that will mirror a - // pod's labels as annotations. - // - // To create a controller that operates on a shared stream, we need two - // handles: - // - A handle to the store. - // - A handle to a shared stream. - // - // The handle to the shared stream will be used to receive shared objects as - // they are applied by the reflector. - let reader = subscriber.reader(); - // Store readers can be created on-demand by calling `reader()` on a shared - // stream handle. Stream handles are cheap to clone. - let metadata_controller = Controller::for_shared_stream(subscriber.clone(), reader) - .with_config(config.clone()) - .shutdown_on_signal() - .run( - reconcile_metadata, - |_, _, _| Action::requeue(Duration::from_secs(1)), - ctx.clone(), - ) - .for_each(|res| async move { - match res { - Ok(v) => info!("Reconciled {v:?}"), - Err(error) => warn!(%error, "Failed to reconcile object"), - } - }) - .instrument(info_span!("metadata_controller")); - - // (5): Create status controller. Our status controller write a condition - // whenever a pod has undocumented container ports (i.e. containers with no - // exposed ports). - // - // This is the last controller we will create, so we can just move the - // handle inside the controller. - let reader = subscriber.reader(); - let status_controller = Controller::for_shared_stream(subscriber, reader) - .with_config(config) - .shutdown_on_signal() - .run( - reconcile_status, - |_, _, _| Action::requeue(Duration::from_secs(1)), - ctx, - ) - .for_each(|res| async move { - match res { - Ok(v) => info!("Reconciled {v:?}"), - Err(error) => warn!(%error, "Failed to reconcile object"), - } - }) - .instrument(info_span!("status_controller")); - - let mut terminate = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())?; - // (6): Last step, drive controllers to readiness. Controllers are futures - // and need to be driven to make progress. A controller that's not driven - // and operates on a subscribed stream will eventually block the shared stream. - tokio::select! { - _ = metadata_controller => { - }, - - _ = status_controller => { - }, - - _ = terminate.recv() => { - info!("Received term signal; shutting down...") - } - - } - - Ok(()) -} - #[derive(Debug, Error)] enum Error { #[error("Failed to patch pod: {0}")] @@ -141,28 +33,30 @@ enum Error { MissingField(&'static str), } -/// Controller will trigger this whenever our main pod has changed. The function -/// reconciles a pod by copying over the labels to the annotations +#[derive(Clone)] +struct Data { + client: Client, +} + +/// A simple reconciliation function that will copy a pod's labels into the annotations. 
async fn reconcile_metadata(pod: Arc, ctx: Arc) -> Result { if pod.name_any() == "kube-system" { - return Ok(Action::requeue(Duration::from_secs(300))); + return Ok(Action::await_change()); + } + + let labels = pod.labels(); + if labels.is_empty() { + return Ok(Action::await_change()); } - let labels = pod.metadata.labels(); - if labels.len() == 0 { - return Ok(Action::requeue(Duration::from_secs(180))); + + let mut annotations = pod.annotations().clone(); + for (key, value) in labels { + annotations.insert(key.to_owned(), value.to_owned()); } - let annotations = labels.clone(); - let p = Pod { - metadata: ObjectMeta { - name: Some(pod.name_any()), - labels: Some(labels), - annotations: Some(annotations), - ..ObjectMeta::default() - }, - spec: pod.spec.clone(), - status: pod.status.clone(), - }; + let mut pod = (*pod).clone(); + pod.metadata.annotations = Some(annotations); + pod.metadata.managed_fields = None; let pod_api = Api::::namespaced( ctx.client.clone(), @@ -174,9 +68,9 @@ async fn reconcile_metadata(pod: Arc, ctx: Arc) -> Result, ctx: Arc) -> Result, ctx: Arc) -> Result { - let mut conditions = pod - .status - .clone() - .unwrap_or_default() - .conditions - .unwrap_or_default(); - - // If the condition already exists, exit - for cond in conditions.iter() { - if cond.type_ == condition::UNDOCUMENTED_TYPE { - return Ok(Action::requeue(Duration::from_secs(300))); + for container in pod.spec.clone().unwrap_or_default().containers.iter() { + if container.ports.clone().unwrap_or_default().len() != 0 { + tracing::debug!(name = %pod.name_any(), "Skipped updating pod with documented ports"); + return Ok(Action::await_change()); } } - pod.spec - .clone() - .unwrap_or_default() - .containers - .iter() - .for_each(|c| { - if c.ports.clone().unwrap_or_default().len() == 0 { - conditions.push(PodCondition { - type_: condition::UNDOCUMENTED_TYPE.into(), - status: condition::STATUS_TRUE.into(), - ..Default::default() - }) - } - }); - - let mut current_conds = pod - .status - .clone() - .unwrap_or_default() - .conditions - .unwrap_or_default() - .into_iter() - .filter(|c| c.type_ != condition::UNDOCUMENTED_TYPE && c.status != condition::STATUS_TRUE) - .collect::>(); - - for condition in conditions { - current_conds.push(condition); - } - - let status = PodStatus { - conditions: Some(current_conds), - ..Default::default() - }; let pod_api = Api::::namespaced( ctx.client.clone(), pod.metadata @@ -243,18 +96,143 @@ async fn reconcile_status(pod: Arc, ctx: Arc) -> Result anyhow::Result<()> { + tracing_subscriber::fmt::init(); + + let client = Client::try_default().await?; + let pods = Api::::namespaced(client.clone(), "default"); + let config = Config::default().concurrency(2); + let ctx = Arc::new(Data { client }); + + // Create a shared store with a predefined buffer that will be shared between subscribers. + let (reader, writer) = reflector::shared_store(SUBSCRIBE_BUFFER_SIZE); + // Before threading an object watch through the store, create a subscriber. + // Any number of subscribers can be created from one writer. + let subscriber = writer + .subscribe() + .expect("subscribers can only be created from shared stores"); + + // Reflect a stream of pod watch events into the store and apply a backoff. For subscribers to + // be able to consume updates, the reflector must be shared. + let mut pod_watch = watcher(pods.clone(), Default::default()) + .default_backoff() + .reflect_shared(writer) + .boxed(); + + // Create the first controller using the reconcile_metadata function. 
Controllers accept + // subscribers through a dedicated interface. + let mut metadata_controller = Controller::for_shared_stream(subscriber.clone(), reader) + .with_config(config.clone()) + .run( + reconcile_metadata, + |pod, error, _| { + tracing::error!(%error, name = %pod.name_any(), "Failed to reconcile metadata"); + Action::requeue(Duration::from_secs(10)) + }, + ctx.clone(), + ) + .boxed(); + + // Subscribers can be used to get a read handle on the store, if the initial handle has been + // moved or dropped. + let reader = subscriber.reader(); + // Create the second controller using the reconcile_status function. + let mut status_controller = Controller::for_shared_stream(subscriber, reader) + .with_config(config) + .run( + reconcile_status, + |pod, error, _| { + tracing::error!(%error, name = %pod.name_any(), "Failed to reconcile status"); + Action::requeue(Duration::from_secs(10)) + }, + ctx, + ) + .boxed(); + + // A simple handler to shutdown on CTRL-C or SIGTERM. + let mut shutdown_rx = shutdown_handler(); + + // Drive streams to readiness. The initial watch (that is reflected) needs to be driven to + // consume events from the API Server and forward them to subscribers. + // + // Both controllers will operate on shared objects. + loop { + tokio::select! { + Some(res) = metadata_controller.next() => { + match res { + Ok(v) => info!("Reconciled metadata {v:?}"), + Err(error) => warn!(%error, "Failed to reconcile metadata"), + } + }, + + Some(res) = status_controller.next() => { + match res { + Ok(v) => info!("Reconciled status {v:?}"), + Err(error) => warn!(%error, "Failed to reconcile object"), + } + }, + + Some(item) = pod_watch.next() => { + match item { + Err(error) => tracing::error!(%error, "Received error from main watcher stream"), + _ => {} + } + }, + + _ = shutdown_rx.recv() => { + tracing::info!("Received shutdown signal; terminating..."); + break; + } + } + } + + Ok(()) +} + +// Create a channel that will hold at most one item. Whenever a signal is received it is sent +// through the channel. +// We do not use a oneshot because we don't want to clone the receiver in each loop iteration. +fn shutdown_handler() -> mpsc::Receiver<()> { + let (shutdown_tx, shutdown_rx) = mpsc::channel(1); + let mut terminate = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate()) + .expect("should not fail to register sighandler"); + let ctrlc = tokio::signal::ctrl_c(); + tokio::spawn(async move { + tokio::select! { + _ = terminate.recv() => { + shutdown_tx.send(()).await + }, + + _ = ctrlc => { + shutdown_tx.send(()).await + } + } + }); + + shutdown_rx +} diff --git a/kube-runtime/src/reflector/dispatcher.rs b/kube-runtime/src/reflector/dispatcher.rs index f493e58bb..151b0e15c 100644 --- a/kube-runtime/src/reflector/dispatcher.rs +++ b/kube-runtime/src/reflector/dispatcher.rs @@ -2,16 +2,54 @@ use core::{ pin::Pin, task::{Context, Poll}, }; +use std::fmt::Debug; use std::sync::Arc; +use derivative::Derivative; use futures::{ready, Stream}; use pin_project::pin_project; use crate::reflector::{ObjectRef, Store}; -use async_broadcast::Receiver; +use async_broadcast::{InactiveReceiver, Receiver, Sender}; use super::Lookup; +#[derive(Derivative)] +#[derivative(Debug(bound = "K: Debug, K::DynamicType: Debug"), Clone)] +pub(crate) struct Dispatcher +where + K: Lookup + Clone + 'static, + K::DynamicType: Eq + std::hash::Hash + Clone, +{ + dispatch_tx: Sender>, + // An inactive reader that prevents the channel from closing until the + // writer is dropped. 
+ _dispatch_rx: InactiveReceiver>, +} + +impl Dispatcher +where + K: Lookup + Clone + 'static, + K::DynamicType: Eq + std::hash::Hash + Clone, +{ + pub(crate) fn new(buf_size: usize) -> Dispatcher { + let (mut dispatch_tx, dispatch_rx) = async_broadcast::broadcast(buf_size); + dispatch_tx.set_await_active(false); + Self { + dispatch_tx, + _dispatch_rx: dispatch_rx.deactivate(), + } + } + + pub(crate) async fn broadcast(&mut self, obj_ref: ObjectRef) { + let _ = self.dispatch_tx.broadcast_direct(obj_ref).await; + } + + pub(crate) fn subscribe(&self, reader: Store) -> ReflectHandle { + ReflectHandle::new(reader, self.dispatch_tx.new_receiver()) + } +} + #[pin_project] pub struct ReflectHandle where diff --git a/kube-runtime/src/reflector/mod.rs b/kube-runtime/src/reflector/mod.rs index d16850a8b..7950a9416 100644 --- a/kube-runtime/src/reflector/mod.rs +++ b/kube-runtime/src/reflector/mod.rs @@ -10,7 +10,7 @@ use crate::watcher; use async_stream::stream; use futures::{Stream, StreamExt}; use std::hash::Hash; -pub use store::{store, store_with_dispatch, Store}; +pub use store::{shared_store, store, Store}; /// Cache objects from a [`watcher()`] stream into a local [`Store`] /// diff --git a/kube-runtime/src/reflector/store.rs b/kube-runtime/src/reflector/store.rs index aab9583b7..ef44f8baf 100644 --- a/kube-runtime/src/reflector/store.rs +++ b/kube-runtime/src/reflector/store.rs @@ -1,10 +1,9 @@ -use super::{Lookup, ObjectRef, ReflectHandle}; +use super::{dispatcher::Dispatcher, Lookup, ObjectRef, ReflectHandle}; use crate::{ utils::delayed_init::{self, DelayedInit}, watcher, }; use ahash::AHashMap; -use async_broadcast::{InactiveReceiver, Sender}; use derivative::Derivative; use parking_lot::RwLock; use std::{fmt::Debug, hash::Hash, sync::Arc}; @@ -25,11 +24,7 @@ where dyntype: K::DynamicType, ready_tx: Option>, ready_rx: Arc>, - - dispatch_tx: Sender>, - // An inactive reader that prevents the channel from closing until the - // writer is dropped. - _dispatch_rx: InactiveReceiver>, + dispatcher: Option>, } impl Writer @@ -42,30 +37,23 @@ where /// `k8s_openapi` types) you can use `Default` instead. 
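Two `async-broadcast` idioms do the heavy lifting in the `Dispatcher` added above: an inactive receiver keeps the channel open without consuming events, and `set_await_active(false)` lets the writer broadcast before any subscriber exists. A sketch of just those two, relying on the buffering and fast-forward behaviour the comments in this series describe:

```rust
use async_broadcast::broadcast;

#[tokio::main]
async fn main() {
    let (mut tx, rx) = broadcast::<u32>(4);

    // Don't block broadcasts on having at least one active receiver.
    tx.set_await_active(false);
    // An inactive receiver keeps the channel open without reading from it.
    let _keepalive = rx.deactivate();
    assert!(!tx.is_closed());

    // Broadcasting succeeds even though nobody is listening yet.
    tx.broadcast_direct(1).await.unwrap();

    // Receivers created later are fast-forwarded: they only observe events
    // broadcast after their creation (here, 2 but not 1).
    let mut late = tx.new_receiver();
    tx.broadcast_direct(2).await.unwrap();
    assert_eq!(late.recv().await.unwrap(), 2);
}
```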
pub fn new(dyntype: K::DynamicType) -> Self { let (ready_tx, ready_rx) = DelayedInit::new(); - let (dispatch_tx, dispatch_rx) = async_broadcast::broadcast(1); - dispatch_tx.close(); Writer { store: Default::default(), dyntype, ready_tx: Some(ready_tx), ready_rx: Arc::new(ready_rx), - dispatch_tx, - _dispatch_rx: dispatch_rx.deactivate(), + dispatcher: None, } } - pub fn new_with_dispatch(dyntype: K::DynamicType, buf_size: usize) -> Self { + pub fn new_shared(dyntype: K::DynamicType, buf_size: usize) -> Self { let (ready_tx, ready_rx) = DelayedInit::new(); - let (mut dispatch_tx, dispatch_rx) = async_broadcast::broadcast(buf_size); - // dont block on waiting for rx - dispatch_tx.set_await_active(false); Writer { store: Default::default(), dyntype, ready_tx: Some(ready_tx), ready_rx: Arc::new(ready_rx), - dispatch_tx, - _dispatch_rx: dispatch_rx.deactivate(), + dispatcher: Some(Dispatcher::new(buf_size)), } } @@ -81,8 +69,10 @@ where } } - pub fn subscribe(&self) -> ReflectHandle { - ReflectHandle::new(self.as_reader(), self.dispatch_tx.new_receiver()) + pub fn subscribe(&self) -> Option> { + self.dispatcher + .as_ref() + .and_then(|dispatcher| Some(dispatcher.subscribe(self.as_reader()))) } /// Applies a single watcher event to the store @@ -113,25 +103,23 @@ where } pub(crate) async fn dispatch_event(&mut self, event: &watcher::Event) { - if self.dispatch_tx.is_closed() { - return; - } - - match event { - watcher::Event::Applied(obj) => { - let obj_ref = obj.to_object_ref(self.dyntype.clone()); - // TODO: should this take a timeout to log when backpressure has - // been applied for too long, e.g. 10s - let _ = self.dispatch_tx.broadcast_direct(obj_ref).await; - } + if let Some(ref mut dispatcher) = self.dispatcher { + match event { + watcher::Event::Applied(obj) => { + let obj_ref = obj.to_object_ref(self.dyntype.clone()); + // TODO (matei): should this take a timeout to log when backpressure has + // been applied for too long, e.g. 
10s + dispatcher.broadcast(obj_ref).await; + } - watcher::Event::Restarted(new_objs) => { - let objs = new_objs.iter().map(|obj| obj.to_object_ref(self.dyntype.clone())); - for obj in objs { - let _ = self.dispatch_tx.broadcast_direct(obj).await; + watcher::Event::Restarted(new_objs) => { + let obj_refs = new_objs.iter().map(|obj| obj.to_object_ref(self.dyntype.clone())); + for obj_ref in obj_refs { + dispatcher.broadcast(obj_ref).await; + } } + _ => {} } - _ => {} } } } @@ -256,12 +244,12 @@ where (r, w) } -pub fn store_with_dispatch(buf_size: usize, dyntype: K::DynamicType) -> (Store, Writer) +pub fn shared_store(buf_size: usize) -> (Store, Writer) where K: Lookup + Clone + 'static, K::DynamicType: Eq + Hash + Clone + Default, { - let w = Writer::::new_with_dispatch(dyntype, buf_size); + let w = Writer::::new_shared(Default::default(), buf_size); let r = w.as_reader(); (r, w) } From c6d1027ecbda0afb95145351bf6aa8a959e7f90f Mon Sep 17 00:00:00 2001 From: Matei David Date: Wed, 3 Apr 2024 20:18:29 +0000 Subject: [PATCH 27/36] Fix tests & clippy warns Signed-off-by: Matei David --- examples/shared_stream_controllers.rs | 8 ++----- kube-runtime/src/controller/mod.rs | 4 ++-- kube-runtime/src/reflector/dispatcher.rs | 30 ++++++++++++------------ kube-runtime/src/reflector/mod.rs | 2 +- kube-runtime/src/reflector/object_ref.rs | 2 +- kube-runtime/src/reflector/store.rs | 8 ++++--- 6 files changed, 26 insertions(+), 28 deletions(-) diff --git a/examples/shared_stream_controllers.rs b/examples/shared_stream_controllers.rs index 3294ca6d4..cbfe79e95 100644 --- a/examples/shared_stream_controllers.rs +++ b/examples/shared_stream_controllers.rs @@ -4,11 +4,7 @@ use futures::StreamExt; use k8s_openapi::api::core::v1::{Pod, PodCondition}; use kube::{ api::{Patch, PatchParams}, - runtime::{ - controller::Action, - reflector::{self, store::Writer}, - watcher, Config, Controller, WatchStreamExt, - }, + runtime::{controller::Action, reflector, watcher, Config, Controller, WatchStreamExt}, Api, Client, ResourceExt, }; use tokio::sync::mpsc; @@ -130,7 +126,7 @@ async fn main() -> anyhow::Result<()> { let ctx = Arc::new(Data { client }); // Create a shared store with a predefined buffer that will be shared between subscribers. - let (reader, writer) = reflector::shared_store(SUBSCRIBE_BUFFER_SIZE); + let (reader, writer) = reflector::store_shared(SUBSCRIBE_BUFFER_SIZE); // Before threading an object watch through the store, create a subscriber. // Any number of subscribers can be created from one writer. 
let subscriber = writer diff --git a/kube-runtime/src/controller/mod.rs b/kube-runtime/src/controller/mod.rs index 52de72544..c83aa0bb0 100644 --- a/kube-runtime/src/controller/mod.rs +++ b/kube-runtime/src/controller/mod.rs @@ -133,7 +133,7 @@ where { trigger_with(stream, move |obj| { Some(ReconcileRequest { - obj_ref: ObjectRef::from_shared_obj_with(obj.clone(), dyntype.clone()), + obj_ref: ObjectRef::from_obj_with(obj.as_ref(), dyntype.clone()), reason: ReconcileReason::ObjectUpdated, }) }) @@ -735,7 +735,7 @@ where dyntype: K::DynamicType, ) -> Self { let mut trigger_selector = stream::SelectAll::new(); - let self_watcher = trigger_self_shared(trigger.map(|obj| Ok(obj)), dyntype.clone()).boxed(); + let self_watcher = trigger_self_shared(trigger.map(Ok), dyntype.clone()).boxed(); trigger_selector.push(self_watcher); Self { trigger_selector, diff --git a/kube-runtime/src/reflector/dispatcher.rs b/kube-runtime/src/reflector/dispatcher.rs index 151b0e15c..fdc290d0b 100644 --- a/kube-runtime/src/reflector/dispatcher.rs +++ b/kube-runtime/src/reflector/dispatcher.rs @@ -77,9 +77,10 @@ where K::DynamicType: Eq + std::hash::Hash + Clone, { pub(super) fn new(reader: Store, rx: Receiver>) -> ReflectHandle { - Self { reader, rx } + Self { rx, reader } } + #[must_use] pub fn reader(&self) -> Store { self.reader.clone() } @@ -98,8 +99,7 @@ where Some(obj_ref) => this .reader .get(&obj_ref) - .map(|obj| Poll::Ready(Some(obj))) - .unwrap_or(Poll::Pending), + .map_or(Poll::Pending, |obj| Poll::Ready(Some(obj))), None => Poll::Ready(None), } } @@ -133,8 +133,8 @@ pub(crate) mod test { Ok(Event::Restarted(vec![foo, bar])), ]); - let (reader, writer) = reflector::store_with_dispatch(10, Default::default()); - let reflect = st.reflect_dispatch(writer); + let (reader, writer) = reflector::store_shared(10); + let reflect = st.reflect_shared(writer); pin_mut!(reflect); // Prior to any polls, we should have an empty store. 
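The `unwrap()`s added to these tests reflect the new contract: `subscribe()` returns `Some` only for writers built with a dispatcher. A small sketch of both sides of that contract (buffer size arbitrary; `store_shared` is gated behind the series' unstable feature once the later patches land):

```rust
use k8s_openapi::api::core::v1::Pod;
use kube::runtime::reflector;

fn main() {
    // A plain writer has no dispatcher, so there is nothing to subscribe to.
    let (_reader, plain) = reflector::store::<Pod>();
    assert!(plain.subscribe().is_none());

    // A shared writer hands out as many subscribers as needed.
    let (_reader, shared) = reflector::store_shared::<Pod>(16);
    assert!(shared.subscribe().is_some());
    assert!(shared.subscribe().is_some());
}
```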
@@ -178,9 +178,9 @@ pub(crate) mod test { let foo = Arc::new(foo); let bar = Arc::new(bar); - let (_, writer) = reflector::store_with_dispatch(10, Default::default()); - let subscriber = writer.subscribe(); - let reflect = st.reflect_dispatch(writer); + let (_, writer) = reflector::store_shared(10); + let subscriber = writer.subscribe().unwrap(); + let reflect = st.reflect_shared(writer); pin_mut!(reflect); pin_mut!(subscriber); @@ -231,9 +231,9 @@ pub(crate) mod test { let foo = Arc::new(foo); let bar = Arc::new(bar); - let (_, writer) = reflector::store_with_dispatch(10, Default::default()); - let subscriber = writer.subscribe(); - let mut reflect = Box::pin(st.reflect_dispatch(writer)); + let (_, writer) = reflector::store_shared(10); + let subscriber = writer.subscribe().unwrap(); + let mut reflect = Box::pin(st.reflect_shared(writer)); pin_mut!(subscriber); assert!(matches!( @@ -275,10 +275,10 @@ pub(crate) mod test { let foo = Arc::new(foo); let bar = Arc::new(bar); - let (_, writer) = reflector::store_with_dispatch(1, Default::default()); - let subscriber = writer.subscribe(); - let subscriber_slow = writer.subscribe(); - let reflect = st.reflect_dispatch(writer); + let (_, writer) = reflector::store_shared(1); + let subscriber = writer.subscribe().unwrap(); + let subscriber_slow = writer.subscribe().unwrap(); + let reflect = st.reflect_shared(writer); pin_mut!(reflect); pin_mut!(subscriber); pin_mut!(subscriber_slow); diff --git a/kube-runtime/src/reflector/mod.rs b/kube-runtime/src/reflector/mod.rs index 7950a9416..eb998b346 100644 --- a/kube-runtime/src/reflector/mod.rs +++ b/kube-runtime/src/reflector/mod.rs @@ -10,7 +10,7 @@ use crate::watcher; use async_stream::stream; use futures::{Stream, StreamExt}; use std::hash::Hash; -pub use store::{shared_store, store, Store}; +pub use store::{store, store_shared, Store}; /// Cache objects from a [`watcher()`] stream into a local [`Store`] /// diff --git a/kube-runtime/src/reflector/object_ref.rs b/kube-runtime/src/reflector/object_ref.rs index f2533cfde..6f2257b33 100644 --- a/kube-runtime/src/reflector/object_ref.rs +++ b/kube-runtime/src/reflector/object_ref.rs @@ -203,7 +203,7 @@ impl ObjectRef { obj.to_object_ref(dyntype) } - pub fn from_shared_obj_with(obj: Arc, dyntype: K::DynamicType) -> Self + pub fn from_shared_obj_with(obj: &Arc, dyntype: K::DynamicType) -> Self where K: Lookup, { diff --git a/kube-runtime/src/reflector/store.rs b/kube-runtime/src/reflector/store.rs index ef44f8baf..a3917b29e 100644 --- a/kube-runtime/src/reflector/store.rs +++ b/kube-runtime/src/reflector/store.rs @@ -72,7 +72,7 @@ where pub fn subscribe(&self) -> Option> { self.dispatcher .as_ref() - .and_then(|dispatcher| Some(dispatcher.subscribe(self.as_reader()))) + .map(|dispatcher| dispatcher.subscribe(self.as_reader())) } /// Applies a single watcher event to the store @@ -118,7 +118,7 @@ where dispatcher.broadcast(obj_ref).await; } } - _ => {} + watcher::Event::Deleted(_) => {} } } } @@ -244,7 +244,9 @@ where (r, w) } -pub fn shared_store(buf_size: usize) -> (Store, Writer) +#[must_use] +#[allow(clippy::module_name_repetitions)] +pub fn store_shared(buf_size: usize) -> (Store, Writer) where K: Lookup + Clone + 'static, K::DynamicType: Eq + Hash + Clone + Default, From a30f2e62e739586ecd226ad4b7ac15e715d2eed0 Mon Sep 17 00:00:00 2001 From: Matei David Date: Wed, 3 Apr 2024 20:20:04 +0000 Subject: [PATCH 28/36] Run fmt Signed-off-by: Matei David --- kube-runtime/src/reflector/dispatcher.rs | 3 +-- kube-runtime/src/reflector/mod.rs | 6 ++++-- 
kube-runtime/src/reflector/object_ref.rs | 3 +-- kube-runtime/src/utils/mod.rs | 6 ++---- 4 files changed, 8 insertions(+), 10 deletions(-) diff --git a/kube-runtime/src/reflector/dispatcher.rs b/kube-runtime/src/reflector/dispatcher.rs index fdc290d0b..71b6716a1 100644 --- a/kube-runtime/src/reflector/dispatcher.rs +++ b/kube-runtime/src/reflector/dispatcher.rs @@ -2,8 +2,7 @@ use core::{ pin::Pin, task::{Context, Poll}, }; -use std::fmt::Debug; -use std::sync::Arc; +use std::{fmt::Debug, sync::Arc}; use derivative::Derivative; use futures::{ready, Stream}; diff --git a/kube-runtime/src/reflector/mod.rs b/kube-runtime/src/reflector/mod.rs index eb998b346..45a0a532e 100644 --- a/kube-runtime/src/reflector/mod.rs +++ b/kube-runtime/src/reflector/mod.rs @@ -4,8 +4,10 @@ mod dispatcher; mod object_ref; pub mod store; -pub use self::dispatcher::ReflectHandle; -pub use self::object_ref::{Extra as ObjectRefExtra, Lookup, ObjectRef}; +pub use self::{ + dispatcher::ReflectHandle, + object_ref::{Extra as ObjectRefExtra, Lookup, ObjectRef}, +}; use crate::watcher; use async_stream::stream; use futures::{Stream, StreamExt}; diff --git a/kube-runtime/src/reflector/object_ref.rs b/kube-runtime/src/reflector/object_ref.rs index 6f2257b33..56bd1a6b9 100644 --- a/kube-runtime/src/reflector/object_ref.rs +++ b/kube-runtime/src/reflector/object_ref.rs @@ -1,7 +1,6 @@ use derivative::Derivative; use k8s_openapi::{api::core::v1::ObjectReference, apimachinery::pkg::apis::meta::v1::OwnerReference}; -#[cfg(doc)] -use kube_client::core::ObjectMeta; +#[cfg(doc)] use kube_client::core::ObjectMeta; use kube_client::{ api::{DynamicObject, Resource}, core::api_version_from_group_version, diff --git a/kube-runtime/src/utils/mod.rs b/kube-runtime/src/utils/mod.rs index 8450061e0..bdf85b227 100644 --- a/kube-runtime/src/utils/mod.rs +++ b/kube-runtime/src/utils/mod.rs @@ -4,12 +4,10 @@ mod backoff_reset_timer; pub(crate) mod delayed_init; mod event_flatten; mod event_modify; -#[cfg(feature = "unstable-runtime-predicates")] -mod predicate; +#[cfg(feature = "unstable-runtime-predicates")] mod predicate; mod reflect; mod stream_backoff; -#[cfg(feature = "unstable-runtime-subscribe")] -pub mod stream_subscribe; +#[cfg(feature = "unstable-runtime-subscribe")] pub mod stream_subscribe; mod watch_ext; pub use backoff_reset_timer::ResetTimerBackoff; From fef5d834c86850f7ebf008be24f3d30d7b5a695f Mon Sep 17 00:00:00 2001 From: Matei David Date: Mon, 8 Apr 2024 18:11:08 +0100 Subject: [PATCH 29/36] Update examples/shared_stream_controllers.rs Co-authored-by: Eirik A Signed-off-by: Matei David --- examples/shared_stream_controllers.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/examples/shared_stream_controllers.rs b/examples/shared_stream_controllers.rs index cbfe79e95..c2b891719 100644 --- a/examples/shared_stream_controllers.rs +++ b/examples/shared_stream_controllers.rs @@ -45,10 +45,9 @@ async fn reconcile_metadata(pod: Arc, ctx: Arc) -> Result Date: Thu, 11 Apr 2024 12:04:07 +0000 Subject: [PATCH 30/36] @clux's feedback on examples Signed-off-by: Matei David --- examples/shared_stream_controllers.rs | 121 ++++++++------------------ 1 file changed, 38 insertions(+), 83 deletions(-) diff --git a/examples/shared_stream_controllers.rs b/examples/shared_stream_controllers.rs index c2b891719..bf97eba7f 100644 --- a/examples/shared_stream_controllers.rs +++ b/examples/shared_stream_controllers.rs @@ -8,7 +8,7 @@ use kube::{ Api, Client, ResourceExt, }; use tokio::sync::mpsc; -use tracing::{info, warn}; 
+use tracing::{debug, error, info, warn}; use thiserror::Error; @@ -45,13 +45,11 @@ async fn reconcile_metadata(pod: Arc, ctx: Arc) -> Result::namespaced( ctx.client.clone(), @@ -78,7 +76,7 @@ async fn reconcile_metadata(pod: Arc, ctx: Arc) -> Result, ctx: Arc) -> Result { for container in pod.spec.clone().unwrap_or_default().containers.iter() { if container.ports.clone().unwrap_or_default().len() != 0 { - tracing::debug!(name = %pod.name_any(), "Skipped updating pod with documented ports"); + debug!(name = %pod.name_any(), "Skipped updating pod with documented ports"); return Ok(Action::await_change()); } } @@ -115,6 +113,11 @@ async fn reconcile_status(pod: Arc, ctx: Arc) -> Result, error: &Error, _ctx: Arc) -> Action { + error!(%error, name = %obj.name_any(), "Failed reconciliation"); + Action::requeue(Duration::from_secs(10)) +} + #[tokio::main] async fn main() -> anyhow::Result<()> { tracing_subscriber::fmt::init(); @@ -134,100 +137,52 @@ async fn main() -> anyhow::Result<()> { // Reflect a stream of pod watch events into the store and apply a backoff. For subscribers to // be able to consume updates, the reflector must be shared. - let mut pod_watch = watcher(pods.clone(), Default::default()) + let pod_watch = watcher(pods.clone(), Default::default()) .default_backoff() .reflect_shared(writer) - .boxed(); + .for_each(|res| async move { + match res { + Ok(event) => debug!("Received event on root stream {event:?}"), + Err(error) => error!(%error, "Unexpected error when watching resource"), + } + }); // Create the first controller using the reconcile_metadata function. Controllers accept // subscribers through a dedicated interface. - let mut metadata_controller = Controller::for_shared_stream(subscriber.clone(), reader) + let metadata_controller = Controller::for_shared_stream(subscriber.clone(), reader) .with_config(config.clone()) - .run( - reconcile_metadata, - |pod, error, _| { - tracing::error!(%error, name = %pod.name_any(), "Failed to reconcile metadata"); - Action::requeue(Duration::from_secs(10)) - }, - ctx.clone(), - ) - .boxed(); + .shutdown_on_signal() + .run(reconcile_metadata, error_policy, ctx.clone()) + .for_each(|res| async move { + match res { + Ok(v) => info!("Reconciled metadata {v:?}"), + Err(error) => warn!(%error, "Failed to reconcile metadata"), + } + }); // Subscribers can be used to get a read handle on the store, if the initial handle has been // moved or dropped. let reader = subscriber.reader(); // Create the second controller using the reconcile_status function. - let mut status_controller = Controller::for_shared_stream(subscriber, reader) + let status_controller = Controller::for_shared_stream(subscriber, reader) .with_config(config) - .run( - reconcile_status, - |pod, error, _| { - tracing::error!(%error, name = %pod.name_any(), "Failed to reconcile status"); - Action::requeue(Duration::from_secs(10)) - }, - ctx, - ) - .boxed(); - - // A simple handler to shutdown on CTRL-C or SIGTERM. - let mut shutdown_rx = shutdown_handler(); + .shutdown_on_signal() + .run(reconcile_status, error_policy, ctx) + .for_each(|res| async move { + match res { + Ok(v) => info!("Reconciled status {v:?}"), + Err(error) => warn!(%error, "Failed to reconcile status"), + } + }); // Drive streams to readiness. The initial watch (that is reflected) needs to be driven to // consume events from the API Server and forward them to subscribers. // // Both controllers will operate on shared objects. - loop { - tokio::select! 
{ - Some(res) = metadata_controller.next() => { - match res { - Ok(v) => info!("Reconciled metadata {v:?}"), - Err(error) => warn!(%error, "Failed to reconcile metadata"), - } - }, - - Some(res) = status_controller.next() => { - match res { - Ok(v) => info!("Reconciled status {v:?}"), - Err(error) => warn!(%error, "Failed to reconcile object"), - } - }, - - Some(item) = pod_watch.next() => { - match item { - Err(error) => tracing::error!(%error, "Received error from main watcher stream"), - _ => {} - } - }, - - _ = shutdown_rx.recv() => { - tracing::info!("Received shutdown signal; terminating..."); - break; - } - } + tokio::select! { + _ = futures::future::join(metadata_controller, status_controller) => {}, + _ = pod_watch => {} } Ok(()) } - -// Create a channel that will hold at most one item. Whenever a signal is received it is sent -// through the channel. -// We do not use a oneshot because we don't want to clone the receiver in each loop iteration. -fn shutdown_handler() -> mpsc::Receiver<()> { - let (shutdown_tx, shutdown_rx) = mpsc::channel(1); - let mut terminate = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate()) - .expect("should not fail to register sighandler"); - let ctrlc = tokio::signal::ctrl_c(); - tokio::spawn(async move { - tokio::select! { - _ = terminate.recv() => { - shutdown_tx.send(()).await - }, - - _ = ctrlc => { - shutdown_tx.send(()).await - } - } - }); - - shutdown_rx -} From 8347103acbebb893740b599bae186e1e09145de5 Mon Sep 17 00:00:00 2001 From: Matei David Date: Mon, 15 Apr 2024 18:26:53 +0000 Subject: [PATCH 31/36] Fix name in ns Signed-off-by: Matei David --- examples/shared_stream_controllers.rs | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/examples/shared_stream_controllers.rs b/examples/shared_stream_controllers.rs index bf97eba7f..e14e73148 100644 --- a/examples/shared_stream_controllers.rs +++ b/examples/shared_stream_controllers.rs @@ -4,10 +4,13 @@ use futures::StreamExt; use k8s_openapi::api::core::v1::{Pod, PodCondition}; use kube::{ api::{Patch, PatchParams}, - runtime::{controller::Action, reflector, watcher, Config, Controller, WatchStreamExt}, + runtime::{ + controller::Action, + reflector::{self}, + watcher, Config, Controller, WatchStreamExt, + }, Api, Client, ResourceExt, }; -use tokio::sync::mpsc; use tracing::{debug, error, info, warn}; use thiserror::Error; @@ -36,12 +39,8 @@ struct Data { /// A simple reconciliation function that will copy a pod's labels into the annotations. 
async fn reconcile_metadata(pod: Arc, ctx: Arc) -> Result { - if pod.name_any() == "kube-system" { - return Ok(Action::await_change()); - } - - let labels = pod.labels(); - if labels.is_empty() { + let namespace = &pod.namespace().unwrap_or_default(); + if namespace == "kube-system" { return Ok(Action::await_change()); } From 9f7edd12e1194daacb87bda01b525ef33f83d41e Mon Sep 17 00:00:00 2001 From: Matei David Date: Mon, 15 Apr 2024 19:28:03 +0000 Subject: [PATCH 32/36] Add comments and feature flags Signed-off-by: Matei David --- kube-runtime/src/reflector/dispatcher.rs | 26 ++++++++ kube-runtime/src/reflector/store.rs | 21 +++++++ kube-runtime/src/utils/watch_ext.rs | 78 ++++++++++++++++++++++++ 3 files changed, 125 insertions(+) diff --git a/kube-runtime/src/reflector/dispatcher.rs b/kube-runtime/src/reflector/dispatcher.rs index 71b6716a1..22084214c 100644 --- a/kube-runtime/src/reflector/dispatcher.rs +++ b/kube-runtime/src/reflector/dispatcher.rs @@ -15,6 +15,8 @@ use super::Lookup; #[derive(Derivative)] #[derivative(Debug(bound = "K: Debug, K::DynamicType: Debug"), Clone)] +// A helper type that holds a broadcast transmitter and a broadcast receiver, +// used to fan-out events from a root stream to multiple listeners. pub(crate) struct Dispatcher where K: Lookup + Clone + 'static, @@ -32,7 +34,11 @@ where K::DynamicType: Eq + std::hash::Hash + Clone, { pub(crate) fn new(buf_size: usize) -> Dispatcher { + // Create a broadcast (tx, rx) pair let (mut dispatch_tx, dispatch_rx) = async_broadcast::broadcast(buf_size); + // The tx half will not wait for any receivers to be active before + // broadcasting events. If no receivers are active, events will be + // buffered. dispatch_tx.set_await_active(false); Self { dispatch_tx, @@ -40,15 +46,35 @@ where } } + // Calls broadcast on the channel. Will return when the channel has enough + // space to send an event. pub(crate) async fn broadcast(&mut self, obj_ref: ObjectRef) { let _ = self.dispatch_tx.broadcast_direct(obj_ref).await; } + // Creates a `ReflectHandle` by creating a receiver from the tx half. + // N.B: the new receiver will be fast-forwarded to the _latest_ event. + // The receiver won't have access to any events that are currently waiting + // to be acked by listeners. pub(crate) fn subscribe(&self, reader: Store) -> ReflectHandle { ReflectHandle::new(reader, self.dispatch_tx.new_receiver()) } } +/// A handle to a shared stream reader +/// +/// [`ReflectHandle`]s are created by calling [`subscribe()`] on a [`Writer`], +/// or by calling `clone()` on an already existing [`ReflectHandle`]. Each +/// shared stream reader should be polled independently and driven to readiness +/// to avoid deadlocks. When the [`Writer`]'s buffer is filled, backpressure +/// will be applied on the root stream side. +/// +/// When the root stream is dropped, or it ends, all [`ReflectHandle`]s +/// subscribed to the stream will also terminate after all events yielded by +/// the root stream have been observed. This means [`ReflectHandle`] streams +/// can still be polled after the root stream has been dropped. +/// +/// [`Writer`]: crate::reflector::Writer #[pin_project] pub struct ReflectHandle where diff --git a/kube-runtime/src/reflector/store.rs b/kube-runtime/src/reflector/store.rs index a3917b29e..b6645e46b 100644 --- a/kube-runtime/src/reflector/store.rs +++ b/kube-runtime/src/reflector/store.rs @@ -46,6 +46,16 @@ where } } + /// Creates a new Writer with the specified dynamic type and buffer size. 
+ /// + /// When the Writer is created through `new_shared`, it will be able to + /// be subscribed. Stored objects will be propagated to all subscribers. The + /// buffer size is used for the underlying channel. An object is cleared + /// from the buffer only when all subscribers have seen it. + /// + /// If the dynamic type is default-able (for example when writer is used with + /// `k8s_openapi` types) you can use `Default` instead. + #[cfg(feature = "unstable-runtime-subscribe")] pub fn new_shared(dyntype: K::DynamicType, buf_size: usize) -> Self { let (ready_tx, ready_rx) = DelayedInit::new(); Writer { @@ -69,6 +79,10 @@ where } } + /// Return a handle to a subscriber + /// + /// Multiple subscribe handles may be obtained, by either calling + /// `subscribe` multiple times, or by calling `clone()` pub fn subscribe(&self) -> Option> { self.dispatcher .as_ref() @@ -102,6 +116,7 @@ where } } + /// Broadcast an event to any downstream listeners subscribed on the store pub(crate) async fn dispatch_event(&mut self, event: &watcher::Event) { if let Some(ref mut dispatcher) = self.dispatcher { match event { @@ -244,8 +259,14 @@ where (r, w) } +/// Create a (Reader, Writer) for a `Store` for a typed resource `K` +/// +/// The resulting `Writer` can be subscribed on in order to fan out events from +/// a watcher. The `Writer` should be passed to a [`reflector`](crate::reflector()), +/// and the [`Store`] is a read-only handle. #[must_use] #[allow(clippy::module_name_repetitions)] +#[cfg(feature = "unstable-runtime-subscribe")] pub fn store_shared(buf_size: usize) -> (Store, Writer) where K: Lookup + Clone + 'static, diff --git a/kube-runtime/src/utils/watch_ext.rs b/kube-runtime/src/utils/watch_ext.rs index d5d2b4a92..d01a389bf 100644 --- a/kube-runtime/src/utils/watch_ext.rs +++ b/kube-runtime/src/utils/watch_ext.rs @@ -249,6 +249,84 @@ pub trait WatchStreamExt: Stream { Reflect::new(self, writer) } + /// Reflect a shared [`watcher()`] stream into a [`Store`] through a [`Writer`] + /// + /// Returns the stream unmodified, but passes every [`watcher::Event`] + /// through a [`Writer`]. This populates a [`Store`] as the stream is + /// polled. When the [`watcher::Event`] is not an error or a + /// [`watcher::Event::Deleted`] then its inner object will also be + /// propagated to subscribers. + /// + /// Subscribers can be created by calling [`subscribe()`] on a [`Writer`]. + /// This will return a [`ReflectHandle`] stream that should be polled + /// independently. When the root stream is dropped, or it ends, all [`ReflectHandle`]s + /// subscribed to the stream will also terminate after all events yielded by + /// the root stream have been observed. This means [`ReflectHandle`] streams + /// can still be polled after the root stream has been dropped. + /// + /// **NB**: This adapter requires an + /// [`unstable`](https://github.com/kube-rs/kube/blob/main/kube-runtime/Cargo.toml#L17-L21) + /// feature + /// + /// ## Warning + /// + /// If the root [`Stream`] is not polled, [`ReflectHandle`] streams will + /// never receive any events. This will cause the streams to deadlock since + /// the root stream will apply backpressure when downstream readers are not + /// consuming events. 
+ /// + /// + /// [`Store`]: crate::reflector::Store + /// [`ReflectHandle`]: crate::reflector::dispatcher::ReflectHandle + /// ## Usage + /// ```no_run + /// # use futures::{pin_mut, Stream, StreamExt, TryStreamExt}; + /// # use std::time::Duration; + /// # use tracing::{info, warn}; + /// use kube::{Api, Client, ResourceExt}; + /// use kube_runtime::{watcher, WatchStreamExt, reflector}; + /// use k8s_openapi::api::apps::v1::Deployment; + /// # async fn wrapper() -> Result<(), Box> { + /// # let client: kube::Client = todo!(); + /// + /// let deploys: Api = Api::default_namespaced(client); + /// let subscriber_buf_sz = 100; + /// let (reader, writer) = reflector::store_shared(subscriber_buf_sz)::(); + /// let subscriber = &writer.subscribe().unwrap(); + /// + /// tokio::spawn(async move { + /// // start polling the store once the reader is ready + /// reader.wait_until_ready().await.unwrap(); + /// loop { + /// let names = reader.state().iter().map(|d| d.name_any()).collect::>(); + /// info!("Current {} deploys: {:?}", names.len(), names); + /// tokio::time::sleep(Duration::from_secs(10)).await; + /// } + /// }); + /// + /// // configure the watcher stream and populate the store while polling + /// watcher(deploys, watcher::Config::default()) + /// .reflect_shared(writer) + /// .applied_objects() + /// .for_each(|res| async move { + /// match res { + /// Ok(o) => info!("saw in root stream {}", o.name_any()), + /// Err(e) => warn!("watcher error in root stream: {}", e), + /// } + /// }) + /// .await; + /// + /// // subscriber can be used to receive applied_objects + /// subscriber + /// .for_each(|obj| async move { + /// info!("saw in subscriber {}", &obj.name_any()) + /// }) + /// await; + /// + /// # Ok(()) + /// # } + /// ``` + #[cfg(feature = "unstable-runtime-subscribe")] fn reflect_shared(self, writer: Writer) -> impl Stream where Self: Stream>> + Sized, From a14d6b4403b31ea276bf708dd8abe4d39a119c7d Mon Sep 17 00:00:00 2001 From: Matei David Date: Tue, 16 Apr 2024 17:47:31 +0000 Subject: [PATCH 33/36] Fix CI checks Signed-off-by: Matei David --- Cargo.toml | 2 + kube-runtime/Cargo.toml | 4 +- kube-runtime/src/controller/mod.rs | 90 ++++++++++++++++++++++-- kube-runtime/src/reflector/dispatcher.rs | 4 +- kube-runtime/src/reflector/mod.rs | 4 +- kube-runtime/src/utils/watch_ext.rs | 10 ++- 6 files changed, 99 insertions(+), 15 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 01de4b142..8f7ef0dab 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,6 +34,8 @@ missing_docs = "deny" ahash = "0.8" anyhow = "1.0.71" assert-json-diff = "2.0.2" +async-broadcast = "0.7.0" +async-stream = "0.3.5" async-trait = "0.1.64" backoff = "0.4.0" base64 = "0.22.0" diff --git a/kube-runtime/Cargo.toml b/kube-runtime/Cargo.toml index e70e304c2..233eeae52 100644 --- a/kube-runtime/Cargo.toml +++ b/kube-runtime/Cargo.toml @@ -48,8 +48,8 @@ backoff.workspace = true async-trait.workspace = true hashbrown.workspace = true k8s-openapi.workspace = true -async-broadcast = "0.7.0" -async-stream = "0.3.5" +async-broadcast.workspace = true +async-stream.workspace = true [dev-dependencies] kube = { path = "../kube", features = ["derive", "client", "runtime"], version = "<1.0.0, >=0.60.0" } diff --git a/kube-runtime/src/controller/mod.rs b/kube-runtime/src/controller/mod.rs index ddd1db248..9dedd1f22 100644 --- a/kube-runtime/src/controller/mod.rs +++ b/kube-runtime/src/controller/mod.rs @@ -123,11 +123,16 @@ where }) } -pub fn trigger_self_shared( +/// Enqueues the object itself for reconciliation when the object is 
behind a +/// shared pointer +#[cfg(feature = "unstable-runtime-subscribe")] +fn trigger_self_shared( stream: S, dyntype: K::DynamicType, ) -> impl Stream, S::Error>> where + // Input stream has item as some Arc'd Resource (via + // Controller::for_shared_stream) S: TryStream>, K: Resource, K::DynamicType: Clone, @@ -720,10 +725,66 @@ where } } - // TODO: do an entrypoint for shared streams of owned objects - // - // Is it better to use a concrete type (i.e. a SubscribeHandle as a trigger) - // or to pass in the reader out-of-band? + /// This is the same as [`Controller::for_stream`]. Instead of taking an + /// `Api` (e.g. [`Controller::new`]), a stream of resources is used. Shared + /// streams can be created out-of-band by subscribing on a store `Writer`. + /// Through this interface, multiple controllers can use the same root + /// (shared) input stream of resources to keep memory overheads smaller. + /// + /// **N.B**: This constructor requires an + /// [`unstable`](https://github.com/kube-rs/kube/blob/main/kube-runtime/Cargo.toml#L17-L21) + /// feature. + /// + /// Prefer [`Controller::new`] or [`Controller::for_stream`] if you do not + /// need to share the stream. + /// + /// ## Warning: + /// + /// You **must** ensure the root stream (i.e. stream created through a `reflector()`) + /// is driven to readiness independently of this controller to ensure the + /// watcher never deadlocks. + /// + /// # Example: + /// + /// ```no_run + /// # use futures::StreamExt; + /// # use k8s_openapi::api::apps::v1::Deployment; + /// # use kube::runtime::controller::{Action, Controller}; + /// # use kube::runtime::{predicates, watcher, reflector, WatchStreamExt}; + /// # use kube::{Api, Client, Error, ResourceExt}; + /// # use std::sync::Arc; + /// # async fn reconcile(_: Arc, _: Arc<()>) -> Result { Ok(Action::await_change()) } + /// # fn error_policy(_: Arc, _: &kube::Error, _: Arc<()>) -> Action { Action::await_change() } + /// # async fn doc(client: kube::Client) { + /// let api: Api = Api::default_namespaced(client); + /// let (reader, writer) = reflector::store_shared(128); + /// let subscriber = writer + /// .subscribe() + /// .expect("subscribers can only be created from shared stores"); + /// let deploys = watcher(api, watcher::Config::default()) + /// .default_backoff() + /// .reflect(writer) + /// .applied_objects() + /// .for_each(|ev| async move { + /// match ev { + /// Ok(obj) => tracing::info!("got obj {obj:?}"), + /// Err(error) => tracing::error!(%error, "received error") + /// } + /// }); + /// + /// let controller = Controller::for_shared_stream(subscriber, reader) + /// .run(reconcile, error_policy, Arc::new(())) + /// .for_each(|ev| async move { + /// tracing::info!("reconciled {ev:?}") + /// }); + /// + /// // Drive streams using a select statement + /// tokio::select! { + /// _ = deploys => {}, + /// _ = controller => {}, + /// } + /// # } + #[cfg(feature = "unstable-runtime-subscribe")] pub fn for_shared_stream(trigger: impl Stream> + Send + 'static, reader: Store) -> Self where K::DynamicType: Default, @@ -731,6 +792,25 @@ where Self::for_shared_stream_with(trigger, reader, Default::default()) } + /// This is the same as [`Controller::for_stream`]. Instead of taking an + /// `Api` (e.g. [`Controller::new`]), a stream of resources is used. Shared + /// streams can be created out-of-band by subscribing on a store `Writer`. + /// Through this interface, multiple controllers can use the same root + /// (shared) input stream of resources to keep memory overheads smaller. 
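For the dynamic-type variant this doc comment introduces, `store_shared` is unavailable because `DynamicObject`'s `ApiResource` has no `Default`, so the writer is built directly. A construction-only sketch with an invented CRD coordinate (argument order per the last patches in this series; nothing here is polled):

```rust
use kube::{
    api::Api,
    core::{ApiResource, DynamicObject, GroupVersionKind},
    runtime::{controller::Controller, reflector::store::Writer, watcher, WatchStreamExt},
    Client,
};

fn build(client: Client) {
    // An invented CRD coordinate, purely for illustration.
    let gvk = GroupVersionKind::gvk("example.com", "v1", "Widget");
    let ar = ApiResource::from_gvk(&gvk);
    let api: Api<DynamicObject> = Api::all_with(client, &ar);

    // ApiResource is not Default, so skip store_shared() and build the
    // shared writer directly.
    let writer = Writer::<DynamicObject>::new_shared(128, ar.clone());
    let reader = writer.as_reader();
    let subscriber = writer.subscribe().expect("writer is shared");

    // The root stream still has to be driven elsewhere.
    let _root = watcher(api, watcher::Config::default()).reflect_shared(writer);
    let _controller = Controller::for_shared_stream_with(subscriber, reader, ar);
}
```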
+ /// + /// **N.B**: This constructor requires an + /// [`unstable`](https://github.com/kube-rs/kube/blob/main/kube-runtime/Cargo.toml#L17-L21) + /// feature. + /// + /// Prefer [`Controller::new`] or [`Controller::for_stream`] if you do not + /// need to share the stream. + /// + /// This variant constructor is used for [`dynamic`] types found through + /// discovery. Prefer [`Controller::for_shared_stream`] for static types (i.e. + /// known at compile time). + /// + /// [`dynamic`]: kube_client::core::dynamic + #[cfg(feature = "unstable-runtime-subscribe")] pub fn for_shared_stream_with( trigger: impl Stream> + Send + 'static, reader: Store, diff --git a/kube-runtime/src/reflector/dispatcher.rs b/kube-runtime/src/reflector/dispatcher.rs index 22084214c..6c8f348ec 100644 --- a/kube-runtime/src/reflector/dispatcher.rs +++ b/kube-runtime/src/reflector/dispatcher.rs @@ -5,8 +5,9 @@ use core::{ use std::{fmt::Debug, sync::Arc}; use derivative::Derivative; -use futures::{ready, Stream}; +use futures::Stream; use pin_project::pin_project; +use std::task::ready; use crate::reflector::{ObjectRef, Store}; use async_broadcast::{InactiveReceiver, Receiver, Sender}; @@ -130,6 +131,7 @@ where } } +#[cfg(feature = "unstable-runtime-subscribe")] #[cfg(test)] pub(crate) mod test { use crate::{ diff --git a/kube-runtime/src/reflector/mod.rs b/kube-runtime/src/reflector/mod.rs index 72e075578..0426c50d6 100644 --- a/kube-runtime/src/reflector/mod.rs +++ b/kube-runtime/src/reflector/mod.rs @@ -12,7 +12,9 @@ use crate::watcher; use async_stream::stream; use futures::{Stream, StreamExt}; use std::hash::Hash; -pub use store::{store, store_shared, Store}; +#[cfg(feature = "unstable-runtime-subscribe")] +pub use store::store_shared; +pub use store::{store, Store}; /// Cache objects from a [`watcher()`] stream into a local [`Store`] /// diff --git a/kube-runtime/src/utils/watch_ext.rs b/kube-runtime/src/utils/watch_ext.rs index 7fb5c1302..f2033fc3d 100644 --- a/kube-runtime/src/utils/watch_ext.rs +++ b/kube-runtime/src/utils/watch_ext.rs @@ -160,7 +160,7 @@ pub trait WatchStreamExt: Stream { /// impl Stream + Send + Sized + 'static, /// ) /// where - /// K: Debug + Send + Sync + 'static, + /// K: Clone + Debug + Send + Sync + 'static, /// S: Stream, watcher::Error>> + Send + Sized + 'static, /// { /// // Create a stream that can be subscribed to @@ -291,7 +291,7 @@ pub trait WatchStreamExt: Stream { /// /// let deploys: Api = Api::default_namespaced(client); /// let subscriber_buf_sz = 100; - /// let (reader, writer) = reflector::store_shared(subscriber_buf_sz)::(); + /// let (reader, writer) = reflector::store_shared::(subscriber_buf_sz); /// let subscriber = &writer.subscribe().unwrap(); /// /// tokio::spawn(async move { @@ -317,11 +317,9 @@ pub trait WatchStreamExt: Stream { /// .await; /// /// // subscriber can be used to receive applied_objects - /// subscriber - /// .for_each(|obj| async move { + /// subscriber.for_each(|obj| async move { /// info!("saw in subscriber {}", &obj.name_any()) - /// }) - /// await; + /// }).await; /// /// # Ok(()) /// # } From de2eda19c9744028b25b23123c3b920f1255561d Mon Sep 17 00:00:00 2001 From: Matei David Date: Tue, 16 Apr 2024 17:50:20 +0000 Subject: [PATCH 34/36] Run rustfmt Signed-off-by: Matei David --- kube-runtime/src/reflector/mod.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kube-runtime/src/reflector/mod.rs b/kube-runtime/src/reflector/mod.rs index 0426c50d6..2c6048ba9 100644 --- a/kube-runtime/src/reflector/mod.rs +++ 
b/kube-runtime/src/reflector/mod.rs @@ -12,8 +12,7 @@ use crate::watcher; use async_stream::stream; use futures::{Stream, StreamExt}; use std::hash::Hash; -#[cfg(feature = "unstable-runtime-subscribe")] -pub use store::store_shared; +#[cfg(feature = "unstable-runtime-subscribe")] pub use store::store_shared; pub use store::{store, Store}; /// Cache objects from a [`watcher()`] stream into a local [`Store`] From 276b75ee2fe5c887f4a88c107ea678cf80fc39f1 Mon Sep 17 00:00:00 2001 From: Matei David Date: Wed, 17 Apr 2024 19:23:53 +0000 Subject: [PATCH 35/36] @clux's feedback Signed-off-by: Matei David --- kube-runtime/src/reflector/dispatcher.rs | 10 ++++++++++ kube-runtime/src/reflector/object_ref.rs | 11 ++--------- kube-runtime/src/reflector/store.rs | 7 +++++-- 3 files changed, 17 insertions(+), 11 deletions(-) diff --git a/kube-runtime/src/reflector/dispatcher.rs b/kube-runtime/src/reflector/dispatcher.rs index 6c8f348ec..7fb1a2cf0 100644 --- a/kube-runtime/src/reflector/dispatcher.rs +++ b/kube-runtime/src/reflector/dispatcher.rs @@ -34,6 +34,16 @@ where K: Lookup + Clone + 'static, K::DynamicType: Eq + std::hash::Hash + Clone, { + /// Creates and returns a new self that wraps a broadcast sender and an + /// inactive broadcast receiver + /// + /// A buffer size is required to create the underlying broadcast channel. + /// Messages will be buffered until all active readers have received a copy + /// of the message. When the channel is full, senders will apply + /// backpressure by waiting for space to free up. + // + // N.B messages are eagerly broadcasted, meaning no active receivers are + // required for a message to be broadcasted. pub(crate) fn new(buf_size: usize) -> Dispatcher { // Create a broadcast (tx, rx) pair let (mut dispatch_tx, dispatch_rx) = async_broadcast::broadcast(buf_size); diff --git a/kube-runtime/src/reflector/object_ref.rs b/kube-runtime/src/reflector/object_ref.rs index 56bd1a6b9..487bb7b7f 100644 --- a/kube-runtime/src/reflector/object_ref.rs +++ b/kube-runtime/src/reflector/object_ref.rs @@ -1,6 +1,7 @@ use derivative::Derivative; use k8s_openapi::{api::core::v1::ObjectReference, apimachinery::pkg::apis::meta::v1::OwnerReference}; -#[cfg(doc)] use kube_client::core::ObjectMeta; +#[cfg(doc)] +use kube_client::core::ObjectMeta; use kube_client::{ api::{DynamicObject, Resource}, core::api_version_from_group_version, @@ -9,7 +10,6 @@ use std::{ borrow::Cow, fmt::{Debug, Display}, hash::Hash, - sync::Arc, }; /// Minimal lookup behaviour needed by a [reflector store](super::Store). @@ -202,13 +202,6 @@ impl ObjectRef { obj.to_object_ref(dyntype) } - pub fn from_shared_obj_with(obj: &Arc, dyntype: K::DynamicType) -> Self - where - K: Lookup, - { - obj.as_ref().to_object_ref(dyntype) - } - /// Create an `ObjectRef` from an `OwnerReference` /// /// Returns `None` if the types do not match. diff --git a/kube-runtime/src/reflector/store.rs b/kube-runtime/src/reflector/store.rs index b6645e46b..a78976c19 100644 --- a/kube-runtime/src/reflector/store.rs +++ b/kube-runtime/src/reflector/store.rs @@ -56,7 +56,7 @@ where /// If the dynamic type is default-able (for example when writer is used with /// `k8s_openapi` types) you can use `Default` instead. 
#[cfg(feature = "unstable-runtime-subscribe")] - pub fn new_shared(dyntype: K::DynamicType, buf_size: usize) -> Self { + pub fn new_shared(buf_size: usize, dyntype: K::DynamicType) -> Self { let (ready_tx, ready_rx) = DelayedInit::new(); Writer { store: Default::default(), @@ -264,6 +264,9 @@ where /// The resulting `Writer` can be subscribed on in order to fan out events from /// a watcher. The `Writer` should be passed to a [`reflector`](crate::reflector()), /// and the [`Store`] is a read-only handle. +/// +/// A buffer size is used for the underlying message channel. When the buffer is +/// full, backpressure will be applied by waiting for capacity. #[must_use] #[allow(clippy::module_name_repetitions)] #[cfg(feature = "unstable-runtime-subscribe")] @@ -272,7 +275,7 @@ where K: Lookup + Clone + 'static, K::DynamicType: Eq + Hash + Clone + Default, { - let w = Writer::::new_shared(Default::default(), buf_size); + let w = Writer::::new_shared(buf_size, Default::default()); let r = w.as_reader(); (r, w) } From eca6be1b82a75adaeaf425f237b9582349871daf Mon Sep 17 00:00:00 2001 From: Matei David Date: Wed, 17 Apr 2024 22:32:52 +0000 Subject: [PATCH 36/36] Run fmt Signed-off-by: Matei David --- kube-runtime/src/reflector/object_ref.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kube-runtime/src/reflector/object_ref.rs b/kube-runtime/src/reflector/object_ref.rs index 487bb7b7f..47e8b2d2f 100644 --- a/kube-runtime/src/reflector/object_ref.rs +++ b/kube-runtime/src/reflector/object_ref.rs @@ -1,7 +1,6 @@ use derivative::Derivative; use k8s_openapi::{api::core::v1::ObjectReference, apimachinery::pkg::apis::meta::v1::OwnerReference}; -#[cfg(doc)] -use kube_client::core::ObjectMeta; +#[cfg(doc)] use kube_client::core::ObjectMeta; use kube_client::{ api::{DynamicObject, Resource}, core::api_version_from_group_version,