use snafu::{ResultExt, Snafu};
use stackable_operator::{
    builder::{self, meta::ObjectMetaBuilder},
    k8s_openapi::api::core::v1::{Service, ServicePort, ServiceSpec},
    kube::ResourceExt,
    kvp::{Annotations, Labels},
};

use super::crd::CONNECT_APP_NAME;
use crate::connect::{
    GRPC, HTTP,
    common::{self, SparkConnectRole},
    crd::{CONNECT_GRPC_PORT, CONNECT_UI_PORT, v1alpha1},
};

#[derive(Snafu, Debug)]
#[allow(clippy::enum_variant_names)]
pub enum Error {
    #[snafu(display("object is missing metadata to build owner reference"))]
    ObjectMissingMetadataForOwnerRef { source: builder::meta::Error },

    #[snafu(display("failed to build Labels"))]
    LabelBuild {
        source: stackable_operator::kvp::LabelError,
    },

    #[snafu(display("failed to build Metadata"))]
    MetadataBuild { source: builder::meta::Error },
}

// This is the headless driver Service used for internal communication with the
// executors, as recommended by the Spark docs.
pub(crate) fn build_headless_service(
    scs: &v1alpha1::SparkConnectServer,
    app_version_label: &str,
) -> Result<Service, Error> {
    let service_name = format!(
        "{cluster}-{role}-headless",
        cluster = scs.name_any(),
        role = SparkConnectRole::Server
    );

    let selector =
        Labels::role_selector(scs, CONNECT_APP_NAME, &SparkConnectRole::Server.to_string())
            .context(LabelBuildSnafu)?
            .into();

    Ok(Service {
        metadata: ObjectMetaBuilder::new()
            .name_and_namespace(scs)
            .name(service_name)
            .ownerreference_from_resource(scs, None, Some(true))
            .context(ObjectMissingMetadataForOwnerRefSnafu)?
            .with_recommended_labels(common::labels(
                scs,
                app_version_label,
                &SparkConnectRole::Server.to_string(),
            ))
            .context(MetadataBuildSnafu)?
            .build(),
        spec: Some(ServiceSpec {
            type_: Some("ClusterIP".to_owned()),
            cluster_ip: Some("None".to_owned()),
            ports: Some(vec![
                ServicePort {
                    name: Some(String::from(GRPC)),
                    port: CONNECT_GRPC_PORT,
                    ..ServicePort::default()
                },
                ServicePort {
                    name: Some(String::from(HTTP)),
                    port: CONNECT_UI_PORT,
                    ..ServicePort::default()
                },
            ]),
            selector: Some(selector),
            // The flag `publish_not_ready_addresses` *must* be `true` to allow for readiness
            // probes. Without it, the driver runs into a deadlock because the Pod cannot become
            // "ready" until the Service is "ready" and vice versa.
            publish_not_ready_addresses: Some(true),
            ..ServiceSpec::default()
        }),
        status: None,
    })
}

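// Illustrative sketch, not part of the original code: for a connect cluster named
// `simple-connect`, the headless Service built above corresponds roughly to the
// manifest below. The role segment of the name and the port names/numbers follow
// from `SparkConnectRole::Server`, `GRPC`/`HTTP` and the `CONNECT_*` constants, so
// the concrete values shown here are assumptions:
//
//   apiVersion: v1
//   kind: Service
//   metadata:
//     name: simple-connect-server-headless
//   spec:
//     type: ClusterIP
//     clusterIP: None                  # headless: DNS resolves directly to the driver Pod
//     publishNotReadyAddresses: true   # see the deadlock comment above
//     ports:
//       - name: grpc                   # CONNECT_GRPC_PORT
//       - name: http                   # CONNECT_UI_PORT
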
// This is the metrics service
pub(crate) fn build_metrics_service(
    scs: &v1alpha1::SparkConnectServer,
    app_version_label: &str,
) -> Result<Service, Error> {
    let service_name = format!(
        "{cluster}-{role}-metrics",
        cluster = scs.name_any(),
        role = SparkConnectRole::Server
    );

    let selector =
        Labels::role_selector(scs, CONNECT_APP_NAME, &SparkConnectRole::Server.to_string())
            .context(LabelBuildSnafu)?
            .into();

    Ok(Service {
        metadata: ObjectMetaBuilder::new()
            .name_and_namespace(scs)
            .name(service_name)
            .ownerreference_from_resource(scs, None, Some(true))
            .context(ObjectMissingMetadataForOwnerRefSnafu)?
            .with_recommended_labels(common::labels(
                scs,
                app_version_label,
                &SparkConnectRole::Server.to_string(),
            ))
            .context(MetadataBuildSnafu)?
            .with_labels(prometheus_labels())
            .with_annotations(prometheus_annotations())
            .build(),
        spec: Some(ServiceSpec {
            type_: Some("ClusterIP".to_owned()),
            cluster_ip: Some("None".to_owned()),
            ports: Some(metrics_ports()),
            selector: Some(selector),
            // The flag `publish_not_ready_addresses` *must* be `true` to allow for readiness
            // probes. Without it, the driver runs into a deadlock because the Pod cannot become
            // "ready" until the Service is "ready" and vice versa.
            publish_not_ready_addresses: Some(true),
            ..ServiceSpec::default()
        }),
        status: None,
    })
}

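// A minimal usage sketch (assumptions, not upstream code): from the controller's
// reconcile function, both Services would typically be built from the fetched
// `SparkConnectServer` and then applied by the caller. `scs` and
// `app_version_label` are assumed to be in scope at that point:
//
//   let headless_service = build_headless_service(&scs, &app_version_label)?;
//   let metrics_service = build_metrics_service(&scs, &app_version_label)?;
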
fn metrics_ports() -> Vec<ServicePort> {
    vec![ServicePort {
        name: Some("metrics".to_string()),
        port: CONNECT_UI_PORT,
        protocol: Some("TCP".to_string()),
        ..ServicePort::default()
    }]
}

/// Common labels for Prometheus
fn prometheus_labels() -> Labels {
    Labels::try_from([("prometheus.io/scrape", "true")]).expect("should be a valid label")
}

/// Common annotations for Prometheus
///
/// These annotations can be used in a ServiceMonitor.
///
/// see also <https://github.com/prometheus-community/helm-charts/blob/prometheus-27.32.0/charts/prometheus/values.yaml#L983-L1036>
fn prometheus_annotations() -> Annotations {
    Annotations::try_from([
        (
            "prometheus.io/path".to_owned(),
            "/metrics/prometheus".to_owned(),
        ),
        ("prometheus.io/port".to_owned(), CONNECT_UI_PORT.to_string()),
        ("prometheus.io/scheme".to_owned(), "http".to_owned()),
        ("prometheus.io/scrape".to_owned(), "true".to_owned()),
    ])
    .expect("should be valid annotations")
}
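
// A minimal test sketch for the pure helpers above. This module is an illustrative
// addition, not part of the original code; it only relies on items already in scope
// in this file (via `use super::*`) and assumes `CONNECT_UI_PORT` is an `i32`.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn metrics_port_targets_the_ui_port() {
        let ports = metrics_ports();
        assert_eq!(ports.len(), 1);
        assert_eq!(ports[0].name.as_deref(), Some("metrics"));
        assert_eq!(ports[0].port, CONNECT_UI_PORT);
        assert_eq!(ports[0].protocol.as_deref(), Some("TCP"));
    }

    #[test]
    fn prometheus_metadata_is_well_formed() {
        // Both helpers `expect` on construction, so simply calling them verifies that
        // the hard-coded keys and values are valid labels/annotations.
        let _ = prometheus_labels();
        let _ = prometheus_annotations();
    }
}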