@@ -28,9 +28,10 @@ use stackable_operator::{
2828 schemars:: { self , JsonSchema } ,
2929 time:: Duration ,
3030} ;
31+ use stackable_versioned:: versioned;
3132use strum:: { Display , EnumIter } ;
3233
33- use crate :: { affinity:: history_affinity, constants:: * , logdir:: ResolvedLogDir } ;
34+ use crate :: crd :: { affinity:: history_affinity, constants:: * , logdir:: ResolvedLogDir } ;
3435
3536#[ derive( Snafu , Debug ) ]
3637pub enum Error {
@@ -48,62 +49,63 @@ pub enum Error {
4849 CannotRetrieveRoleGroup { role_group : String } ,
4950}
5051
51- /// A Spark cluster history server component. This resource is managed by the Stackable operator
52- /// for Apache Spark. Find more information on how to use it in the
53- /// [operator documentation](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/usage-guide/history-server).
54- # [ derive ( Clone , CustomResource , Debug , Deserialize , JsonSchema , Serialize ) ]
55- # [ kube (
56- group = "spark.stackable.tech" ,
57- version = "v1alpha1" ,
58- kind = "SparkHistoryServer" ,
59- shortname = "shs" ,
60- namespaced ,
61- crates (
62- kube_core = "stackable_operator::kube::core" ,
63- k8s_openapi = "stackable_operator::k8s_openapi" ,
64- schemars = "stackable_operator::schemars"
65- )
66- ) ]
67- #[ serde( rename_all = "camelCase" ) ]
68- pub struct SparkHistoryServerSpec {
69- pub image : ProductImage ,
70-
71- /// Global Spark history server configuration that applies to all roles and role groups.
72- #[ serde( default ) ]
73- pub cluster_config : SparkHistoryServerClusterConfig ,
74-
75- /// Name of the Vector aggregator discovery ConfigMap.
76- /// It must contain the key `ADDRESS` with the address of the Vector aggregator.
77- #[ serde( skip_serializing_if = "Option::is_none" ) ]
78- pub vector_aggregator_config_map_name : Option < String > ,
79-
80- /// The log file directory definition used by the Spark history server.
81- pub log_file_directory : LogFileDirectorySpec ,
82-
83- /// A map of key/value strings that will be passed directly to Spark when deploying the history server.
84- #[ serde( default ) ]
85- pub spark_conf : BTreeMap < String , String > ,
86-
87- /// A history server node role definition.
88- pub nodes : Role < HistoryConfigFragment > ,
89- }
52+ # [ versioned ( version ( name = "v1alpha1" ) ) ]
53+ pub mod versioned {
54+ /// A Spark cluster history server component. This resource is managed by the Stackable operator
55+ /// for Apache Spark. Find more information on how to use it in the
56+ /// [operator documentation](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/usage-guide/history-server).
57+ # [ versioned ( k8s (
58+ group = "spark.stackable.tech" ,
59+ shortname = "sparkhist" ,
60+ namespaced ,
61+ crates (
62+ kube_core = "stackable_operator::kube::core" ,
63+ k8s_openapi = "stackable_operator::k8s_openapi" ,
64+ schemars = "stackable_operator::schemars"
65+ )
66+ ) ) ]
67+ # [ derive ( Clone , CustomResource , Debug , Deserialize , JsonSchema , Serialize ) ]
68+ #[ serde( rename_all = "camelCase" ) ]
69+ pub struct SparkHistoryServerSpec {
70+ pub image : ProductImage ,
71+
72+ /// Global Spark history server configuration that applies to all roles and role groups.
73+ #[ serde( default ) ]
74+ pub cluster_config : v1alpha1 :: SparkHistoryServerClusterConfig ,
75+
76+ /// Name of the Vector aggregator discovery ConfigMap.
77+ /// It must contain the key `ADDRESS` with the address of the Vector aggregator.
78+ #[ serde( skip_serializing_if = "Option::is_none" ) ]
79+ pub vector_aggregator_config_map_name : Option < String > ,
80+
81+ /// The log file directory definition used by the Spark history server.
82+ pub log_file_directory : LogFileDirectorySpec ,
83+
84+ /// A map of key/value strings that will be passed directly to Spark when deploying the history server.
85+ #[ serde( default ) ]
86+ pub spark_conf : BTreeMap < String , String > ,
87+
88+ /// A history server node role definition.
89+ pub nodes : Role < HistoryConfigFragment > ,
90+ }
9091
91- #[ derive( Clone , Deserialize , Debug , Default , Eq , JsonSchema , PartialEq , Serialize ) ]
92- #[ serde( rename_all = "camelCase" ) ]
93- pub struct SparkHistoryServerClusterConfig {
94- /// This field controls which type of Service the Operator creates for this HistoryServer:
95- ///
96- /// * cluster-internal: Use a ClusterIP service
97- ///
98- /// * external-unstable: Use a NodePort service
99- ///
100- /// * external-stable: Use a LoadBalancer service
101- ///
102- /// This is a temporary solution with the goal to keep yaml manifests forward compatible.
103- /// In the future, this setting will control which ListenerClass <https://docs.stackable.tech/home/stable/listener-operator/listenerclass.html>
104- /// will be used to expose the service, and ListenerClass names will stay the same, allowing for a non-breaking change.
105- #[ serde( default ) ]
106- pub listener_class : CurrentlySupportedListenerClasses ,
92+ #[ derive( Clone , Deserialize , Debug , Default , Eq , JsonSchema , PartialEq , Serialize ) ]
93+ #[ serde( rename_all = "camelCase" ) ]
94+ pub struct SparkHistoryServerClusterConfig {
95+ /// This field controls which type of Service the Operator creates for this HistoryServer:
96+ ///
97+ /// * cluster-internal: Use a ClusterIP service
98+ ///
99+ /// * external-unstable: Use a NodePort service
100+ ///
101+ /// * external-stable: Use a LoadBalancer service
102+ ///
103+ /// This is a temporary solution with the goal to keep yaml manifests forward compatible.
104+ /// In the future, this setting will control which ListenerClass <https://docs.stackable.tech/home/stable/listener-operator/listenerclass.html>
105+ /// will be used to expose the service, and ListenerClass names will stay the same, allowing for a non-breaking change.
106+ #[ serde( default ) ]
107+ pub listener_class : CurrentlySupportedListenerClasses ,
108+ }
107109}
108110
109111// TODO: Temporary solution until listener-operator is finished
@@ -129,7 +131,7 @@ impl CurrentlySupportedListenerClasses {
129131 }
130132}
131133
132- impl SparkHistoryServer {
134+ impl v1alpha1 :: SparkHistoryServer {
133135 /// Returns a reference to the role. Raises an error if the role is not defined.
134136 pub fn role ( & self ) -> & Role < HistoryConfigFragment > {
135137 & self . spec . nodes
@@ -138,7 +140,7 @@ impl SparkHistoryServer {
138140 /// Returns a reference to the role group. Raises an error if the role or role group are not defined.
139141 pub fn rolegroup (
140142 & self ,
141- rolegroup_ref : & RoleGroupRef < SparkHistoryServer > ,
143+ rolegroup_ref : & RoleGroupRef < Self > ,
142144 ) -> Result < RoleGroup < HistoryConfigFragment , GenericProductSpecificCommonConfig > , Error > {
143145 self . spec
144146 . nodes
@@ -152,7 +154,7 @@ impl SparkHistoryServer {
152154
153155 pub fn merged_config (
154156 & self ,
155- rolegroup_ref : & RoleGroupRef < SparkHistoryServer > ,
157+ rolegroup_ref : & RoleGroupRef < Self > ,
156158 ) -> Result < HistoryConfig , Error > {
157159 // Initialize the result with all default values as baseline
158160 let conf_defaults = HistoryConfig :: default_config ( & self . name_any ( ) ) ;
@@ -184,7 +186,7 @@ impl SparkHistoryServer {
184186 . map ( i32:: from)
185187 }
186188
187- pub fn cleaner_rolegroups ( & self ) -> Vec < RoleGroupRef < SparkHistoryServer > > {
189+ pub fn cleaner_rolegroups ( & self ) -> Vec < RoleGroupRef < Self > > {
188190 let mut rgs = vec ! [ ] ;
189191 for ( rg_name, rg_config) in & self . spec . nodes . role_groups {
190192 if let Some ( true ) = rg_config. config . config . cleaner {
@@ -444,7 +446,7 @@ impl HistoryConfig {
444446}
445447
446448impl Configuration for HistoryConfigFragment {
447- type Configurable = SparkHistoryServer ;
449+ type Configurable = v1alpha1 :: SparkHistoryServer ;
448450
449451 fn compute_env (
450452 & self ,
@@ -484,7 +486,7 @@ mod test {
484486 } ;
485487
486488 use super :: * ;
487- use crate :: logdir:: S3LogDir ;
489+ use crate :: crd :: logdir:: S3LogDir ;
488490
489491 #[ test]
490492 pub fn test_env_overrides ( ) {
@@ -515,7 +517,7 @@ mod test {
515517 "# } ;
516518
517519 let deserializer = serde_yaml:: Deserializer :: from_str ( input) ;
518- let history: SparkHistoryServer =
520+ let history: v1alpha1 :: SparkHistoryServer =
519521 serde_yaml:: with:: singleton_map_recursive:: deserialize ( deserializer) . unwrap ( ) ;
520522
521523 let log_dir = ResolvedLogDir :: S3 ( S3LogDir {
0 commit comments