@@ -124,136 +124,141 @@ pub enum Error {
     ConstructJvmArguments { source: crate::config::jvm::Error },
 }

-#[derive(Clone, Debug, Deserialize, PartialEq, Serialize, JsonSchema)]
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[serde(rename_all = "camelCase")]
-pub struct SparkApplicationStatus {
-    pub phase: String,
-}
-
-/// A Spark cluster stacklet. This resource is managed by the Stackable operator for Apache Spark.
-/// Find more information on how to use it and the resources that the operator generates in the
-/// [operator documentation](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/).
-///
-/// The SparkApplication CRD looks a little different than the CRDs of the other products on the
-/// Stackable Data Platform.
 #[versioned(
     version(name = "v1alpha1"),
-    k8s(
+    crates(
+        kube_core = "stackable_operator::kube::core",
+        kube_client = "stackable_operator::kube::client",
+        k8s_openapi = "stackable_operator::k8s_openapi",
+        schemars = "stackable_operator::schemars",
+        versioned = "stackable_operator::versioned"
+    )
+)]
+pub mod versioned {
+
+    #[derive(Clone, Debug, Deserialize, PartialEq, Serialize, JsonSchema)]
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[serde(rename_all = "camelCase")]
+    pub struct SparkApplicationStatus {
+        pub phase: String,
+    }
+
+    /// A Spark cluster stacklet. This resource is managed by the Stackable operator for Apache Spark.
+    /// Find more information on how to use it and the resources that the operator generates in the
+    /// [operator documentation](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/).
+    ///
+    /// The SparkApplication CRD looks a little different than the CRDs of the other products on the
+    /// Stackable Data Platform.
+    #[versioned(crd(
         group = "spark.stackable.tech",
         shortname = "sparkapp",
         status = "SparkApplicationStatus",
         namespaced,
-        crates(
-            kube_core = "stackable_operator::kube::core",
-            k8s_openapi = "stackable_operator::k8s_openapi",
-            schemars = "stackable_operator::schemars"
-        )
-    )
-)]
-#[derive(Clone, CustomResource, Debug, Deserialize, JsonSchema, Serialize)]
-#[serde(rename_all = "camelCase")]
-pub struct SparkApplicationSpec {
-    /// Mode: cluster or client. Currently only cluster is supported.
-    pub mode: SparkMode,
-
-    /// The main class - i.e. entry point - for JVM artifacts.
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub main_class: Option<String>,
-
-    /// The actual application file that will be called by `spark-submit`.
-    pub main_application_file: String,
-
-    /// User-supplied image containing spark-job dependencies that will be copied to the specified volume mount.
-    /// See the [examples](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/usage-guide/examples).
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub image: Option<String>,
-
-    // no doc - docs in ProductImage struct.
-    pub spark_image: ProductImage,
-
-    /// Name of the Vector aggregator [discovery ConfigMap](DOCS_BASE_URL_PLACEHOLDER/concepts/service_discovery).
-    /// It must contain the key `ADDRESS` with the address of the Vector aggregator.
-    /// Follow the [logging tutorial](DOCS_BASE_URL_PLACEHOLDER/tutorials/logging-vector-aggregator)
-    /// to learn how to configure log aggregation with Vector.
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub vector_aggregator_config_map_name: Option<String>,
-
-    /// The job builds a spark-submit command, complete with arguments and referenced dependencies
-    /// such as templates, and passes it on to Spark.
-    /// The reason this property uses its own type (SubmitConfigFragment) is because logging is not
-    /// supported for spark-submit processes.
-    //
-    // IMPORTANT: Please note that the jvmArgumentOverrides have no effect here!
-    // However, due to product-config things I wasn't able to remove them.
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub job: Option<CommonConfiguration<SubmitConfigFragment, JavaCommonConfig>>,
-
-    /// The driver role specifies the configuration that, together with the driver pod template, is used by
-    /// Spark to create driver pods.
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub driver: Option<CommonConfiguration<RoleConfigFragment, JavaCommonConfig>>,
-
-    /// The executor role specifies the configuration that, together with the driver pod template, is used by
-    /// Spark to create the executor pods.
-    /// This is RoleGroup instead of plain CommonConfiguration because it needs to allow for the number of
-    /// replicas to be specified.
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub executor: Option<RoleGroup<RoleConfigFragment, JavaCommonConfig>>,
-
-    /// A map of key/value strings that will be passed directly to spark-submit.
-    #[serde(default)]
-    pub spark_conf: HashMap<String, String>,
-
-    /// Job dependencies: a list of python packages that will be installed via pip, a list of packages
-    /// or repositories that is passed directly to spark-submit, or a list of excluded packages
-    /// (also passed directly to spark-submit).
-    #[serde(default)]
-    pub deps: JobDependencies,
-
-    /// Configure an S3 connection that the SparkApplication has access to.
-    /// Read more in the [Spark S3 usage guide](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/usage-guide/s3).
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub s3connection: Option<s3::v1alpha1::InlineConnectionOrReference>,
-
-    /// Arguments passed directly to the job artifact.
-    #[serde(default)]
-    pub args: Vec<String>,
-
-    /// A list of volumes that can be made available to the job, driver or executors via their volume mounts.
-    #[serde(default)]
-    #[schemars(schema_with = "raw_object_list_schema")]
-    pub volumes: Vec<Volume>,
-
-    /// A list of environment variables that will be set in the job pod and the driver and executor
-    /// pod templates.
-    #[serde(default)]
-    pub env: Vec<EnvVar>,
-
-    /// The log file directory definition used by the Spark history server.
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub log_file_directory: Option<LogFileDirectorySpec>,
-}
+    ))]
+    #[derive(Clone, CustomResource, Debug, Deserialize, JsonSchema, Serialize)]
+    #[serde(rename_all = "camelCase")]
+    pub struct SparkApplicationSpec {
+        /// Mode: cluster or client. Currently only cluster is supported.
+        pub mode: SparkMode,
+
+        /// The main class - i.e. entry point - for JVM artifacts.
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        pub main_class: Option<String>,
+
+        /// The actual application file that will be called by `spark-submit`.
+        pub main_application_file: String,
+
+        /// User-supplied image containing spark-job dependencies that will be copied to the specified volume mount.
+        /// See the [examples](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/usage-guide/examples).
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        pub image: Option<String>,
+
+        // no doc - docs in ProductImage struct.
+        pub spark_image: ProductImage,
+
+        /// Name of the Vector aggregator [discovery ConfigMap](DOCS_BASE_URL_PLACEHOLDER/concepts/service_discovery).
+        /// It must contain the key `ADDRESS` with the address of the Vector aggregator.
+        /// Follow the [logging tutorial](DOCS_BASE_URL_PLACEHOLDER/tutorials/logging-vector-aggregator)
+        /// to learn how to configure log aggregation with Vector.
+        #[serde(skip_serializing_if = "Option::is_none")]
+        pub vector_aggregator_config_map_name: Option<String>,
+
+        /// The job builds a spark-submit command, complete with arguments and referenced dependencies
+        /// such as templates, and passes it on to Spark.
+        /// The reason this property uses its own type (SubmitConfigFragment) is because logging is not
+        /// supported for spark-submit processes.
+        //
+        // IMPORTANT: Please note that the jvmArgumentOverrides have no effect here!
+        // However, due to product-config things I wasn't able to remove them.
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        pub job: Option<CommonConfiguration<SubmitConfigFragment, JavaCommonConfig>>,
+
+        /// The driver role specifies the configuration that, together with the driver pod template, is used by
+        /// Spark to create driver pods.
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        pub driver: Option<CommonConfiguration<RoleConfigFragment, JavaCommonConfig>>,
+
+        /// The executor role specifies the configuration that, together with the driver pod template, is used by
+        /// Spark to create the executor pods.
+        /// This is RoleGroup instead of plain CommonConfiguration because it needs to allow for the number of
+        /// replicas to be specified.
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        pub executor: Option<RoleGroup<RoleConfigFragment, JavaCommonConfig>>,
+
+        /// A map of key/value strings that will be passed directly to spark-submit.
+        #[serde(default)]
+        pub spark_conf: HashMap<String, String>,
+
+        /// Job dependencies: a list of python packages that will be installed via pip, a list of packages
+        /// or repositories that is passed directly to spark-submit, or a list of excluded packages
+        /// (also passed directly to spark-submit).
+        #[serde(default)]
+        pub deps: JobDependencies,
+
+        /// Configure an S3 connection that the SparkApplication has access to.
+        /// Read more in the [Spark S3 usage guide](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/usage-guide/s3).
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        pub s3connection: Option<s3::v1alpha1::InlineConnectionOrReference>,
+
+        /// Arguments passed directly to the job artifact.
+        #[serde(default)]
+        pub args: Vec<String>,
+
+        /// A list of volumes that can be made available to the job, driver or executors via their volume mounts.
+        #[serde(default)]
+        #[schemars(schema_with = "raw_object_list_schema")]
+        pub volumes: Vec<Volume>,
+
+        /// A list of environment variables that will be set in the job pod and the driver and executor
+        /// pod templates.
+        #[serde(default)]
+        pub env: Vec<EnvVar>,
+
+        /// The log file directory definition used by the Spark history server.
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        pub log_file_directory: Option<LogFileDirectorySpec>,
+    }

-#[derive(Clone, Debug, Default, Deserialize, JsonSchema, PartialEq, Eq, Serialize)]
-#[serde(rename_all = "camelCase")]
-pub struct JobDependencies {
-    /// Under the `requirements` you can specify Python dependencies that will be installed with `pip`.
-    /// Example: `tabulate==0.8.9`
-    #[serde(default)]
-    pub requirements: Vec<String>,
-
-    /// A list of packages that is passed directly to `spark-submit`.
-    #[serde(default)]
-    pub packages: Vec<String>,
-
-    /// A list of repositories that is passed directly to `spark-submit`.
-    #[serde(default)]
-    pub repositories: Vec<String>,
-
-    /// A list of excluded packages that is passed directly to `spark-submit`.
-    #[serde(default)]
-    pub exclude_packages: Vec<String>,
+    #[derive(Clone, Debug, Default, Deserialize, JsonSchema, PartialEq, Eq, Serialize)]
+    #[serde(rename_all = "camelCase")]
+    pub struct JobDependencies {
+        /// Under the `requirements` you can specify Python dependencies that will be installed with `pip`.
+        /// Example: `tabulate==0.8.9`
+        #[serde(default)]
+        pub requirements: Vec<String>,
+
+        /// A list of packages that is passed directly to `spark-submit`.
+        #[serde(default)]
+        pub packages: Vec<String>,
+
+        /// A list of repositories that is passed directly to `spark-submit`.
+        #[serde(default)]
+        pub repositories: Vec<String>,
+
+        /// A list of excluded packages that is passed directly to `spark-submit`.
+        #[serde(default)]
+        pub exclude_packages: Vec<String>,
+    }
 }

 impl v1alpha1::SparkApplication {
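For context, here is a minimal sketch of how code outside this diff would reference the version-scoped type after the change: the `#[versioned]` module expands into a `v1alpha1` module, so call sites use `v1alpha1::SparkApplication` (as the unchanged `impl` block at the end of the hunk already does). The `list_spark_app_names` helper below is hypothetical and not part of this diff; it only illustrates the version-qualified path, assuming the generated `v1alpha1` module is in scope.

```rust
// Hypothetical helper, not part of this diff: lists the names of all
// SparkApplication resources in a namespace, using the kube client that
// stackable_operator re-exports.
use stackable_operator::kube::{api::ListParams, Api, Client};

async fn list_spark_app_names(
    client: Client,
    namespace: &str,
) -> Result<Vec<String>, stackable_operator::kube::Error> {
    // The CRD is namespaced (see `namespaced` in the `crd(...)` arguments above),
    // so a namespaced Api handle is appropriate.
    let apps: Api<v1alpha1::SparkApplication> = Api::namespaced(client, namespace);
    let list = apps.list(&ListParams::default()).await?;
    Ok(list
        .items
        .into_iter()
        .filter_map(|app| app.metadata.name)
        .collect())
}
```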