
Commit ad11049

chore: Bump stackable-operator to 0.94.0 and update other dependencies
1 parent df77f63 commit ad11049

File tree

13 files changed: +1370 -1969 lines changed

Cargo.lock

Lines changed: 400 additions & 558 deletions
Generated file; the diff is not rendered by default.

Cargo.nix

Lines changed: 794 additions & 1241 deletions
Generated file; the diff is not rendered by default.

Cargo.toml

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@ repository = "https://github.com/stackabletech/spark-k8s-operator"

 [workspace.dependencies]
 product-config = { git = "https://github.com/stackabletech/product-config.git", tag = "0.7.0" }
-stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", features = ["telemetry", "versioned"], tag = "stackable-operator-0.93.1" }
+stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", features = ["telemetry", "versioned"], tag = "stackable-operator-0.94.0" }

 anyhow = "1.0"
 built = { version = "0.8", features = ["chrono", "git2"] }

crate-hashes.json

Lines changed: 7 additions & 7 deletions
Generated file; the diff is not rendered by default.

deploy/helm/spark-k8s-operator/crds/crds.yaml

Lines changed: 0 additions & 5 deletions
@@ -2354,11 +2354,6 @@ spec:
                 format: date-time
                 nullable: true
                 type: string
-              lastUpdateTime:
-                description: The last time this condition was updated.
-                format: date-time
-                nullable: true
-                type: string
               message:
                 description: A human readable message indicating details about the transition.
                 nullable: true

rust/operator-binary/src/connect/crd.rs

Lines changed: 11 additions & 8 deletions
@@ -55,24 +55,27 @@ pub enum Error {
     FragmentValidationFailure { source: ValidationError },
 }

-#[versioned(version(name = "v1alpha1"))]
+#[versioned(
+    version(name = "v1alpha1"),
+    crates(
+        kube_core = "stackable_operator::kube::core",
+        kube_client = "stackable_operator::kube::client",
+        k8s_openapi = "stackable_operator::k8s_openapi",
+        schemars = "stackable_operator::schemars",
+        versioned = "stackable_operator::versioned"
+    )
+)]
 pub mod versioned {

     /// An Apache Spark Connect server component. This resource is managed by the Stackable operator
     /// for Apache Spark. Find more information on how to use it in the
     /// [operator documentation](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/usage-guide/connect-server).
-    #[versioned(k8s(
+    #[versioned(crd(
         group = "spark.stackable.tech",
-        kind = "SparkConnectServer",
         plural = "sparkconnectservers",
         shortname = "sparkconnect",
         status = "SparkConnectServerStatus",
         namespaced,
-        crates(
-            kube_core = "stackable_operator::kube::core",
-            k8s_openapi = "stackable_operator::k8s_openapi",
-            schemars = "stackable_operator::schemars"
-        )
     ))]
     #[derive(Clone, CustomResource, Debug, Deserialize, JsonSchema, Serialize)]
     #[serde(rename_all = "camelCase")]
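
The CRD changes in this file (and the matching ones in crd/history.rs and crd/mod.rs below) follow one pattern from the stackable-operator 0.94.0 bump: the crate path overrides move out of the per-struct k8s(...) attribute into a module-level crates(...) argument that now also names kube_client and versioned, and the per-struct attribute is renamed from k8s(...) to crd(...), with the Kubernetes kind apparently derived from the struct name once the explicit kind = "SparkConnectServer" is dropped. A minimal sketch of the new layout, with the import paths assumed from the crates(...) mapping above and MyAppSpec / example.tech as placeholder names that are not part of this repo:

// Sketch only: import paths are assumptions based on the crates(...) mapping in this
// commit; the real files in this repo may import the macro differently.
use serde::{Deserialize, Serialize};
use stackable_operator::{
    kube::CustomResource,
    schemars::JsonSchema,
    versioned::versioned,
};

#[versioned(
    version(name = "v1alpha1"),
    // In 0.94.0 the crate paths are declared once per module instead of per struct.
    crates(
        kube_core = "stackable_operator::kube::core",
        kube_client = "stackable_operator::kube::client",
        k8s_openapi = "stackable_operator::k8s_openapi",
        schemars = "stackable_operator::schemars",
        versioned = "stackable_operator::versioned"
    )
)]
pub mod versioned {
    // crd(...) replaces k8s(...); the kind appears to default to the struct
    // name with the "Spec" suffix stripped, so no explicit kind is needed.
    #[versioned(crd(group = "example.tech", plural = "myapps", namespaced))]
    #[derive(Clone, CustomResource, Debug, Deserialize, JsonSchema, Serialize)]
    #[serde(rename_all = "camelCase")]
    pub struct MyAppSpec {
        /// Placeholder field for the sketch.
        pub replicas: u16,
    }
}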

rust/operator-binary/src/connect/server.rs

Lines changed: 0 additions & 1 deletion
@@ -341,7 +341,6 @@ pub(crate) fn build_stateful_set(
                 &ListenerReference::ListenerName(listener_name.to_string()),
                 &recommended_labels,
             )
-            .context(BuildListenerVolumeSnafu)?
             .build_pvc(LISTENER_VOLUME_NAME.to_string())
             .context(BuildListenerVolumeSnafu)?,
     ]);
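
This one-line removal (and the identical one in history_controller.rs further down) also comes from the operator-rs bump: constructing the listener volume source no longer returns an intermediate Result, so only the final build_pvc(...) step still needs the .context(BuildListenerVolumeSnafu)? conversion. A rough sketch of the resulting call shape; the builder type and variable names are assumptions, since the hunk above only shows the tail of the chain:

// Sketch only: ListenerOperatorVolumeSourceBuilder and the local variable are assumed
// from context, not shown in this hunk.
let listener_pvc = ListenerOperatorVolumeSourceBuilder::new(
        &ListenerReference::ListenerName(listener_name.to_string()),
        &recommended_labels,
    )
    // In 0.93.x the constructor chain was fallible and needed its own
    // .context(BuildListenerVolumeSnafu)? here; in 0.94.0 it appears to be infallible.
    .build_pvc(LISTENER_VOLUME_NAME.to_string())
    .context(BuildListenerVolumeSnafu)?;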

rust/operator-binary/src/crd/history.rs

Lines changed: 11 additions & 11 deletions
@@ -63,21 +63,21 @@ pub enum Error {
     },
 }

-#[versioned(version(name = "v1alpha1"))]
+#[versioned(
+    version(name = "v1alpha1"),
+    crates(
+        kube_core = "stackable_operator::kube::core",
+        kube_client = "stackable_operator::kube::client",
+        k8s_openapi = "stackable_operator::k8s_openapi",
+        schemars = "stackable_operator::schemars",
+        versioned = "stackable_operator::versioned"
+    )
+)]
 pub mod versioned {
     /// A Spark cluster history server component. This resource is managed by the Stackable operator
     /// for Apache Spark. Find more information on how to use it in the
     /// [operator documentation](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/usage-guide/history-server).
-    #[versioned(k8s(
-        group = "spark.stackable.tech",
-        shortname = "sparkhist",
-        namespaced,
-        crates(
-            kube_core = "stackable_operator::kube::core",
-            k8s_openapi = "stackable_operator::k8s_openapi",
-            schemars = "stackable_operator::schemars"
-        )
-    ))]
+    #[versioned(crd(group = "spark.stackable.tech", shortname = "sparkhist", namespaced,))]
     #[derive(Clone, CustomResource, Debug, Deserialize, JsonSchema, Serialize)]
     #[serde(rename_all = "camelCase")]
     pub struct SparkHistoryServerSpec {

rust/operator-binary/src/crd/mod.rs

Lines changed: 128 additions & 123 deletions
@@ -124,136 +124,141 @@ pub enum Error {
     ConstructJvmArguments { source: crate::config::jvm::Error },
 }

-#[derive(Clone, Debug, Deserialize, PartialEq, Serialize, JsonSchema)]
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[serde(rename_all = "camelCase")]
-pub struct SparkApplicationStatus {
-    pub phase: String,
-}
-
-/// A Spark cluster stacklet. This resource is managed by the Stackable operator for Apache Spark.
-/// Find more information on how to use it and the resources that the operator generates in the
-/// [operator documentation](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/).
-///
-/// The SparkApplication CRD looks a little different than the CRDs of the other products on the
-/// Stackable Data Platform.
 #[versioned(
     version(name = "v1alpha1"),
-    k8s(
+    crates(
+        kube_core = "stackable_operator::kube::core",
+        kube_client = "stackable_operator::kube::client",
+        k8s_openapi = "stackable_operator::k8s_openapi",
+        schemars = "stackable_operator::schemars",
+        versioned = "stackable_operator::versioned"
+    )
+)]
+pub mod versioned {
+
+    #[derive(Clone, Debug, Deserialize, PartialEq, Serialize, JsonSchema)]
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[serde(rename_all = "camelCase")]
+    pub struct SparkApplicationStatus {
+        pub phase: String,
+    }
+
+    /// A Spark cluster stacklet. This resource is managed by the Stackable operator for Apache Spark.
+    /// Find more information on how to use it and the resources that the operator generates in the
+    /// [operator documentation](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/).
+    ///
+    /// The SparkApplication CRD looks a little different than the CRDs of the other products on the
+    /// Stackable Data Platform.
+    #[versioned(crd(
         group = "spark.stackable.tech",
         shortname = "sparkapp",
         status = "SparkApplicationStatus",
         namespaced,
-        crates(
-            kube_core = "stackable_operator::kube::core",
-            k8s_openapi = "stackable_operator::k8s_openapi",
-            schemars = "stackable_operator::schemars"
-        )
-    )
-)]
-#[derive(Clone, CustomResource, Debug, Deserialize, JsonSchema, Serialize)]
-#[serde(rename_all = "camelCase")]
-pub struct SparkApplicationSpec {
-    /// Mode: cluster or client. Currently only cluster is supported.
-    pub mode: SparkMode,
-
-    /// The main class - i.e. entry point - for JVM artifacts.
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub main_class: Option<String>,
-
-    /// The actual application file that will be called by `spark-submit`.
-    pub main_application_file: String,
-
-    /// User-supplied image containing spark-job dependencies that will be copied to the specified volume mount.
-    /// See the [examples](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/usage-guide/examples).
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub image: Option<String>,
-
-    // no doc - docs in ProductImage struct.
-    pub spark_image: ProductImage,
-
-    /// Name of the Vector aggregator [discovery ConfigMap](DOCS_BASE_URL_PLACEHOLDER/concepts/service_discovery).
-    /// It must contain the key `ADDRESS` with the address of the Vector aggregator.
-    /// Follow the [logging tutorial](DOCS_BASE_URL_PLACEHOLDER/tutorials/logging-vector-aggregator)
-    /// to learn how to configure log aggregation with Vector.
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub vector_aggregator_config_map_name: Option<String>,
-
-    /// The job builds a spark-submit command, complete with arguments and referenced dependencies
-    /// such as templates, and passes it on to Spark.
-    /// The reason this property uses its own type (SubmitConfigFragment) is because logging is not
-    /// supported for spark-submit processes.
-    //
-    // IMPORTANT: Please note that the jvmArgumentOverrides have no effect here!
-    // However, due to product-config things I wasn't able to remove them.
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub job: Option<CommonConfiguration<SubmitConfigFragment, JavaCommonConfig>>,
-
-    /// The driver role specifies the configuration that, together with the driver pod template, is used by
-    /// Spark to create driver pods.
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub driver: Option<CommonConfiguration<RoleConfigFragment, JavaCommonConfig>>,
-
-    /// The executor role specifies the configuration that, together with the driver pod template, is used by
-    /// Spark to create the executor pods.
-    /// This is RoleGroup instead of plain CommonConfiguration because it needs to allow for the number of replicas.
-    /// to be specified.
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub executor: Option<RoleGroup<RoleConfigFragment, JavaCommonConfig>>,
-
-    /// A map of key/value strings that will be passed directly to spark-submit.
-    #[serde(default)]
-    pub spark_conf: HashMap<String, String>,
-
-    /// Job dependencies: a list of python packages that will be installed via pip, a list of packages
-    /// or repositories that is passed directly to spark-submit, or a list of excluded packages
-    /// (also passed directly to spark-submit).
-    #[serde(default)]
-    pub deps: JobDependencies,
-
-    /// Configure an S3 connection that the SparkApplication has access to.
-    /// Read more in the [Spark S3 usage guide](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/usage-guide/s3).
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub s3connection: Option<s3::v1alpha1::InlineConnectionOrReference>,
-
-    /// Arguments passed directly to the job artifact.
-    #[serde(default)]
-    pub args: Vec<String>,
-
-    /// A list of volumes that can be made available to the job, driver or executors via their volume mounts.
-    #[serde(default)]
-    #[schemars(schema_with = "raw_object_list_schema")]
-    pub volumes: Vec<Volume>,
-
-    /// A list of environment variables that will be set in the job pod and the driver and executor
-    /// pod templates.
-    #[serde(default)]
-    pub env: Vec<EnvVar>,
-
-    /// The log file directory definition used by the Spark history server.
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub log_file_directory: Option<LogFileDirectorySpec>,
-}
+    ))]
+    #[derive(Clone, CustomResource, Debug, Deserialize, JsonSchema, Serialize)]
+    #[serde(rename_all = "camelCase")]
+    pub struct SparkApplicationSpec {
+        /// Mode: cluster or client. Currently only cluster is supported.
+        pub mode: SparkMode,
+
+        /// The main class - i.e. entry point - for JVM artifacts.
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        pub main_class: Option<String>,
+
+        /// The actual application file that will be called by `spark-submit`.
+        pub main_application_file: String,
+
+        /// User-supplied image containing spark-job dependencies that will be copied to the specified volume mount.
+        /// See the [examples](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/usage-guide/examples).
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        pub image: Option<String>,
+
+        // no doc - docs in ProductImage struct.
+        pub spark_image: ProductImage,
+
+        /// Name of the Vector aggregator [discovery ConfigMap](DOCS_BASE_URL_PLACEHOLDER/concepts/service_discovery).
+        /// It must contain the key `ADDRESS` with the address of the Vector aggregator.
+        /// Follow the [logging tutorial](DOCS_BASE_URL_PLACEHOLDER/tutorials/logging-vector-aggregator)
+        /// to learn how to configure log aggregation with Vector.
+        #[serde(skip_serializing_if = "Option::is_none")]
+        pub vector_aggregator_config_map_name: Option<String>,
+
+        /// The job builds a spark-submit command, complete with arguments and referenced dependencies
+        /// such as templates, and passes it on to Spark.
+        /// The reason this property uses its own type (SubmitConfigFragment) is because logging is not
+        /// supported for spark-submit processes.
+        //
+        // IMPORTANT: Please note that the jvmArgumentOverrides have no effect here!
+        // However, due to product-config things I wasn't able to remove them.
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        pub job: Option<CommonConfiguration<SubmitConfigFragment, JavaCommonConfig>>,
+
+        /// The driver role specifies the configuration that, together with the driver pod template, is used by
+        /// Spark to create driver pods.
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        pub driver: Option<CommonConfiguration<RoleConfigFragment, JavaCommonConfig>>,
+
+        /// The executor role specifies the configuration that, together with the driver pod template, is used by
+        /// Spark to create the executor pods.
+        /// This is RoleGroup instead of plain CommonConfiguration because it needs to allow for the number of replicas.
+        /// to be specified.
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        pub executor: Option<RoleGroup<RoleConfigFragment, JavaCommonConfig>>,
+
+        /// A map of key/value strings that will be passed directly to spark-submit.
+        #[serde(default)]
+        pub spark_conf: HashMap<String, String>,
+
+        /// Job dependencies: a list of python packages that will be installed via pip, a list of packages
+        /// or repositories that is passed directly to spark-submit, or a list of excluded packages
+        /// (also passed directly to spark-submit).
+        #[serde(default)]
+        pub deps: JobDependencies,
+
+        /// Configure an S3 connection that the SparkApplication has access to.
+        /// Read more in the [Spark S3 usage guide](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/usage-guide/s3).
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        pub s3connection: Option<s3::v1alpha1::InlineConnectionOrReference>,
+
+        /// Arguments passed directly to the job artifact.
+        #[serde(default)]
+        pub args: Vec<String>,
+
+        /// A list of volumes that can be made available to the job, driver or executors via their volume mounts.
+        #[serde(default)]
+        #[schemars(schema_with = "raw_object_list_schema")]
+        pub volumes: Vec<Volume>,
+
+        /// A list of environment variables that will be set in the job pod and the driver and executor
+        /// pod templates.
+        #[serde(default)]
+        pub env: Vec<EnvVar>,
+
+        /// The log file directory definition used by the Spark history server.
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        pub log_file_directory: Option<LogFileDirectorySpec>,
+    }

-#[derive(Clone, Debug, Default, Deserialize, JsonSchema, PartialEq, Eq, Serialize)]
-#[serde(rename_all = "camelCase")]
-pub struct JobDependencies {
-    /// Under the `requirements` you can specify Python dependencies that will be installed with `pip`.
-    /// Example: `tabulate==0.8.9`
-    #[serde(default)]
-    pub requirements: Vec<String>,
-
-    /// A list of packages that is passed directly to `spark-submit`.
-    #[serde(default)]
-    pub packages: Vec<String>,
-
-    /// A list of repositories that is passed directly to `spark-submit`.
-    #[serde(default)]
-    pub repositories: Vec<String>,
-
-    /// A list of excluded packages that is passed directly to `spark-submit`.
-    #[serde(default)]
-    pub exclude_packages: Vec<String>,
+    #[derive(Clone, Debug, Default, Deserialize, JsonSchema, PartialEq, Eq, Serialize)]
+    #[serde(rename_all = "camelCase")]
+    pub struct JobDependencies {
+        /// Under the `requirements` you can specify Python dependencies that will be installed with `pip`.
+        /// Example: `tabulate==0.8.9`
+        #[serde(default)]
+        pub requirements: Vec<String>,
+
+        /// A list of packages that is passed directly to `spark-submit`.
+        #[serde(default)]
+        pub packages: Vec<String>,
+
+        /// A list of repositories that is passed directly to `spark-submit`.
+        #[serde(default)]
+        pub repositories: Vec<String>,
+
+        /// A list of excluded packages that is passed directly to `spark-submit`.
+        #[serde(default)]
+        pub exclude_packages: Vec<String>,
+    }
 }

 impl v1alpha1::SparkApplication {
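
Because the spec and status types now live inside pub mod versioned, the rest of the operator keeps addressing them through the macro-generated version module, as the unchanged impl v1alpha1::SparkApplication context line shows. A small sketch of what that access looks like; the helper function is illustrative only and not part of this commit:

// Sketch only: assumes the generated v1alpha1 module is in scope, as it is for the
// unchanged `impl v1alpha1::SparkApplication` block that follows this hunk.
fn application_phase(app: &v1alpha1::SparkApplication) -> Option<&str> {
    // `status` is the Option<SparkApplicationStatus> field generated by CustomResource.
    app.status.as_ref().map(|status| status.phase.as_str())
}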

rust/operator-binary/src/history/history_controller.rs

Lines changed: 0 additions & 1 deletion
@@ -600,7 +600,6 @@ fn build_stateful_set(
                 &ListenerReference::ListenerName(group_listener_name(shs, &rolegroupref.role)),
                 &recommended_labels,
             )
-            .context(BuildListenerVolumeSnafu)?
             .build_pvc(LISTENER_VOLUME_NAME.to_string())
             .context(BuildListenerVolumeSnafu)?,
     ]);
