Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -26,10 +26,13 @@ All notable changes to this project will be documented in this file.
- Document that Spark Connect doesn't integrate with the history server ([#559]).
- test: Bump to Vector `0.46.1` ([#565]).
- Use versioned common structs ([#572]).
- BREAKING: Change the label `app.kubernetes.io/name` for Spark history and connect objects to use `spark-history` and `spark-connect` instead of `spark-k8s` ([#573]).
- BREAKING: The history Pods now have their own ClusterRole named `spark-history-clusterrole` ([#573]).

### Fixed

- Use `json` file extension for log files ([#553]).
- The Spark connect controller now watches StatefulSets instead of Deployments (again) ([#573]).

### Removed

Expand All @@ -46,6 +49,7 @@ All notable changes to this project will be documented in this file.
[#565]: https://github.com/stackabletech/spark-k8s-operator/pull/565
[#570]: https://github.com/stackabletech/spark-k8s-operator/pull/570
[#572]: https://github.com/stackabletech/spark-k8s-operator/pull/572
[#573]: https://github.com/stackabletech/spark-k8s-operator/pull/573

## [25.3.0] - 2025-03-21

Expand Down
7 changes: 7 additions & 0 deletions deploy/helm/spark-k8s-operator/templates/roles.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,14 @@ rules:
resources:
- persistentvolumeclaims
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
---
# ClusterRole for Spark Connect server pods. The operator binds it via a
# dedicated per-cluster ServiceAccount/RoleBinding (see build_rbac_resources
# in the connect controller).
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: spark-connect-clusterrole
labels:
{{- include "operator.labels" . | nindent 4 }}
rules:
# Full lifecycle management of the core resources a Connect deployment
# creates (driver/executor pods, their config and credentials, services).
- apiGroups:
- ""
resources:
- configmaps
- persistentvolumeclaims
- pods
- secrets
- serviceaccounts
- services
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
# Allow publishing Kubernetes events.
- apiGroups:
- events.k8s.io
resources:
- events
verbs:
- create
# OpenShift only: allow the pods to run under the nonroot-v2
# SecurityContextConstraints. The rule is rendered only when the
# security.openshift.io/v1 API is present on the target cluster.
{{ if .Capabilities.APIVersions.Has "security.openshift.io/v1" }}
- apiGroups:
- security.openshift.io
resources:
- securitycontextconstraints
resourceNames:
- nonroot-v2
verbs:
- use
{{ end }}
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
---
# ClusterRole for Spark history server pods (introduced in #573 per the
# CHANGELOG: "The history Pods now have their own ClusterRole named
# `spark-history-clusterrole`").
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: spark-history-clusterrole
labels:
{{- include "operator.labels" . | nindent 4 }}
rules:
# Full lifecycle management of the core resources the history server needs.
- apiGroups:
- ""
resources:
- configmaps
- persistentvolumeclaims
- pods
- secrets
- serviceaccounts
- services
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
# Allow publishing Kubernetes events.
- apiGroups:
- events.k8s.io
resources:
- events
verbs:
- create
# OpenShift only: allow the pods to run under the nonroot-v2
# SecurityContextConstraints. The rule is rendered only when the
# security.openshift.io/v1 API is present on the target cluster.
{{ if .Capabilities.APIVersions.Has "security.openshift.io/v1" }}
- apiGroups:
- security.openshift.io
resources:
- securitycontextconstraints
resourceNames:
- nonroot-v2
verbs:
- use
{{ end }}
7 changes: 4 additions & 3 deletions rust/operator-binary/src/connect/common.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,9 +11,10 @@ use strum::Display;
use super::crd::CONNECT_EXECUTOR_ROLE_NAME;
use crate::{
connect::crd::{
CONNECT_CONTROLLER_NAME, CONNECT_SERVER_ROLE_NAME, DUMMY_SPARK_CONNECT_GROUP_NAME,
CONNECT_APP_NAME, CONNECT_CONTROLLER_NAME, CONNECT_SERVER_ROLE_NAME,
DUMMY_SPARK_CONNECT_GROUP_NAME,
},
crd::constants::{APP_NAME, OPERATOR_NAME},
crd::constants::OPERATOR_NAME,
};

#[derive(Snafu, Debug)]
Expand Down Expand Up @@ -42,7 +43,7 @@ pub(crate) fn labels<'a, T>(
) -> ObjectLabels<'a, T> {
ObjectLabels {
owner: scs,
app_name: APP_NAME,
app_name: CONNECT_APP_NAME,
app_version: app_version_label,
operator_name: OPERATOR_NAME,
controller_name: CONNECT_CONTROLLER_NAME,
Expand Down
8 changes: 4 additions & 4 deletions rust/operator-binary/src/connect/controller.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,11 +18,11 @@ use stackable_operator::{
};
use strum::{EnumDiscriminants, IntoStaticStr};

use super::crd::{CONNECT_CONTROLLER_NAME, v1alpha1};
use super::crd::{CONNECT_APP_NAME, CONNECT_CONTROLLER_NAME, v1alpha1};
use crate::{
Ctx,
connect::{common, crd::SparkConnectServerStatus, executor, server},
crd::constants::{APP_NAME, OPERATOR_NAME, SPARK_IMAGE_BASE_NAME},
crd::constants::{OPERATOR_NAME, SPARK_IMAGE_BASE_NAME},
};

#[derive(Snafu, Debug, EnumDiscriminants)]
Expand Down Expand Up @@ -168,7 +168,7 @@ pub async fn reconcile(
let client = &ctx.client;

let mut cluster_resources = ClusterResources::new(
APP_NAME,
CONNECT_APP_NAME,
OPERATOR_NAME,
CONNECT_CONTROLLER_NAME,
&scs.object_ref(&()),
Expand All @@ -184,7 +184,7 @@ pub async fn reconcile(
// Use a dedicated service account for connect server pods.
let (service_account, role_binding) = build_rbac_resources(
scs,
APP_NAME,
CONNECT_APP_NAME,
cluster_resources
.get_required_labels()
.context(GetRequiredLabelsSnafu)?,
Expand Down
5 changes: 3 additions & 2 deletions rust/operator-binary/src/connect/crd.rs
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,6 @@ use stackable_operator::{
use strum::{Display, EnumIter};

use super::common::SparkConnectRole;
use crate::crd::constants::APP_NAME;

pub const CONNECT_CONTROLLER_NAME: &str = "connect";
pub const CONNECT_FULL_CONTROLLER_NAME: &str = concatcp!(
Expand All @@ -48,6 +47,8 @@ pub const CONNECT_UI_PORT: i32 = 4040;

pub const DUMMY_SPARK_CONNECT_GROUP_NAME: &str = "default";

pub const CONNECT_APP_NAME: &str = "spark-connect";

#[derive(Snafu, Debug)]
pub enum Error {
#[snafu(display("fragment validation failure"))]
Expand Down Expand Up @@ -346,7 +347,7 @@ impl v1alpha1::ExecutorConfig {

fn affinity(cluster_name: &str) -> StackableAffinityFragment {
let affinity_between_role_pods = affinity_between_role_pods(
APP_NAME,
CONNECT_APP_NAME,
cluster_name,
&SparkConnectRole::Executor.to_string(),
70,
Expand Down
12 changes: 7 additions & 5 deletions rust/operator-binary/src/connect/server.rs
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ use stackable_operator::{
role_utils::RoleGroupRef,
};

use super::crd::CONNECT_APP_NAME;
use crate::{
connect::{
common::{self, SparkConnectRole, object_name},
Expand All @@ -45,7 +46,7 @@ use crate::{
},
crd::{
constants::{
APP_NAME, JVM_SECURITY_PROPERTIES_FILE, LISTENER_VOLUME_DIR, LISTENER_VOLUME_NAME,
JVM_SECURITY_PROPERTIES_FILE, LISTENER_VOLUME_DIR, LISTENER_VOLUME_NAME,
LOG4J2_CONFIG_FILE, MAX_SPARK_LOG_FILES_SIZE, METRICS_PROPERTIES_FILE,
POD_TEMPLATE_FILE, SPARK_DEFAULTS_FILE_NAME, SPARK_UID, VOLUME_MOUNT_NAME_CONFIG,
VOLUME_MOUNT_NAME_LOG, VOLUME_MOUNT_NAME_LOG_CONFIG, VOLUME_MOUNT_PATH_CONFIG,
Expand Down Expand Up @@ -370,7 +371,7 @@ pub(crate) fn build_stateful_set(
match_labels: Some(
Labels::role_group_selector(
scs,
APP_NAME,
CONNECT_APP_NAME,
&SparkConnectRole::Server.to_string(),
DUMMY_SPARK_CONNECT_GROUP_NAME,
)
Expand All @@ -393,9 +394,10 @@ pub(crate) fn build_internal_service(
) -> Result<Service, Error> {
let service_name = object_name(&scs.name_any(), SparkConnectRole::Server);

let selector = Labels::role_selector(scs, APP_NAME, &SparkConnectRole::Server.to_string())
.context(LabelBuildSnafu)?
.into();
let selector =
Labels::role_selector(scs, CONNECT_APP_NAME, &SparkConnectRole::Server.to_string())
.context(LabelBuildSnafu)?
.into();

Ok(Service {
metadata: ObjectMetaBuilder::new()
Expand Down
2 changes: 1 addition & 1 deletion rust/operator-binary/src/crd/constants.rs
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ pub const POD_DRIVER_FULL_CONTROLLER_NAME: &str =
// Short name of the controller reconciling Spark history server objects.
pub const HISTORY_CONTROLLER_NAME: &str = "history";
// Fully qualified controller name: "<controller>.<operator>".
pub const HISTORY_FULL_CONTROLLER_NAME: &str =
concatcp!(HISTORY_CONTROLLER_NAME, '.', OPERATOR_NAME);

// Value of the `app.kubernetes.io/name` label on history server objects
// (dedicated value replacing the shared `spark-k8s` APP_NAME, see #573).
pub const HISTORY_APP_NAME: &str = "spark-history";
// Role name used for history server pods (e.g. in PDB and label selectors).
pub const HISTORY_ROLE_NAME: &str = "node";

pub const SPARK_IMAGE_BASE_NAME: &str = "spark-k8s";
Expand Down
22 changes: 11 additions & 11 deletions rust/operator-binary/src/history/history_controller.rs
Original file line number Diff line number Diff line change
Expand Up @@ -54,13 +54,13 @@ use crate::{
Ctx,
crd::{
constants::{
ACCESS_KEY_ID, APP_NAME, HISTORY_CONTROLLER_NAME, HISTORY_ROLE_NAME, HISTORY_UI_PORT,
JVM_SECURITY_PROPERTIES_FILE, LISTENER_VOLUME_DIR, LISTENER_VOLUME_NAME,
MAX_SPARK_LOG_FILES_SIZE, METRICS_PORT, OPERATOR_NAME, SECRET_ACCESS_KEY,
SPARK_DEFAULTS_FILE_NAME, SPARK_ENV_SH_FILE_NAME, SPARK_IMAGE_BASE_NAME, SPARK_UID,
STACKABLE_TRUST_STORE, VOLUME_MOUNT_NAME_CONFIG, VOLUME_MOUNT_NAME_LOG,
VOLUME_MOUNT_NAME_LOG_CONFIG, VOLUME_MOUNT_PATH_CONFIG, VOLUME_MOUNT_PATH_LOG,
VOLUME_MOUNT_PATH_LOG_CONFIG,
ACCESS_KEY_ID, HISTORY_APP_NAME, HISTORY_CONTROLLER_NAME, HISTORY_ROLE_NAME,
HISTORY_UI_PORT, JVM_SECURITY_PROPERTIES_FILE, LISTENER_VOLUME_DIR,
LISTENER_VOLUME_NAME, MAX_SPARK_LOG_FILES_SIZE, METRICS_PORT, OPERATOR_NAME,
SECRET_ACCESS_KEY, SPARK_DEFAULTS_FILE_NAME, SPARK_ENV_SH_FILE_NAME,
SPARK_IMAGE_BASE_NAME, SPARK_UID, STACKABLE_TRUST_STORE, VOLUME_MOUNT_NAME_CONFIG,
VOLUME_MOUNT_NAME_LOG, VOLUME_MOUNT_NAME_LOG_CONFIG, VOLUME_MOUNT_PATH_CONFIG,
VOLUME_MOUNT_PATH_LOG, VOLUME_MOUNT_PATH_LOG_CONFIG,
},
history::{self, HistoryConfig, SparkHistoryServerContainer, v1alpha1},
listener_ext,
Expand Down Expand Up @@ -248,7 +248,7 @@ pub async fn reconcile(
let client = &ctx.client;

let mut cluster_resources = ClusterResources::new(
APP_NAME,
HISTORY_APP_NAME,
OPERATOR_NAME,
HISTORY_CONTROLLER_NAME,
&shs.object_ref(&()),
Expand All @@ -271,7 +271,7 @@ pub async fn reconcile(
// Use a dedicated service account for history server pods.
let (service_account, role_binding) = build_rbac_resources(
shs,
APP_NAME,
HISTORY_APP_NAME,
cluster_resources
.get_required_labels()
.context(GetRequiredLabelsSnafu)?,
Expand Down Expand Up @@ -659,7 +659,7 @@ fn build_stateful_set(
match_labels: Some(
Labels::role_group_selector(
shs,
APP_NAME,
HISTORY_APP_NAME,
&rolegroupref.role,
&rolegroupref.role_group,
)
Expand Down Expand Up @@ -726,7 +726,7 @@ fn labels<'a, T>(
) -> ObjectLabels<'a, T> {
ObjectLabels {
owner: shs,
app_name: APP_NAME,
app_name: HISTORY_APP_NAME,
app_version: app_version_label,
operator_name: OPERATOR_NAME,
controller_name: HISTORY_CONTROLLER_NAME,
Expand Down
4 changes: 2 additions & 2 deletions rust/operator-binary/src/history/operations/pdb.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ use stackable_operator::{
};

use crate::crd::{
constants::{APP_NAME, HISTORY_CONTROLLER_NAME, HISTORY_ROLE_NAME, OPERATOR_NAME},
constants::{HISTORY_APP_NAME, HISTORY_CONTROLLER_NAME, HISTORY_ROLE_NAME, OPERATOR_NAME},
history::v1alpha1,
};

Expand Down Expand Up @@ -37,7 +37,7 @@ pub async fn add_pdbs(
.unwrap_or(max_unavailable_history_servers());
let pdb = PodDisruptionBudgetBuilder::new_with_role(
history,
APP_NAME,
HISTORY_APP_NAME,
HISTORY_ROLE_NAME,
OPERATOR_NAME,
HISTORY_CONTROLLER_NAME,
Expand Down
4 changes: 2 additions & 2 deletions rust/operator-binary/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ use stackable_operator::{
YamlSchema,
cli::{Command, ProductOperatorRun},
k8s_openapi::api::{
apps::v1::{Deployment, StatefulSet},
apps::v1::StatefulSet,
core::v1::{ConfigMap, Pod, Service},
},
kube::{
Expand Down Expand Up @@ -273,7 +273,7 @@ async fn main() -> anyhow::Result<()> {
watcher::Config::default(),
)
.owns(
watch_namespace.get_api::<DeserializeGuard<Deployment>>(&client),
watch_namespace.get_api::<DeserializeGuard<StatefulSet>>(&client),
watcher::Config::default(),
)
.owns(
Expand Down
Loading