
Commit c3b98b2

expose history pods via listener classes
1 parent 29d2cd3 commit c3b98b2


4 files changed: +61 -35 lines changed

Lines changed: 12 additions & 11 deletions

@@ -1,18 +1,19 @@
-= Service exposition with ListenerClasses
+= History service exposition with listener classes
+:description: Configure the Spark history service exposure with listener classes: cluster-internal, external-unstable, or external-stable.
 
-The Spark operator deploys SparkApplications, and does not offer a UI or other API, so no services are exposed.
-However, the operator can also deploy HistoryServers, which do offer a UI and API.
-The operator deploys a service called `<name>-historyserver` (where `<name>` is the name of the spark application) through which the HistoryServer can be reached.
-
-This service can have three different types: `cluster-internal`, `external-unstable` and `external-stable`.
-Read more about the types in the xref:concepts:service-exposition.adoc[service exposition] documentation at platform level.
-
-This is how the ListenerClass is configured:
+The operator deploys a xref:listener-operator:listener.adoc[Listener] for each Spark history pod.
+By default the history server is only accessible from within the Kubernetes cluster, but this can be changed by setting `.spec.clusterConfig.listenerClass`:
 
 [source,yaml]
 ----
+apiVersion: spark.stackable.tech/v1alpha1
+kind: SparkHistoryServer
+metadata:
+  name: spark-history
 spec:
   clusterConfig:
-    listenerClass: cluster-internal # <1>
+    listenerClass: external-unstable # <1>
 ----
-<1> The default `cluster-internal` setting.
+<1> Specify one of `external-stable`, `external-unstable` or `cluster-internal` (the default is `cluster-internal`).
+
+For the example above, the listener operator creates a service named `spark-history-node-default-0-listener`, where `spark-history` is the name of the SparkHistoryServer, `node` is the service role (the only role available for history servers) and `default` is the role group.
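
As an illustration of what that exposes, a Service of roughly the following shape would back the name above. This is a sketch, not part of the commit: the `NodePort` type is what the `external-unstable` listener class is expected to map to, and 18080 is the assumed Spark history UI port.

[source,yaml]
----
# Hypothetical sketch of the Service created by the listener-operator
# for the example above; type and port are assumptions, not from this diff.
apiVersion: v1
kind: Service
metadata:
  name: spark-history-node-default-0-listener
spec:
  type: NodePort        # external-unstable is expected to map to NodePort
  ports:
    - name: http
      port: 18080       # assumed Spark history server UI port
      protocol: TCP
----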

rust/operator-binary/src/crd/constants.rs

Lines changed: 3 additions & 0 deletions
@@ -88,3 +88,6 @@ pub const SPARK_ENV_SH_FILE_NAME: &str = "spark-env.sh";
 pub const SPARK_CLUSTER_ROLE: &str = "spark-k8s-clusterrole";
 pub const SPARK_UID: i64 = 1000;
 pub const METRICS_PORT: u16 = 18081;
+
+pub const LISTENER_VOLUME_NAME: &str = "listener";
+pub const LISTENER_VOLUME_DIR: &str = "/stackable/listener";

rust/operator-binary/src/history/history_controller.rs

Lines changed: 42 additions & 24 deletions
@@ -52,11 +52,12 @@ use crate::{
     crd::{
         constants::{
             ACCESS_KEY_ID, APP_NAME, HISTORY_CONTROLLER_NAME, HISTORY_ROLE_NAME,
-            JVM_SECURITY_PROPERTIES_FILE, MAX_SPARK_LOG_FILES_SIZE, METRICS_PORT, OPERATOR_NAME,
-            SECRET_ACCESS_KEY, SPARK_CLUSTER_ROLE, SPARK_DEFAULTS_FILE_NAME,
-            SPARK_ENV_SH_FILE_NAME, SPARK_IMAGE_BASE_NAME, SPARK_UID, STACKABLE_TRUST_STORE,
-            VOLUME_MOUNT_NAME_CONFIG, VOLUME_MOUNT_NAME_LOG, VOLUME_MOUNT_NAME_LOG_CONFIG,
-            VOLUME_MOUNT_PATH_CONFIG, VOLUME_MOUNT_PATH_LOG, VOLUME_MOUNT_PATH_LOG_CONFIG,
+            JVM_SECURITY_PROPERTIES_FILE, LISTENER_VOLUME_DIR, LISTENER_VOLUME_NAME,
+            MAX_SPARK_LOG_FILES_SIZE, METRICS_PORT, OPERATOR_NAME, SECRET_ACCESS_KEY,
+            SPARK_CLUSTER_ROLE, SPARK_DEFAULTS_FILE_NAME, SPARK_ENV_SH_FILE_NAME,
+            SPARK_IMAGE_BASE_NAME, SPARK_UID, STACKABLE_TRUST_STORE, VOLUME_MOUNT_NAME_CONFIG,
+            VOLUME_MOUNT_NAME_LOG, VOLUME_MOUNT_NAME_LOG_CONFIG, VOLUME_MOUNT_PATH_CONFIG,
+            VOLUME_MOUNT_PATH_LOG, VOLUME_MOUNT_PATH_LOG_CONFIG,
         },
         history::{self, HistoryConfig, SparkHistoryServerContainer, v1alpha1},
         logdir::ResolvedLogDir,
@@ -437,12 +438,16 @@ fn build_stateful_set(
         rolegroupref.object_name()
     };
 
-    let metadata = ObjectMetaBuilder::new()
-        .with_recommended_labels(labels(
-            shs,
-            &resolved_product_image.app_version_label,
-            &rolegroupref.role_group,
-        ))
+    let recommended_object_labels = labels(
+        shs,
+        &resolved_product_image.app_version_label,
+        rolegroupref.role_group.as_ref(),
+    );
+    let recommended_labels =
+        Labels::recommended(recommended_object_labels.clone()).context(LabelBuildSnafu)?;
+
+    let pb_metadata = ObjectMetaBuilder::new()
+        .with_recommended_labels(recommended_object_labels.clone())
         .context(MetadataBuildSnafu)?
         .build();
 
@@ -452,7 +457,7 @@ fn build_stateful_set(
         .requested_secret_lifetime
         .context(MissingSecretLifetimeSnafu)?;
     pb.service_account_name(serviceaccount.name_unchecked())
-        .metadata(metadata)
+        .metadata(pb_metadata)
         .image_pull_secrets_from_product_image(resolved_product_image)
         .add_volume(
             VolumeBuilder::new(VOLUME_MOUNT_NAME_CONFIG)
@@ -524,7 +529,22 @@
         .context(AddVolumeMountSnafu)?
         .add_volume_mount(VOLUME_MOUNT_NAME_LOG, VOLUME_MOUNT_PATH_LOG)
         .context(AddVolumeMountSnafu)?
+        .add_volume_mount(LISTENER_VOLUME_NAME, LISTENER_VOLUME_DIR)
+        .context(AddVolumeMountSnafu)?
         .build();
+
+    // Add listener volume
+    let listener_class = &shs.spec.cluster_config.listener_class;
+    // all listeners will use ephemeral volumes as they can/should
+    // be removed when the pods are *terminated* (ephemeral PVCs will
+    // survive re-starts)
+    pb.add_listener_volume_by_listener_class(
+        LISTENER_VOLUME_NAME,
+        &listener_class.to_string(),
+        &recommended_labels.clone(),
+    )
+    .context(AddVolumeSnafu)?;
+
     pb.add_container(container);
 
     if merged_config.logging.enable_vector_agent {
@@ -560,19 +580,17 @@
     pod_template.merge_from(shs.role().config.pod_overrides.clone());
     pod_template.merge_from(role_group.config.pod_overrides);
 
+    let sts_metadata = ObjectMetaBuilder::new()
+        .name_and_namespace(shs)
+        .name(rolegroupref.object_name())
+        .ownerreference_from_resource(shs, None, Some(true))
+        .context(ObjectMissingMetadataForOwnerRefSnafu)?
+        .with_recommended_labels(recommended_object_labels)
+        .context(MetadataBuildSnafu)?
+        .build();
+
     Ok(StatefulSet {
-        metadata: ObjectMetaBuilder::new()
-            .name_and_namespace(shs)
-            .name(rolegroupref.object_name())
-            .ownerreference_from_resource(shs, None, Some(true))
-            .context(ObjectMissingMetadataForOwnerRefSnafu)?
-            .with_recommended_labels(labels(
-                shs,
-                &resolved_product_image.app_version_label,
-                rolegroupref.role_group.as_ref(),
-            ))
-            .context(MetadataBuildSnafu)?
-            .build(),
+        metadata: sts_metadata,
         spec: Some(StatefulSetSpec {
             template: pod_template,
             replicas: shs.replicas(rolegroupref),
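
The listener volume wired up in `build_stateful_set` above is ephemeral: `add_listener_volume_by_listener_class` attaches a volume whose PVC is created together with the pod and removed when the pod is terminated, while surviving restarts (as the comment in the hunk notes). A rough sketch of how that could look in the rendered pod spec is shown below; the `listeners.stackable.tech` storage class and annotation key follow the listener-operator's documented mechanism rather than this diff, so treat the exact fields as assumptions.

[source,yaml]
----
# Hypothetical rendering of the ephemeral listener volume (field values assumed)
volumes:
  - name: listener                                 # LISTENER_VOLUME_NAME
    ephemeral:
      volumeClaimTemplate:
        metadata:
          annotations:
            listeners.stackable.tech/listener-class: external-unstable
        spec:
          accessModes:
            - ReadWriteMany
          storageClassName: listeners.stackable.tech
          resources:
            requests:
              storage: "1"
----

The container mounts this volume at `/stackable/listener` (`LISTENER_VOLUME_DIR`), which is where the listener-operator is expected to publish the addresses assigned to the pod.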

tests/templates/kuttl/spark-history-server/06-deploy-history-server.yaml.j2

Lines changed: 4 additions & 0 deletions
@@ -35,6 +35,10 @@ spec:
 {% if lookup('env', 'VECTOR_AGGREGATOR') %}
   vectorAggregatorConfigMapName: vector-aggregator-discovery
 {% endif %}
+
+  clusterConfig:
+    listenerClass: external-unstable
+
   logFileDirectory:
     s3:
       prefix: eventlogs/

0 commit comments
