
Commit e78462e (parent a1e55c7)

chore: Fix clippy errors

8 files changed: +163 additions, -165 deletions

rust/operator-binary/src/config/jvm.rs (3 additions, 3 deletions)

@@ -6,7 +6,7 @@ use stackable_operator::{
 };
 
 use crate::{
-    crd::{constants::JVM_SECURITY_PROPERTIES_FILE, HdfsCluster, HdfsRole},
+    crd::{constants::JVM_SECURITY_PROPERTIES_FILE, HdfsCluster, HdfsNodeRole},
     security::kerberos::KERBEROS_CONTAINER_PATH,
 };
 
@@ -51,7 +51,7 @@ pub fn construct_global_jvm_args(kerberos_enabled: bool) -> String {
 
 pub fn construct_role_specific_jvm_args(
     hdfs: &HdfsCluster,
-    hdfs_role: &HdfsRole,
+    hdfs_role: &HdfsNodeRole,
     role_group: &str,
     kerberos_enabled: bool,
     resources: Option<&ResourceRequirements>,
@@ -193,7 +193,7 @@ mod tests {
     fn construct_test_role_specific_jvm_args(hdfs_cluster: &str, kerberos_enabled: bool) -> String {
         let hdfs: HdfsCluster = serde_yaml::from_str(hdfs_cluster).expect("illegal test input");
 
-        let role = HdfsRole::NameNode;
+        let role = HdfsNodeRole::Name;
         let merged_config = role.merged_config(&hdfs, "default").unwrap();
         let container_config = ContainerConfig::from(role);
         let resources = container_config.resources(&merged_config);
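
Note: the commit is a pure rename (HdfsRole to HdfsNodeRole, with variants NameNode/DataNode/JournalNode shortened to Name/Data/Journal), plausibly to satisfy clippy's enum_variant_names lint, which fires when every variant repeats part of the enum's name. A minimal sketch of what the renamed enum likely looks like; the derives and serialized strings are assumptions inferred from how the diff uses the type (from_str, to_string, ==, pass-by-value), and only the type and variant names come from this commit:

// Hypothetical reconstruction; the real definition lives in
// rust/operator-binary/src/crd and is not part of this diff.
#[derive(Clone, Copy, Debug, PartialEq, Eq, strum::Display, strum::EnumString)]
pub enum HdfsNodeRole {
    #[strum(serialize = "namenode")]
    Name,
    #[strum(serialize = "datanode")]
    Data,
    #[strum(serialize = "journalnode")]
    Journal,
}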

rust/operator-binary/src/container.rs (32 additions, 32 deletions)

@@ -69,7 +69,7 @@ use crate::{
             SERVICE_PORT_NAME_RPC, STACKABLE_ROOT_DATA_DIR,
         },
         storage::DataNodeStorageConfig,
-        AnyNodeConfig, DataNodeContainer, HdfsCluster, HdfsPodRef, HdfsRole, NameNodeContainer,
+        AnyNodeConfig, DataNodeContainer, HdfsCluster, HdfsNodeRole, HdfsPodRef, NameNodeContainer,
         UpgradeState,
     },
     product_logging::{
@@ -150,7 +150,7 @@ pub enum Error {
 pub enum ContainerConfig {
     Hdfs {
         /// HDFS role (name-, data-, journal-node) which will be the container_name.
-        role: HdfsRole,
+        role: HdfsNodeRole,
         /// The container name derived from the provided role.
         container_name: String,
         /// Volume mounts for config and logging.
@@ -214,7 +214,7 @@ impl ContainerConfig {
         pb: &mut PodBuilder,
         hdfs: &HdfsCluster,
         cluster_info: &KubernetesClusterInfo,
-        role: &HdfsRole,
+        role: &HdfsNodeRole,
         role_group: &str,
         resolved_product_image: &ResolvedProductImage,
         merged_config: &AnyNodeConfig,
@@ -305,7 +305,7 @@ impl ContainerConfig {
 
         // role specific pod settings configured here
         match role {
-            HdfsRole::NameNode => {
+            HdfsNodeRole::Name => {
                 // Zookeeper fail over container
                 let zkfc_container_config = Self::try_from(NameNodeContainer::Zkfc.to_string())?;
                 pb.add_volumes(zkfc_container_config.volumes(
@@ -370,7 +370,7 @@ impl ContainerConfig {
                     labels,
                 )?);
             }
-            HdfsRole::DataNode => {
+            HdfsNodeRole::Data => {
                 // Wait for namenode init container
                 let wait_for_namenodes_container_config =
                     Self::try_from(DataNodeContainer::WaitForNameNodes.to_string())?;
@@ -393,7 +393,7 @@ impl ContainerConfig {
                     labels,
                 )?);
             }
-            HdfsRole::JournalNode => {}
+            HdfsNodeRole::Journal => {}
         }
 
         Ok(())
@@ -404,7 +404,7 @@ impl ContainerConfig {
         labels: &Labels,
     ) -> Result<Vec<PersistentVolumeClaim>> {
         match merged_config {
-            AnyNodeConfig::NameNode(node) => {
+            AnyNodeConfig::Name(node) => {
                 let listener = ListenerOperatorVolumeSourceBuilder::new(
                     &ListenerReference::ListenerClass(node.listener_class.to_string()),
                     labels,
@@ -432,11 +432,11 @@ impl ContainerConfig {
 
                 Ok(pvcs)
             }
-            AnyNodeConfig::JournalNode(node) => Ok(vec![node.resources.storage.data.build_pvc(
+            AnyNodeConfig::Journal(node) => Ok(vec![node.resources.storage.data.build_pvc(
                 ContainerConfig::DATA_VOLUME_MOUNT_NAME,
                 Some(vec!["ReadWriteOnce"]),
             )]),
-            AnyNodeConfig::DataNode(node) => Ok(DataNodeStorageConfig {
+            AnyNodeConfig::Data(node) => Ok(DataNodeStorageConfig {
                 pvcs: node.resources.storage.clone(),
             }
             .build_pvcs()),
@@ -453,7 +453,7 @@ impl ContainerConfig {
         &self,
         hdfs: &HdfsCluster,
         cluster_info: &KubernetesClusterInfo,
-        role: &HdfsRole,
+        role: &HdfsNodeRole,
         role_group: &str,
         resolved_product_image: &ResolvedProductImage,
         zookeeper_config_map_name: &str,
@@ -514,7 +514,7 @@ impl ContainerConfig {
         &self,
         hdfs: &HdfsCluster,
         cluster_info: &KubernetesClusterInfo,
-        role: &HdfsRole,
+        role: &HdfsNodeRole,
         role_group: &str,
         resolved_product_image: &ResolvedProductImage,
         zookeeper_config_map_name: &str,
@@ -587,7 +587,7 @@ impl ContainerConfig {
         &self,
         hdfs: &HdfsCluster,
         cluster_info: &KubernetesClusterInfo,
-        role: &HdfsRole,
+        role: &HdfsNodeRole,
         merged_config: &AnyNodeConfig,
         namenode_podrefs: &[HdfsPodRef],
     ) -> Result<Vec<String>, Error> {
@@ -601,7 +601,7 @@ impl ContainerConfig {
         }
 
         let upgrade_args = if hdfs.upgrade_state().ok() == Some(Some(UpgradeState::Upgrading))
-            && *role == HdfsRole::NameNode
+            && *role == HdfsNodeRole::Name
         {
             "-rollingUpgrade started"
         } else {
@@ -810,7 +810,7 @@ wait_for_termination $!
     /// Needs the POD_NAME env var to be present, which will be provided by the PodSpec
     fn get_kerberos_ticket(
         hdfs: &HdfsCluster,
-        role: &HdfsRole,
+        role: &HdfsNodeRole,
         cluster_info: &KubernetesClusterInfo,
     ) -> Result<String, Error> {
         let principal = format!(
@@ -945,9 +945,9 @@ wait_for_termination $!
             | ContainerConfig::FormatNameNodes { .. }
             | ContainerConfig::FormatZooKeeper { .. }
             | ContainerConfig::WaitForNameNodes { .. } => match merged_config {
-                AnyNodeConfig::NameNode(node) => Some(node.resources.clone().into()),
-                AnyNodeConfig::DataNode(node) => Some(node.resources.clone().into()),
-                AnyNodeConfig::JournalNode(node) => Some(node.resources.clone().into()),
+                AnyNodeConfig::Name(node) => Some(node.resources.clone().into()),
+                AnyNodeConfig::Data(node) => Some(node.resources.clone().into()),
+                AnyNodeConfig::Journal(node) => Some(node.resources.clone().into()),
             },
         }
     }
@@ -1021,7 +1021,7 @@ wait_for_termination $!
         let mut volumes = vec![];
 
         if let ContainerConfig::Hdfs { .. } = self {
-            if let AnyNodeConfig::DataNode(node) = merged_config {
+            if let AnyNodeConfig::Data(node) = merged_config {
                 volumes.push(
                     VolumeBuilder::new(LISTENER_VOLUME_NAME)
                         .ephemeral(
@@ -1126,15 +1126,15 @@ wait_for_termination $!
             }
             ContainerConfig::Hdfs { role, .. } => {
                 // JournalNode doesn't use listeners, since it's only used internally by the namenodes
-                if let HdfsRole::NameNode | HdfsRole::DataNode = role {
+                if let HdfsNodeRole::Name | HdfsNodeRole::Data = role {
                     volume_mounts.push(
                         VolumeMountBuilder::new(LISTENER_VOLUME_NAME, LISTENER_VOLUME_DIR).build(),
                     );
                 }
 
                 // Add data volume
                 match role {
-                    HdfsRole::NameNode | HdfsRole::JournalNode => {
+                    HdfsNodeRole::Name | HdfsNodeRole::Journal => {
                         volume_mounts.push(
                             VolumeMountBuilder::new(
                                 Self::DATA_VOLUME_MOUNT_NAME,
@@ -1143,7 +1143,7 @@ wait_for_termination $!
                             .build(),
                         );
                     }
-                    HdfsRole::DataNode => {
+                    HdfsNodeRole::Data => {
                         for pvc in Self::volume_claim_templates(merged_config, labels)? {
                             let pvc_name = pvc.name_any();
                             volume_mounts.push(VolumeMount {
@@ -1358,10 +1358,10 @@ wait_for_termination $!
     }
 }
 
-impl From<HdfsRole> for ContainerConfig {
-    fn from(role: HdfsRole) -> Self {
+impl From<HdfsNodeRole> for ContainerConfig {
+    fn from(role: HdfsNodeRole) -> Self {
         match role {
-            HdfsRole::NameNode => Self::Hdfs {
+            HdfsNodeRole::Name => Self::Hdfs {
                 role,
                 container_name: role.to_string(),
                 volume_mounts: ContainerVolumeDirs::from(role),
@@ -1370,7 +1370,7 @@ impl From<HdfsRole> for ContainerConfig {
                 web_ui_https_port_name: SERVICE_PORT_NAME_HTTPS,
                 metrics_port: DEFAULT_NAME_NODE_METRICS_PORT,
             },
-            HdfsRole::DataNode => Self::Hdfs {
+            HdfsNodeRole::Data => Self::Hdfs {
                 role,
                 container_name: role.to_string(),
                 volume_mounts: ContainerVolumeDirs::from(role),
@@ -1379,7 +1379,7 @@ impl From<HdfsRole> for ContainerConfig {
                 web_ui_https_port_name: SERVICE_PORT_NAME_HTTPS,
                 metrics_port: DEFAULT_DATA_NODE_METRICS_PORT,
             },
-            HdfsRole::JournalNode => Self::Hdfs {
+            HdfsNodeRole::Journal => Self::Hdfs {
                 role,
                 container_name: role.to_string(),
                 volume_mounts: ContainerVolumeDirs::from(role),
@@ -1396,7 +1396,7 @@ impl TryFrom<String> for ContainerConfig {
     type Error = Error;
 
     fn try_from(container_name: String) -> Result<Self, Self::Error> {
-        match HdfsRole::from_str(container_name.as_str()) {
+        match HdfsNodeRole::from_str(container_name.as_str()) {
             Ok(role) => Ok(ContainerConfig::from(role)),
             // No hadoop main process container
             Err(_) => match container_name {
@@ -1469,8 +1469,8 @@ impl ContainerVolumeDirs {
     }
 }
 
-impl From<HdfsRole> for ContainerVolumeDirs {
-    fn from(role: HdfsRole) -> Self {
+impl From<HdfsNodeRole> for ContainerVolumeDirs {
+    fn from(role: HdfsNodeRole) -> Self {
         ContainerVolumeDirs {
             final_config_dir: format!("{base}/{role}", base = Self::NODE_BASE_CONFIG_DIR),
             config_mount: format!("{base}/{role}", base = Self::NODE_BASE_CONFIG_DIR_MOUNT),
@@ -1481,8 +1481,8 @@ impl From<HdfsRole> for ContainerVolumeDirs {
     }
 }
 
-impl From<&HdfsRole> for ContainerVolumeDirs {
-    fn from(role: &HdfsRole) -> Self {
+impl From<&HdfsNodeRole> for ContainerVolumeDirs {
+    fn from(role: &HdfsNodeRole) -> Self {
         ContainerVolumeDirs {
             final_config_dir: format!("{base}/{role}", base = Self::NODE_BASE_CONFIG_DIR),
             config_mount: format!("{base}/{role}", base = Self::NODE_BASE_CONFIG_DIR_MOUNT),
@@ -1497,7 +1497,7 @@ impl TryFrom<&str> for ContainerVolumeDirs {
     type Error = Error;
 
     fn try_from(container_name: &str) -> Result<Self, Error> {
-        if let Ok(role) = HdfsRole::from_str(container_name) {
+        if let Ok(role) = HdfsNodeRole::from_str(container_name) {
            return Ok(ContainerVolumeDirs::from(role));
        }
 
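
The same variant shortening is applied to AnyNodeConfig, keeping its arms aligned with HdfsNodeRole across the per-role match sites above. A minimal sketch of the pattern the diff relies on; the config payload types are placeholders, and only the variant names Name/Data/Journal come from this commit:

// Placeholder config types; the real ones carry resources, storage and
// listener settings (see the match arms in container.rs above).
struct NameNodeConfig;
struct DataNodeConfig;
struct JournalNodeConfig;

enum AnyNodeConfig {
    Name(NameNodeConfig),
    Data(DataNodeConfig),
    Journal(JournalNodeConfig),
}

fn describe(config: &AnyNodeConfig) -> &'static str {
    // Exhaustive match: adding a fourth variant would fail to compile here,
    // which is what keeps call sites like volume_claim_templates in sync.
    match config {
        AnyNodeConfig::Name(_) => "namenode config",
        AnyNodeConfig::Data(_) => "datanode config",
        AnyNodeConfig::Journal(_) => "journalnode config",
    }
}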

rust/operator-binary/src/crd/affinity.rs (7 additions, 7 deletions)

@@ -5,9 +5,9 @@ use stackable_operator::{
     k8s_openapi::api::core::v1::{PodAffinity, PodAntiAffinity},
 };
 
-use crate::crd::{constants::APP_NAME, HdfsRole};
+use crate::crd::{constants::APP_NAME, HdfsNodeRole};
 
-pub fn get_affinity(cluster_name: &str, role: &HdfsRole) -> StackableAffinityFragment {
+pub fn get_affinity(cluster_name: &str, role: &HdfsNodeRole) -> StackableAffinityFragment {
     StackableAffinityFragment {
         pod_affinity: Some(PodAffinity {
             preferred_during_scheduling_ignored_during_execution: Some(vec![
@@ -41,13 +41,13 @@ mod test {
         },
     };
 
-    use crate::crd::{HdfsCluster, HdfsRole};
+    use crate::crd::{HdfsCluster, HdfsNodeRole};
 
     #[rstest]
-    #[case(HdfsRole::JournalNode)]
-    #[case(HdfsRole::NameNode)]
-    #[case(HdfsRole::DataNode)]
-    fn test_affinity_defaults(#[case] role: HdfsRole) {
+    #[case(HdfsNodeRole::Journal)]
+    #[case(HdfsNodeRole::Name)]
+    #[case(HdfsNodeRole::Data)]
+    fn test_affinity_defaults(#[case] role: HdfsNodeRole) {
         let input = r#"
 apiVersion: hdfs.stackable.tech/v1alpha1
 kind: HdfsCluster
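
For reference, rstest's #[case] attribute expands the annotated function into one independent test per case, so the rename only had to touch the case values. A self-contained sketch of the same pattern with a stand-in enum (Shape is hypothetical, not from the operator):

use rstest::rstest;

#[derive(Debug)]
enum Shape {
    Circle,
    Square,
}

// rstest generates two tests from this, named
// shape_has_debug_repr::case_1 and shape_has_debug_repr::case_2.
#[rstest]
#[case(Shape::Circle)]
#[case(Shape::Square)]
fn shape_has_debug_repr(#[case] shape: Shape) {
    assert!(!format!("{shape:?}").is_empty());
}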
