Skip to content

Commit 3f53b3c

Browse files
committed
Merge remote-tracking branch 'origin/main' into op-rs-0.79.0
2 parents 1e2a510 + 6500912 commit 3f53b3c

File tree

8 files changed

+54
-30
lines changed

8 files changed

+54
-30
lines changed

.github/workflows/build.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ env:
2525
CARGO_TERM_COLOR: always
2626
CARGO_INCREMENTAL: '0'
2727
CARGO_PROFILE_DEV_DEBUG: '0'
28-
RUST_TOOLCHAIN_VERSION: "1.80.1"
28+
RUST_TOOLCHAIN_VERSION: "1.81.0"
2929
RUSTFLAGS: "-D warnings"
3030
RUSTDOCFLAGS: "-D warnings"
3131
RUST_LOG: "info"

.github/workflows/pr_pre-commit.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ on:
66

77
env:
88
CARGO_TERM_COLOR: always
9-
RUST_TOOLCHAIN_VERSION: "1.80.1"
9+
RUST_TOOLCHAIN_VERSION: "1.81.0"
1010
HADOLINT_VERSION: "v2.12.0"
1111
PYTHON_VERSION: "3.12"
1212

.pre-commit-config.yaml

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ default_language_version:
66

77
repos:
88
- repo: https://github.com/pre-commit/pre-commit-hooks
9-
rev: 2c9f875913ee60ca25ce70243dc24d5b6415598c # 4.6.0
9+
rev: cef0300fd0fc4d2a87a85fa2093c6b283ea36f4b # 5.0.0
1010
hooks:
1111
- id: trailing-whitespace
1212
- id: end-of-file-fixer
@@ -28,7 +28,7 @@ repos:
2828
- id: yamllint
2929

3030
- repo: https://github.com/igorshubovych/markdownlint-cli
31-
rev: f295829140d25717bc79368d3f966fc1f67a824f # 0.41.0
31+
rev: aa975a18c9a869648007d33864034dbc7481fe5e # 0.42.0
3232
hooks:
3333
- id: markdownlint
3434
types: [text]
@@ -44,15 +44,15 @@ repos:
4444
# If you do not, you will need to delete the cached ruff binary shown in the
4545
# error message
4646
- repo: https://github.com/astral-sh/ruff-pre-commit
47-
rev: f1ebc5730d98440041cc43e4d69829ad598ae1e7 # 0.6.3
47+
rev: 8983acb92ee4b01924893632cf90af926fa608f0 # 0.7.0
4848
hooks:
4949
# Run the linter.
5050
- id: ruff
5151
# Run the formatter.
5252
- id: ruff-format
5353

5454
- repo: https://github.com/rhysd/actionlint
55-
rev: 62dc61a45fc95efe8c800af7a557ab0b9165d63b # 1.7.1
55+
rev: 4e683ab8014a63fafa117492a0c6053758e6d593 # 1.7.3
5656
hooks:
5757
- id: actionlint
5858

CHANGELOG.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,11 +21,13 @@ All notable changes to this project will be documented in this file.
2121
### Fixed
2222

2323
- BREAKING: The fields `connection` and `host` on `S3Connection` as well as `bucketName` on `S3Bucket` are now mandatory ([#518]).
24+
- An invalid `HiveCluster` doesn't cause the operator to stop functioning ([#523]).
2425

2526
[#505]: https://github.com/stackabletech/hive-operator/pull/505
2627
[#508]: https://github.com/stackabletech/hive-operator/pull/508
2728
[#518]: https://github.com/stackabletech/hive-operator/pull/518
2829
[#522]: https://github.com/stackabletech/hive-operator/pull/522
30+
[#523]: https://github.com/stackabletech/hive-operator/pull/523
2931

3032
## [24.7.0] - 2024-07-24
3133

Makefile

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -48,9 +48,10 @@ docker-publish:
4848
# Uses the keyless signing flow with Github Actions as identity provider\
4949
cosign sign -y "${DOCKER_REPO}/${ORGANIZATION}/${OPERATOR_NAME}@$$REPO_DIGEST_OF_IMAGE";\
5050
# Generate the SBOM for the operator image, this leverages the already generated SBOM for the operator binary by cargo-cyclonedx\
51-
syft scan --output cyclonedx-json=sbom.json --select-catalogers "-cargo-auditable-binary-cataloger" --scope all-layers --source-name "${OPERATOR_NAME}" --source-version "${VERSION}" "${DOCKER_REPO}/${ORGANIZATION}/${OPERATOR_NAME}@$$REPO_DIGEST_OF_IMAGE";\
51+
syft scan --output cyclonedx-json=sbom.json --select-catalogers "-cargo-auditable-binary-cataloger" --scope all-layers --source-name "${OPERATOR_NAME}" --source-version "${VERSION}-${ARCH}" "${DOCKER_REPO}/${ORGANIZATION}/${OPERATOR_NAME}@$$REPO_DIGEST_OF_IMAGE";\
5252
# Determine the PURL for the container image\
53-
PURL="pkg:docker/${ORGANIZATION}/${OPERATOR_NAME}@$$REPO_DIGEST_OF_IMAGE?repository_url=${DOCKER_REPO}";\
53+
URLENCODED_REPO_DIGEST_OF_IMAGE=$$(echo "$$REPO_DIGEST_OF_IMAGE" | sed 's/:/%3A/g');\
54+
PURL="pkg:oci/${OPERATOR_NAME}@$$URLENCODED_REPO_DIGEST_OF_IMAGE?arch=${ARCH}&repository_url=${DOCKER_REPO}%2F${ORGANIZATION}%2F${OPERATOR_NAME}";\
5455
# Get metadata from the image\
5556
IMAGE_DESCRIPTION=$$(docker inspect --format='{{.Config.Labels.description}}' "${DOCKER_REPO}/${ORGANIZATION}/${OPERATOR_NAME}:${VERSION}-${ARCH}");\
5657
IMAGE_NAME=$$(docker inspect --format='{{.Config.Labels.name}}' "${DOCKER_REPO}/${ORGANIZATION}/${OPERATOR_NAME}:${VERSION}-${ARCH}");\
@@ -73,9 +74,10 @@ docker-publish:
7374
# Uses the keyless signing flow with Github Actions as identity provider\
7475
cosign sign -y "${OCI_REGISTRY_HOSTNAME}/${OCI_REGISTRY_PROJECT_IMAGES}/${OPERATOR_NAME}@$$REPO_DIGEST_OF_IMAGE";\
7576
# Generate the SBOM for the operator image, this leverages the already generated SBOM for the operator binary by cargo-cyclonedx\
76-
syft scan --output cyclonedx-json=sbom.json --select-catalogers "-cargo-auditable-binary-cataloger" --scope all-layers --source-name "${OPERATOR_NAME}" --source-version "${VERSION}" "${OCI_REGISTRY_HOSTNAME}/${OCI_REGISTRY_PROJECT_IMAGES}/${OPERATOR_NAME}@$$REPO_DIGEST_OF_IMAGE";\
77+
syft scan --output cyclonedx-json=sbom.json --select-catalogers "-cargo-auditable-binary-cataloger" --scope all-layers --source-name "${OPERATOR_NAME}" --source-version "${VERSION}-${ARCH}" "${OCI_REGISTRY_HOSTNAME}/${OCI_REGISTRY_PROJECT_IMAGES}/${OPERATOR_NAME}@$$REPO_DIGEST_OF_IMAGE";\
7778
# Determine the PURL for the container image\
78-
PURL="pkg:docker/${OCI_REGISTRY_PROJECT_IMAGES}/${OPERATOR_NAME}@$$REPO_DIGEST_OF_IMAGE?repository_url=${OCI_REGISTRY_HOSTNAME}";\
79+
URLENCODED_REPO_DIGEST_OF_IMAGE=$$(echo "$$REPO_DIGEST_OF_IMAGE" | sed 's/:/%3A/g');\
80+
PURL="pkg:oci/${OPERATOR_NAME}@$$URLENCODED_REPO_DIGEST_OF_IMAGE?arch=${ARCH}&repository_url=${OCI_REGISTRY_HOSTNAME}%2F${OCI_REGISTRY_PROJECT_IMAGES}%2F${OPERATOR_NAME}";\
7981
# Get metadata from the image\
8082
IMAGE_DESCRIPTION=$$(docker inspect --format='{{.Config.Labels.description}}' "${OCI_REGISTRY_HOSTNAME}/${OCI_REGISTRY_PROJECT_IMAGES}/${OPERATOR_NAME}:${VERSION}-${ARCH}");\
8183
IMAGE_NAME=$$(docker inspect --format='{{.Config.Labels.name}}' "${OCI_REGISTRY_HOSTNAME}/${OCI_REGISTRY_PROJECT_IMAGES}/${OPERATOR_NAME}:${VERSION}-${ARCH}");\

rust-toolchain.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
# DO NOT EDIT, this file is generated by operator-templating
22
[toolchain]
3-
channel = "1.80.1"
3+
channel = "1.81.0"

rust/operator-binary/src/controller.rs

Lines changed: 37 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,7 @@ use stackable_operator::{
5454
},
5555
DeepMerge,
5656
},
57+
kube::core::{error_boundary, DeserializeGuard},
5758
kube::{runtime::controller::Action, Resource, ResourceExt},
5859
kvp::{Label, Labels, ObjectLabels},
5960
logging::controller::ReconcilerError,
@@ -323,6 +324,11 @@ pub enum Error {
323324
AddVolumeMount {
324325
source: builder::pod::container::Error,
325326
},
327+
328+
#[snafu(display("HiveCluster object is invalid"))]
329+
InvalidHiveCluster {
330+
source: error_boundary::InvalidObject,
331+
},
326332
}
327333
type Result<T, E = Error> = std::result::Result<T, E>;
328334

@@ -332,8 +338,16 @@ impl ReconcilerError for Error {
332338
}
333339
}
334340

335-
pub async fn reconcile_hive(hive: Arc<HiveCluster>, ctx: Arc<Ctx>) -> Result<Action> {
341+
pub async fn reconcile_hive(
342+
hive: Arc<DeserializeGuard<HiveCluster>>,
343+
ctx: Arc<Ctx>,
344+
) -> Result<Action> {
336345
tracing::info!("Starting reconcile");
346+
let hive = hive
347+
.0
348+
.as_ref()
349+
.map_err(error_boundary::InvalidObject::clone)
350+
.context(InvalidHiveClusterSnafu)?;
337351
let client = &ctx.client;
338352
let hive_namespace = hive.namespace().context(ObjectHasNoNamespaceSnafu)?;
339353

@@ -361,7 +375,7 @@ pub async fn reconcile_hive(hive: Arc<HiveCluster>, ctx: Arc<Ctx>) -> Result<Act
361375
let validated_config = validate_all_roles_and_groups_config(
362376
&resolved_product_image.product_version,
363377
&transform_all_roles_to_config(
364-
hive.as_ref(),
378+
hive,
365379
[(
366380
HiveRole::MetaStore.to_string(),
367381
(
@@ -399,7 +413,7 @@ pub async fn reconcile_hive(hive: Arc<HiveCluster>, ctx: Arc<Ctx>) -> Result<Act
399413
.context(CreateClusterResourcesSnafu)?;
400414

401415
let (rbac_sa, rbac_rolebinding) = build_rbac_resources(
402-
hive.as_ref(),
416+
hive,
403417
APP_NAME,
404418
cluster_resources
405419
.get_required_labels()
@@ -416,15 +430,15 @@ pub async fn reconcile_hive(hive: Arc<HiveCluster>, ctx: Arc<Ctx>) -> Result<Act
416430
.await
417431
.context(ApplyRoleBindingSnafu)?;
418432

419-
let metastore_role_service = build_metastore_role_service(&hive, &resolved_product_image)?;
433+
let metastore_role_service = build_metastore_role_service(hive, &resolved_product_image)?;
420434

421435
// we have to get the assigned ports
422436
let metastore_role_service = cluster_resources
423437
.add(client, metastore_role_service)
424438
.await
425439
.context(ApplyRoleServiceSnafu)?;
426440

427-
let vector_aggregator_address = resolve_vector_aggregator_address(&hive, client)
441+
let vector_aggregator_address = resolve_vector_aggregator_address(hive, client)
428442
.await
429443
.context(ResolveVectorAggregatorAddressSnafu)?;
430444

@@ -437,9 +451,9 @@ pub async fn reconcile_hive(hive: Arc<HiveCluster>, ctx: Arc<Ctx>) -> Result<Act
437451
.merged_config(&HiveRole::MetaStore, &rolegroup)
438452
.context(FailedToResolveResourceConfigSnafu)?;
439453

440-
let rg_service = build_rolegroup_service(&hive, &resolved_product_image, &rolegroup)?;
454+
let rg_service = build_rolegroup_service(hive, &resolved_product_image, &rolegroup)?;
441455
let rg_configmap = build_metastore_rolegroup_config_map(
442-
&hive,
456+
hive,
443457
&hive_namespace,
444458
&resolved_product_image,
445459
&rolegroup,
@@ -449,7 +463,7 @@ pub async fn reconcile_hive(hive: Arc<HiveCluster>, ctx: Arc<Ctx>) -> Result<Act
449463
vector_aggregator_address.as_deref(),
450464
)?;
451465
let rg_statefulset = build_metastore_rolegroup_statefulset(
452-
&hive,
466+
hive,
453467
&hive_role,
454468
&resolved_product_image,
455469
&rolegroup,
@@ -488,7 +502,7 @@ pub async fn reconcile_hive(hive: Arc<HiveCluster>, ctx: Arc<Ctx>) -> Result<Act
488502
pod_disruption_budget: pdb,
489503
}) = role_config
490504
{
491-
add_pdbs(pdb, &hive, &hive_role, client, &mut cluster_resources)
505+
add_pdbs(pdb, hive, &hive_role, client, &mut cluster_resources)
492506
.await
493507
.context(FailedToCreatePdbSnafu)?;
494508
}
@@ -498,8 +512,8 @@ pub async fn reconcile_hive(hive: Arc<HiveCluster>, ctx: Arc<Ctx>) -> Result<Act
498512
let mut discovery_hash = FnvHasher::with_key(0);
499513
for discovery_cm in discovery::build_discovery_configmaps(
500514
client,
501-
&*hive,
502-
&hive,
515+
hive,
516+
hive,
503517
&resolved_product_image,
504518
&metastore_role_service,
505519
None,
@@ -523,14 +537,11 @@ pub async fn reconcile_hive(hive: Arc<HiveCluster>, ctx: Arc<Ctx>) -> Result<Act
523537
// Serialize as a string to discourage users from trying to parse the value,
524538
// and to keep things flexible if we end up changing the hasher at some point.
525539
discovery_hash: Some(discovery_hash.finish().to_string()),
526-
conditions: compute_conditions(
527-
hive.as_ref(),
528-
&[&ss_cond_builder, &cluster_operation_cond_builder],
529-
),
540+
conditions: compute_conditions(hive, &[&ss_cond_builder, &cluster_operation_cond_builder]),
530541
};
531542

532543
client
533-
.apply_patch_status(OPERATOR_NAME, &*hive, &status)
544+
.apply_patch_status(OPERATOR_NAME, hive, &status)
534545
.await
535546
.context(ApplyStatusSnafu)?;
536547

@@ -1117,8 +1128,16 @@ fn env_var_from_secret(var_name: &str, secret: &str, secret_key: &str) -> EnvVar
11171128
}
11181129
}
11191130

1120-
pub fn error_policy(_obj: Arc<HiveCluster>, _error: &Error, _ctx: Arc<Ctx>) -> Action {
1121-
Action::requeue(*Duration::from_secs(5))
1131+
pub fn error_policy(
1132+
_obj: Arc<DeserializeGuard<HiveCluster>>,
1133+
error: &Error,
1134+
_ctx: Arc<Ctx>,
1135+
) -> Action {
1136+
match error {
1137+
// An invalid HiveCluster was deserialized. Wait for it to change.
1138+
Error::InvalidHiveCluster { .. } => Action::await_change(),
1139+
_ => Action::requeue(*Duration::from_secs(5)),
1140+
}
11221141
}
11231142

11241143
pub fn service_ports() -> Vec<ServicePort> {

rust/operator-binary/src/main.rs

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@ use stackable_operator::{
1717
apps::v1::StatefulSet,
1818
core::v1::{ConfigMap, Service},
1919
},
20+
kube::core::DeserializeGuard,
2021
kube::runtime::{watcher, Controller},
2122
logging::controller::report_controller_reconciled,
2223
CustomResourceExt,
@@ -70,7 +71,7 @@ async fn main() -> anyhow::Result<()> {
7071
.await?;
7172

7273
Controller::new(
73-
watch_namespace.get_api::<HiveCluster>(&client),
74+
watch_namespace.get_api::<DeserializeGuard<HiveCluster>>(&client),
7475
watcher::Config::default(),
7576
)
7677
.owns(

0 commit comments

Comments
 (0)