diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0b8394caf..b24f9d3f4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -82,6 +82,7 @@ All notable changes to this project will be documented in this file.
- vector: Bump to `0.47.0` ([#1152]).
- zookeeper: backport ZOOKEEPER-4846, ZOOKEEPER-4921, ZOOKEEPER-4925 into Zookeeper 3.9.3 ([#1150]).
- testing-tools: Update base image ([#1165]).
+- Use custom versions for patched components ([#1068]).
### Fixed
@@ -146,6 +147,7 @@ All notable changes to this project will be documented in this file.
[#1056]: https://github.com/stackabletech/docker-images/pull/1056
[#1058]: https://github.com/stackabletech/docker-images/pull/1058
[#1060]: https://github.com/stackabletech/docker-images/pull/1060
+[#1068]: https://github.com/stackabletech/docker-images/pull/1068
[#1090]: https://github.com/stackabletech/docker-images/pull/1090
[#1091]: https://github.com/stackabletech/docker-images/pull/1091
[#1093]: https://github.com/stackabletech/docker-images/pull/1093
diff --git a/conf.py b/conf.py
index 2840a9511..d4ebcfd71 100644
--- a/conf.py
+++ b/conf.py
@@ -14,6 +14,7 @@
druid = importlib.import_module("druid.versions")
hadoop = importlib.import_module("hadoop.versions")
hbase = importlib.import_module("hbase.versions")
+hbase_jars = importlib.import_module("hbase.hbase.versions")
hbase_phoenix = importlib.import_module("hbase.phoenix.versions")
hbase_opa_authorizer = importlib.import_module("hbase.hbase-opa-authorizer.versions")
hbase_operator_tools = importlib.import_module("hbase.hbase-operator-tools.versions")
@@ -33,6 +34,7 @@
superset = importlib.import_module("superset.versions")
trino_cli = importlib.import_module("trino-cli.versions")
trino = importlib.import_module("trino.versions")
+trino_jars = importlib.import_module("trino.trino.versions")
trino_storage_connector = importlib.import_module("trino.storage-connector.versions")
kafka_testing_tools = importlib.import_module("kafka-testing-tools.versions")
kcat = importlib.import_module("kafka.kcat.versions")
@@ -47,6 +49,7 @@
{"name": "druid", "versions": druid.versions},
{"name": "hadoop", "versions": hadoop.versions},
{"name": "hbase", "versions": hbase.versions},
+ {"name": "hbase/hbase", "versions": hbase_jars.versions},
{"name": "hbase/phoenix", "versions": hbase_phoenix.versions},
{"name": "hbase/hbase-opa-authorizer", "versions": hbase_opa_authorizer.versions},
{"name": "hbase/hbase-operator-tools", "versions": hbase_operator_tools.versions},
@@ -66,6 +69,7 @@
{"name": "superset", "versions": superset.versions},
{"name": "trino-cli", "versions": trino_cli.versions},
{"name": "trino", "versions": trino.versions},
+ {"name": "trino/trino", "versions": trino_jars.versions},
{"name": "trino/storage-connector", "versions": trino_storage_connector.versions},
{"name": "kafka-testing-tools", "versions": kafka_testing_tools.versions},
{"name": "kafka/kcat", "versions": kcat.versions},
diff --git a/druid/Dockerfile b/druid/Dockerfile
index d8dd84f4f..ba5638b9f 100644
--- a/druid/Dockerfile
+++ b/druid/Dockerfile
@@ -1,14 +1,18 @@
# syntax=docker/dockerfile:1.16.0@sha256:e2dd261f92e4b763d789984f6eab84be66ab4f5f08052316d8eb8f173593acf7
# check=error=true
+FROM stackable/image/hadoop AS hadoop-builder
+
FROM stackable/image/java-devel AS druid-builder
ARG PRODUCT
+ARG RELEASE
ARG JACKSON_DATAFORMAT_XML
ARG STAX2_API
ARG WOODSTOX_CORE
ARG AUTHORIZER
ARG STACKABLE_USER_UID
+ARG HADOOP
# Setting this to anything other than "true" will keep the cache folders around (e.g. for Maven, NPM etc.)
# This can be used to speed up builds when disk space is of no concern.
@@ -35,6 +39,7 @@ WORKDIR /stackable
COPY --chown=${STACKABLE_USER_UID}:0 druid/stackable/patches/patchable.toml /stackable/src/druid/stackable/patches/patchable.toml
COPY --chown=${STACKABLE_USER_UID}:0 druid/stackable/patches/${PRODUCT} /stackable/src/druid/stackable/patches/${PRODUCT}
+COPY --from=hadoop-builder --chown=${STACKABLE_USER_UID}:0 /stackable/patched-libs /stackable/patched-libs
# Cache mounts are owned by root by default
# We need to explicitly give the uid to use which is hardcoded to "1000" in stackable-base
# The cache id has to include the product version that we are building because otherwise
@@ -54,24 +59,33 @@ RUN --mount=type=cache,id=maven-${PRODUCT},uid=${STACKABLE_USER_UID},target=/sta
cd "$(cat /tmp/DRUID_SOURCE_DIR)" || exit 1
rm /tmp/DRUID_SOURCE_DIR
+ORIGINAL_VERSION=$(mvn help:evaluate -Dexpression=project.version -q -DforceStdout)
+NEW_VERSION="${PRODUCT}-stackable${RELEASE}"
+
+mvn versions:set -DnewVersion=$NEW_VERSION
+
+# Make Maven aware of custom Stackable libraries
+cp -r /stackable/patched-libs/maven/* /stackable/.m2/repository
+
# Create snapshot of the source code including custom patches
-tar -czf /stackable/druid-${PRODUCT}-src.tar.gz .
+tar -czf /stackable/druid-${NEW_VERSION}-src.tar.gz .
mvn \
--batch-mode \
--no-transfer-progress \
clean install \
-Pdist,stackable-bundle-contrib-exts \
- -Dmaven.test.skip `# Skip test compilation` \
+ -Dhadoop.compile.version=${HADOOP}-stackable${RELEASE} \
-DskipTests `# Skip test execution` \
-Dcheckstyle.skip `# Skip checkstyle checks. We dont care if the code is properly formatted, it just wastes time` \
-Dmaven.javadoc.skip=true `# Dont generate javadoc` \
-Dmaven.gitcommitid.skip=true `# The gitcommitid plugin cannot work with git workspaces (ie: patchable)` \
$(if [[ ${PRODUCT} != 30.* ]]; then echo --projects '!quidem-ut'; fi) `# This is just a maven module for tests. https://github.com/apache/druid/pull/16867 added https://raw.githubusercontent.com/kgyrtkirk/datasets/repo/ as a Maven repository, which fails to pull for us (Failed to execute goal on project druid-quidem-ut: Could not resolve dependencies for project org.apache.druid:druid-quidem-ut:jar:33.0.0: com.github.kgyrtkirk.datasets:kttm-nested:jar:0.1 was not found in https://build-repo.stackable.tech/repository/maven-public/). By disabling the maven module we dont pull in this weird dependency...`
-mv distribution/target/apache-druid-${PRODUCT}-bin/apache-druid-${PRODUCT} /stackable/
-mv distribution/target/bom.json /stackable/apache-druid-${PRODUCT}/apache-druid-${PRODUCT}.cdx.json
-rm -rf /stackable/apache-druid-${PRODUCT}-src
+mv distribution/target/apache-druid-${NEW_VERSION}-bin/apache-druid-${NEW_VERSION} /stackable/
+sed -i "s/${NEW_VERSION}/${ORIGINAL_VERSION}/g" distribution/target/bom.json
+mv distribution/target/bom.json /stackable/apache-druid-${NEW_VERSION}/apache-druid-${NEW_VERSION}.cdx.json
+rm -rf /stackable/apache-druid-${NEW_VERSION}-src
# We're removing these to make the intermediate layer smaller
# This can be necessary even though it's only a builder image because the GitHub Action Runners only have very limited space available
@@ -83,11 +97,11 @@ if [ "${DELETE_CACHES}" = "true" ] ; then
rm -rf /stackable/.cache/*
fi
-# Do not remove the /stackable/apache-druid-${PRODUCT}/quickstart folder, it is needed for loading the Wikipedia
+# Do not remove the /stackable/apache-druid-${NEW_VERSION}/quickstart folder, as it is needed for loading the Wikipedia
# testdata in kuttl tests and the getting started guide.
# Install OPA authorizer extension.
-curl "https://repo.stackable.tech/repository/packages/druid/druid-opa-authorizer-${AUTHORIZER}.tar.gz" | tar -xzC /stackable/apache-druid-${PRODUCT}/extensions
+curl "https://repo.stackable.tech/repository/packages/druid/druid-opa-authorizer-${AUTHORIZER}.tar.gz" | tar -xzC /stackable/apache-druid-${NEW_VERSION}/extensions
# change groups
chmod -R g=u /stackable
@@ -122,8 +136,8 @@ LABEL io.k8s.description="${DESCRIPTION}"
LABEL io.k8s.display-name="${NAME}"
-COPY --chown=${STACKABLE_USER_UID}:0 --from=druid-builder /stackable/apache-druid-${PRODUCT} /stackable/apache-druid-${PRODUCT}
-COPY --chown=${STACKABLE_USER_UID}:0 --from=druid-builder /stackable/druid-${PRODUCT}-src.tar.gz /stackable
+COPY --chown=${STACKABLE_USER_UID}:0 --from=druid-builder /stackable/apache-druid-${PRODUCT}-stackable${RELEASE} /stackable/apache-druid-${PRODUCT}-stackable${RELEASE}
+COPY --chown=${STACKABLE_USER_UID}:0 --from=druid-builder /stackable/druid-${PRODUCT}-stackable${RELEASE}-src.tar.gz /stackable
COPY --chown=${STACKABLE_USER_UID}:0 druid/stackable/bin /stackable/bin
COPY --chown=${STACKABLE_USER_UID}:0 druid/licenses /licenses
@@ -136,7 +150,7 @@ chown ${STACKABLE_USER_UID}:0 /stackable/package_manifest.txt
chmod g=u /stackable/package_manifest.txt
rm -rf /var/cache/yum
-ln -sf /stackable/apache-druid-${PRODUCT} /stackable/druid
+ln -sf /stackable/apache-druid-${PRODUCT}-stackable${RELEASE} /stackable/druid
chown -h ${STACKABLE_USER_UID}:0 stackable/druid
# Force to overwrite the existing 'run-druid'
@@ -145,7 +159,7 @@ chown -h ${STACKABLE_USER_UID}:0 /stackable/druid/bin/run-druid
# fix missing permissions
chmod -R g=u /stackable/bin
-chmod g=u /stackable/apache-druid-${PRODUCT}
+chmod g=u /stackable/apache-druid-${PRODUCT}-stackable${RELEASE} /stackable/druid-${PRODUCT}-stackable${RELEASE}-src.tar.gz
EOF
# ----------------------------------------
diff --git a/druid/stackable/patches/30.0.1/0010-Fix-CVE-2023-34455.patch b/druid/stackable/patches/30.0.1/0010-Fix-CVE-2023-34455.patch
deleted file mode 100644
index fab4b0f0d..000000000
--- a/druid/stackable/patches/30.0.1/0010-Fix-CVE-2023-34455.patch
+++ /dev/null
@@ -1,43 +0,0 @@
-From f246bea0ec12b167b4fb49dcf775527429715f77 Mon Sep 17 00:00:00 2001
-From: Razvan-Daniel Mihai <84674+razvan@users.noreply.github.com>
-Date: Tue, 28 Jan 2025 17:29:59 +0100
-Subject: Fix CVE-2023-34455
-
-see https://github.com/stackabletech/vulnerabilities/issues/558
-
-At the end of build process, Druid downloads dependencies directly from a remote
-Maven repository ignoring existing patches that have been applyed locally.
-These dependencies include all transitive dependencies too.
-The hadoop client depends on a vulnerable version of the snappy library which
-is then also downloaded even though a newer version is already on the system.
-
-This patch removes the vulnerable jars.
----
- distribution/pom.xml | 14 ++++++++++++++
- 1 file changed, 14 insertions(+)
-
-diff --git a/distribution/pom.xml b/distribution/pom.xml
-index 08b4121287..ba08137c26 100644
---- a/distribution/pom.xml
-+++ b/distribution/pom.xml
-@@ -259,6 +259,20 @@
-
-
-
-+
-+ fix-cve-2023-34455-remove-snappy
-+ package
-+
-+ exec
-+
-+
-+ /usr/bin/rm
-+
-+ ${project.build.directory}/hadoop-dependencies/hadoop-client-api/3.3.6/snappy-java-1.1.8.2.jar
-+ ${project.build.directory}/hadoop-dependencies/hadoop-client-runtime/3.3.6/snappy-java-1.1.8.2.jar
-+
-+
-+
-
-
-
diff --git a/druid/stackable/patches/31.0.1/0010-Fix-CVE-2023-34455.patch b/druid/stackable/patches/31.0.1/0010-Fix-CVE-2023-34455.patch
deleted file mode 100644
index c69d2f85e..000000000
--- a/druid/stackable/patches/31.0.1/0010-Fix-CVE-2023-34455.patch
+++ /dev/null
@@ -1,43 +0,0 @@
-From 90f6dd1211a4d4ced8b3a75b7549b1e68e4b6ee6 Mon Sep 17 00:00:00 2001
-From: Razvan-Daniel Mihai <84674+razvan@users.noreply.github.com>
-Date: Tue, 28 Jan 2025 17:29:59 +0100
-Subject: Fix CVE-2023-34455
-
-see https://github.com/stackabletech/vulnerabilities/issues/558
-
-At the end of build process, Druid downloads dependencies directly from a remote
-Maven repository ignoring existing patches that have been applyed locally.
-These dependencies include all transitive dependencies too.
-The hadoop client depends on a vulnerable version of the snappy library which
-is then also downloaded even though a newer version is already on the system.
-
-This patch removes the vulnerable jars.
----
- distribution/pom.xml | 14 ++++++++++++++
- 1 file changed, 14 insertions(+)
-
-diff --git a/distribution/pom.xml b/distribution/pom.xml
-index a28e34bb6a..4ab7837538 100644
---- a/distribution/pom.xml
-+++ b/distribution/pom.xml
-@@ -259,6 +259,20 @@
-
-
-
-+
-+ fix-cve-2023-34455-remove-snappy
-+ package
-+
-+ exec
-+
-+
-+ /usr/bin/rm
-+
-+ ${project.build.directory}/hadoop-dependencies/hadoop-client-api/3.3.6/snappy-java-1.1.8.2.jar
-+ ${project.build.directory}/hadoop-dependencies/hadoop-client-runtime/3.3.6/snappy-java-1.1.8.2.jar
-+
-+
-+
-
-
-
diff --git a/druid/stackable/patches/33.0.0/0010-Fix-CVE-2023-34455.patch b/druid/stackable/patches/33.0.0/0010-Fix-CVE-2023-34455.patch
deleted file mode 100644
index 8d6b57ebb..000000000
--- a/druid/stackable/patches/33.0.0/0010-Fix-CVE-2023-34455.patch
+++ /dev/null
@@ -1,43 +0,0 @@
-From cd10ee4dc6abb7131f28dbf6e8aceed4af2bc7f8 Mon Sep 17 00:00:00 2001
-From: Razvan-Daniel Mihai <84674+razvan@users.noreply.github.com>
-Date: Tue, 28 Jan 2025 17:29:59 +0100
-Subject: Fix CVE-2023-34455
-
-see https://github.com/stackabletech/vulnerabilities/issues/558
-
-At the end of build process, Druid downloads dependencies directly from a remote
-Maven repository ignoring existing patches that have been applyed locally.
-These dependencies include all transitive dependencies too.
-The hadoop client depends on a vulnerable version of the snappy library which
-is then also downloaded even though a newer version is already on the system.
-
-This patch removes the vulnerable jars.
----
- distribution/pom.xml | 14 ++++++++++++++
- 1 file changed, 14 insertions(+)
-
-diff --git a/distribution/pom.xml b/distribution/pom.xml
-index c8b7e13054..f93af34434 100644
---- a/distribution/pom.xml
-+++ b/distribution/pom.xml
-@@ -261,6 +261,20 @@
-
-
-
-+
-+ fix-cve-2023-34455-remove-snappy
-+ package
-+
-+ exec
-+
-+
-+ /usr/bin/rm
-+
-+ ${project.build.directory}/hadoop-dependencies/hadoop-client-api/3.3.6/snappy-java-1.1.8.2.jar
-+ ${project.build.directory}/hadoop-dependencies/hadoop-client-runtime/3.3.6/snappy-java-1.1.8.2.jar
-+
-+
-+
-
-
-
diff --git a/druid/versions.py b/druid/versions.py
index c881852b7..323868f1c 100644
--- a/druid/versions.py
+++ b/druid/versions.py
@@ -4,6 +4,7 @@
# https://druid.apache.org/docs/30.0.1/operations/java/
"java-base": "17",
"java-devel": "17",
+ "hadoop": "3.3.6",
"authorizer": "0.7.0",
},
{
@@ -11,6 +12,7 @@
# https://druid.apache.org/docs/31.0.1/operations/java/
"java-base": "17",
"java-devel": "17",
+ "hadoop": "3.3.6",
"authorizer": "0.7.0",
},
{
@@ -18,6 +20,7 @@
# https://druid.apache.org/docs/33.0.0/operations/java/
"java-base": "17",
"java-devel": "17",
+ "hadoop": "3.3.6",
"authorizer": "0.7.0",
},
]
diff --git a/hadoop/Dockerfile b/hadoop/Dockerfile
index 36ac0f903..3997fb1fb 100644
--- a/hadoop/Dockerfile
+++ b/hadoop/Dockerfile
@@ -4,6 +4,7 @@
FROM stackable/image/java-devel AS hadoop-builder
ARG PRODUCT
+ARG RELEASE
ARG ASYNC_PROFILER
ARG JMX_EXPORTER
ARG PROTOBUF
@@ -66,6 +67,7 @@ COPY --chown=${STACKABLE_USER_UID}:0 hadoop/stackable/patches/patchable.toml /bu
COPY --chown=${STACKABLE_USER_UID}:0 hadoop/stackable/patches/${PRODUCT} /build/src/hadoop/stackable/patches/${PRODUCT}
COPY --chown=${STACKABLE_USER_UID}:0 hadoop/stackable/fuse_dfs_wrapper /build
COPY --chown=${STACKABLE_USER_UID}:0 hadoop/stackable/jmx /stackable/jmx
+USER ${STACKABLE_USER_UID}
# Hadoop Pipes requires libtirpc to build, whose headers are not packaged in RedHat UBI, so skip building this module
# Build from source to enable FUSE module, and to apply custom patches.
# Also skip building the yarn, mapreduce and minicluster modules: this will result in the modules being excluded but not all
@@ -74,27 +76,42 @@ COPY --chown=${STACKABLE_USER_UID}:0 hadoop/stackable/jmx /stackable/jmx
RUN <hadoop-pipes<\/artifactId>/,/<\/dependency>/ { s/.*<\/version>/'"$ORIGINAL_VERSION"'<\/version>/ }' -i hadoop-tools/hadoop-tools-dist/pom.xml
+
# Create snapshot of the source code including custom patches
-tar -czf /stackable/hadoop-${PRODUCT}-src.tar.gz .
+tar -czf /stackable/hadoop-${NEW_VERSION}-src.tar.gz .
mvn \
--batch-mode \
--no-transfer-progress \
- clean package \
+ clean package install \
-Pdist,native \
- -pl '!hadoop-tools/hadoop-pipes,!hadoop-yarn-project,!hadoop-mapreduce-project,!hadoop-minicluster' \
+ -pl '!hadoop-tools/hadoop-pipes' \
+ -Dhadoop.version=${NEW_VERSION} \
-Drequire.fuse=true \
-DskipTests \
-Dmaven.javadoc.skip=true
-cp -r hadoop-dist/target/hadoop-${PRODUCT} /stackable/hadoop-${PRODUCT}
-mv hadoop-dist/target/bom.json /stackable/hadoop-${PRODUCT}/hadoop-${PRODUCT}.cdx.json
+mkdir -p /stackable/patched-libs/maven/org/apache
+cp -r /stackable/.m2/repository/org/apache/hadoop /stackable/patched-libs/maven/org/apache
+
+cp -r hadoop-dist/target/hadoop-${NEW_VERSION} /stackable/hadoop-${NEW_VERSION}
+sed -i "s/${NEW_VERSION}/${ORIGINAL_VERSION}/g" hadoop-dist/target/bom.json
+mv hadoop-dist/target/bom.json /stackable/hadoop-${NEW_VERSION}/hadoop-${NEW_VERSION}.cdx.json
# HDFS fuse-dfs is not part of the regular dist output, so we need to copy it in ourselves
-cp hadoop-hdfs-project/hadoop-hdfs-native-client/target/main/native/fuse-dfs/fuse_dfs /stackable/hadoop-${PRODUCT}/bin
-rm -rf /build/hadoop-${PRODUCT}-src
+cp hadoop-hdfs-project/hadoop-hdfs-native-client/target/main/native/fuse-dfs/fuse_dfs /stackable/hadoop-${NEW_VERSION}/bin
+
+# Remove source code
+(cd .. && rm -r ${PRODUCT})
-ln -s /stackable/hadoop-${PRODUCT} /stackable/hadoop
+ln -s /stackable/hadoop-${NEW_VERSION} /stackable/hadoop
mv /build/fuse_dfs_wrapper /stackable/hadoop/bin
@@ -109,9 +126,9 @@ rm -rf /stackable/hadoop/share/hadoop/tools/sources/
rm -rf /stackable/hadoop/share/hadoop/tools/lib/json-io-*.jar
rm -rf /stackable/hadoop/share/hadoop/tools/lib/hadoop-mapreduce-client-*.jar
rm -rf /stackable/hadoop/share/hadoop/tools/lib/hadoop-yarn-server*.jar
-find /stackable -name 'hadoop-minicluster-*.jar' -type f -delete
-find /stackable -name 'hadoop-client-minicluster-*.jar' -type f -delete
-find /stackable -name 'hadoop-*tests.jar' -type f -delete
+find /stackable/hadoop -name 'hadoop-minicluster-*.jar' -type f -delete
+find /stackable/hadoop -name 'hadoop-client-minicluster-*.jar' -type f -delete
+find /stackable/hadoop -name 'hadoop-*tests.jar' -type f -delete
rm -rf /stackable/.m2
# Set correct groups; make sure only required artifacts for the final image are located in /stackable
@@ -187,7 +204,7 @@ LABEL \
description="This image is deployed by the Stackable Operator for Apache Hadoop / HDFS."
COPY --chown=${STACKABLE_USER_UID}:0 --from=hadoop-builder /stackable /stackable
-COPY --chown=${STACKABLE_USER_UID}:0 --from=hdfs-utils-builder /stackable/hdfs-utils-${HDFS_UTILS}.jar /stackable/hadoop-${PRODUCT}/share/hadoop/common/lib/hdfs-utils-${HDFS_UTILS}.jar
+COPY --chown=${STACKABLE_USER_UID}:0 --from=hdfs-utils-builder /stackable/hdfs-utils-${HDFS_UTILS}.jar /stackable/hadoop-${PRODUCT}-stackable${RELEASE}/share/hadoop/common/lib/hdfs-utils-${HDFS_UTILS}.jar
COPY --chown=${STACKABLE_USER_UID}:0 --from=hdfs-utils-builder /stackable/hdfs-utils-${HDFS_UTILS}-src.tar.gz /stackable
COPY --chown=${STACKABLE_USER_UID}:0 hadoop/licenses /licenses
diff --git a/hadoop/stackable/patches/3.3.6/0007-Bump-Snappy-version-to-fix-CVEs.patch b/hadoop/stackable/patches/3.3.6/0006-Bump-Snappy-version-to-fix-CVEs.patch
similarity index 92%
rename from hadoop/stackable/patches/3.3.6/0007-Bump-Snappy-version-to-fix-CVEs.patch
rename to hadoop/stackable/patches/3.3.6/0006-Bump-Snappy-version-to-fix-CVEs.patch
index a6711920a..685784e11 100644
--- a/hadoop/stackable/patches/3.3.6/0007-Bump-Snappy-version-to-fix-CVEs.patch
+++ b/hadoop/stackable/patches/3.3.6/0006-Bump-Snappy-version-to-fix-CVEs.patch
@@ -1,4 +1,4 @@
-From 8cd8cdc424ff7cf410fb84941fd6d7777ec91913 Mon Sep 17 00:00:00 2001
+From 5ac18a1341bd8412c4f9cb40cef3aa13581d47a3 Mon Sep 17 00:00:00 2001
From: Andrew Kenworthy
Date: Thu, 16 May 2024 16:44:14 +0200
Subject: Bump Snappy version to fix CVEs
diff --git a/hadoop/stackable/patches/3.3.6/0006-HDFS-17378-Fix-missing-operationType-for-some-operat.patch b/hadoop/stackable/patches/3.3.6/0006-HDFS-17378-Fix-missing-operationType-for-some-operat.patch
deleted file mode 100644
index 2e3e33690..000000000
--- a/hadoop/stackable/patches/3.3.6/0006-HDFS-17378-Fix-missing-operationType-for-some-operat.patch
+++ /dev/null
@@ -1,201 +0,0 @@
-From 36ed6731ce3afa4ccacb40c1c82dfc81e0e80483 Mon Sep 17 00:00:00 2001
-From: Sebastian Bernauer
-Date: Thu, 15 Feb 2024 15:33:43 +0100
-Subject: HDFS-17378: Fix missing operationType for some operations in
- authorizer
-
----
- .../hdfs/server/namenode/FSNamesystem.java | 41 +++++++++++--------
- 1 file changed, 24 insertions(+), 17 deletions(-)
-
-diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
-index 9855b434e9..b3781ee1dd 100644
---- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
-+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
-@@ -2530,15 +2530,16 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
- * @throws IOException
- */
- BlockStoragePolicy getStoragePolicy(String src) throws IOException {
-+ final String operationName = "getStoragePolicy";
- checkOperation(OperationCategory.READ);
- final FSPermissionChecker pc = getPermissionChecker();
-- FSPermissionChecker.setOperationType(null);
-+ FSPermissionChecker.setOperationType(operationName);
- readLock();
- try {
- checkOperation(OperationCategory.READ);
- return FSDirAttrOp.getStoragePolicy(dir, pc, blockManager, src);
- } finally {
-- readUnlock("getStoragePolicy");
-+ readUnlock(operationName);
- }
- }
-
-@@ -2558,15 +2559,16 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
- }
-
- long getPreferredBlockSize(String src) throws IOException {
-+ final String operationName = "getPreferredBlockSize";
- checkOperation(OperationCategory.READ);
- final FSPermissionChecker pc = getPermissionChecker();
-- FSPermissionChecker.setOperationType(null);
-+ FSPermissionChecker.setOperationType(operationName);
- readLock();
- try {
- checkOperation(OperationCategory.READ);
- return FSDirAttrOp.getPreferredBlockSize(dir, pc, src);
- } finally {
-- readUnlock("getPreferredBlockSize");
-+ readUnlock(operationName);
- }
- }
-
-@@ -2619,7 +2621,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
- boolean createParent, short replication, long blockSize,
- CryptoProtocolVersion[] supportedVersions, String ecPolicyName,
- String storagePolicy, boolean logRetryCache) throws IOException {
--
- HdfsFileStatus status;
- try {
- status = startFileInt(src, permissions, holder, clientMachine, flag,
-@@ -2639,6 +2640,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
- long blockSize, CryptoProtocolVersion[] supportedVersions,
- String ecPolicyName, String storagePolicy, boolean logRetryCache)
- throws IOException {
-+ final String operationName = "create";
- if (NameNode.stateChangeLog.isDebugEnabled()) {
- StringBuilder builder = new StringBuilder();
- builder.append("DIR* NameSystem.startFile: src=").append(src)
-@@ -2676,7 +2678,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
-
- checkOperation(OperationCategory.WRITE);
- final FSPermissionChecker pc = getPermissionChecker();
-- FSPermissionChecker.setOperationType(null);
-+ FSPermissionChecker.setOperationType(operationName);
- writeLock();
- try {
- checkOperation(OperationCategory.WRITE);
-@@ -2740,7 +2742,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
- dir.writeUnlock();
- }
- } finally {
-- writeUnlock("create");
-+ writeUnlock(operationName);
- // There might be transactions logged while trying to recover the lease.
- // They need to be sync'ed even when an exception was thrown.
- if (!skipSync) {
-@@ -2769,10 +2771,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
- */
- boolean recoverLease(String src, String holder, String clientMachine)
- throws IOException {
-+ final String operationName = "recoverLease";
- boolean skipSync = false;
- checkOperation(OperationCategory.WRITE);
- final FSPermissionChecker pc = getPermissionChecker();
-- FSPermissionChecker.setOperationType(null);
-+ FSPermissionChecker.setOperationType(operationName);
- writeLock();
- try {
- checkOperation(OperationCategory.WRITE);
-@@ -2793,7 +2796,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
- skipSync = true;
- throw se;
- } finally {
-- writeUnlock("recoverLease");
-+ writeUnlock(operationName);
- // There might be transactions logged while trying to recover the lease.
- // They need to be sync'ed even when an exception was thrown.
- if (!skipSync) {
-@@ -3010,6 +3013,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
- final Set excludes,
- final int numAdditionalNodes, final String clientName
- ) throws IOException {
-+ final String operationName = "getAdditionalDatanode";
- //check if the feature is enabled
- dtpReplaceDatanodeOnFailure.checkEnabled();
-
-@@ -3021,7 +3025,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
- final BlockType blockType;
- checkOperation(OperationCategory.WRITE);
- final FSPermissionChecker pc = getPermissionChecker();
-- FSPermissionChecker.setOperationType(null);
-+ FSPermissionChecker.setOperationType(operationName);
- readLock();
- try {
- // Changing this operation category to WRITE instead of making getAdditionalDatanode as a
-@@ -3047,7 +3051,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
- "src=%s, fileId=%d, blk=%s, clientName=%s, clientMachine=%s",
- src, fileId, blk, clientName, clientMachine));
- } finally {
-- readUnlock("getAdditionalDatanode");
-+ readUnlock(operationName);
- }
-
- if (clientnode == null) {
-@@ -3069,11 +3073,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
- */
- void abandonBlock(ExtendedBlock b, long fileId, String src, String holder)
- throws IOException {
-+ final String operationName = "abandonBlock";
- NameNode.stateChangeLog.debug(
- "BLOCK* NameSystem.abandonBlock: {} of file {}", b, src);
- checkOperation(OperationCategory.WRITE);
- final FSPermissionChecker pc = getPermissionChecker();
-- FSPermissionChecker.setOperationType(null);
-+ FSPermissionChecker.setOperationType(operationName);
- writeLock();
- try {
- checkOperation(OperationCategory.WRITE);
-@@ -3082,7 +3087,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
- NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: {} is " +
- "removed from pendingCreates", b);
- } finally {
-- writeUnlock("abandonBlock");
-+ writeUnlock(operationName);
- }
- getEditLog().logSync();
- }
-@@ -3136,10 +3141,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
- boolean completeFile(final String src, String holder,
- ExtendedBlock last, long fileId)
- throws IOException {
-+ final String operationName = "completeFile";
- boolean success = false;
- checkOperation(OperationCategory.WRITE);
- final FSPermissionChecker pc = getPermissionChecker();
-- FSPermissionChecker.setOperationType(null);
-+ FSPermissionChecker.setOperationType(operationName);
- writeLock();
- try {
- checkOperation(OperationCategory.WRITE);
-@@ -3147,7 +3153,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
- success = FSDirWriteFileOp.completeFile(this, pc, src, holder, last,
- fileId);
- } finally {
-- writeUnlock("completeFile");
-+ writeUnlock(operationName);
- }
- getEditLog().logSync();
- if (success) {
-@@ -3572,10 +3578,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
- */
- void fsync(String src, long fileId, String clientName, long lastBlockLength)
- throws IOException {
-+ final String operationName = "fsync";
- NameNode.stateChangeLog.info("BLOCK* fsync: " + src + " for " + clientName);
- checkOperation(OperationCategory.WRITE);
- final FSPermissionChecker pc = getPermissionChecker();
-- FSPermissionChecker.setOperationType(null);
-+ FSPermissionChecker.setOperationType(operationName);
- writeLock();
- try {
- checkOperation(OperationCategory.WRITE);
-@@ -3589,7 +3596,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
- }
- FSDirWriteFileOp.persistBlocks(dir, src, pendingFile, false);
- } finally {
-- writeUnlock("fsync");
-+ writeUnlock(operationName);
- }
- getEditLog().logSync();
- }
diff --git a/hadoop/stackable/patches/3.3.6/0008-Update-CycloneDX-plugin.patch b/hadoop/stackable/patches/3.3.6/0007-Update-CycloneDX-plugin.patch
similarity index 96%
rename from hadoop/stackable/patches/3.3.6/0008-Update-CycloneDX-plugin.patch
rename to hadoop/stackable/patches/3.3.6/0007-Update-CycloneDX-plugin.patch
index ef27fb2a4..600065bbb 100644
--- a/hadoop/stackable/patches/3.3.6/0008-Update-CycloneDX-plugin.patch
+++ b/hadoop/stackable/patches/3.3.6/0007-Update-CycloneDX-plugin.patch
@@ -1,4 +1,4 @@
-From bb767718387bcc1c49e5780e5d1a7a79fde99f15 Mon Sep 17 00:00:00 2001
+From 2011e9efa3324ca33bb616c752010e7afe7c6e9b Mon Sep 17 00:00:00 2001
From: Lukas Voetmand
Date: Fri, 6 Sep 2024 17:53:52 +0200
Subject: Update CycloneDX plugin
diff --git a/hadoop/stackable/patches/3.3.6/0009-HADOOP-18516-ABFS-Authentication-Support-Fixed-SAS-T.patch b/hadoop/stackable/patches/3.3.6/0008-HADOOP-18516-ABFS-Authentication-Support-Fixed-SAS-T.patch
similarity index 99%
rename from hadoop/stackable/patches/3.3.6/0009-HADOOP-18516-ABFS-Authentication-Support-Fixed-SAS-T.patch
rename to hadoop/stackable/patches/3.3.6/0008-HADOOP-18516-ABFS-Authentication-Support-Fixed-SAS-T.patch
index 41d6c9447..ec03de243 100644
--- a/hadoop/stackable/patches/3.3.6/0009-HADOOP-18516-ABFS-Authentication-Support-Fixed-SAS-T.patch
+++ b/hadoop/stackable/patches/3.3.6/0008-HADOOP-18516-ABFS-Authentication-Support-Fixed-SAS-T.patch
@@ -1,4 +1,4 @@
-From 3864664a22a8c75d79774c77a7c88f5d54085f5d Mon Sep 17 00:00:00 2001
+From 35fe64a26c4061bc55d7ac67e0fbee9f2770686f Mon Sep 17 00:00:00 2001
From: Anuj Modi <128447756+anujmodi2021@users.noreply.github.com>
Date: Fri, 7 Jun 2024 19:03:23 +0530
Subject: HADOOP-18516: [ABFS][Authentication] Support Fixed SAS Token for ABFS
diff --git a/hadoop/stackable/patches/3.3.6/0009-Build-hadoop-client-modules-before-hadoop-dist.patch b/hadoop/stackable/patches/3.3.6/0009-Build-hadoop-client-modules-before-hadoop-dist.patch
new file mode 100644
index 000000000..3f4958a0a
--- /dev/null
+++ b/hadoop/stackable/patches/3.3.6/0009-Build-hadoop-client-modules-before-hadoop-dist.patch
@@ -0,0 +1,25 @@
+From 773274ac10c910e70b73fc0089e8340d3ec2c9c0 Mon Sep 17 00:00:00 2001
+From: dervoeti
+Date: Thu, 10 Apr 2025 11:11:44 +0200
+Subject: Build hadoop-client-modules before hadoop-dist
+
+This is needed because dist depends on parts of client-modules. At least when specifying a custom version while building Hadoop, Maven for some reason does not build the client-modules before dist, and the build fails.
+---
+ pom.xml | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/pom.xml b/pom.xml
+index de001775ab..ccb15235c8 100644
+--- a/pom.xml
++++ b/pom.xml
+@@ -133,9 +133,9 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
+     <module>hadoop-yarn-project</module>
+     <module>hadoop-mapreduce-project</module>
+     <module>hadoop-tools</module>
++    <module>hadoop-client-modules</module>
+     <module>hadoop-dist</module>
+     <module>hadoop-minicluster</module>
+-    <module>hadoop-client-modules</module>
+     <module>hadoop-build-tools</module>
+     <module>hadoop-cloud-storage-project</module>
+
diff --git a/hadoop/stackable/patches/3.3.6/0010-HADOOP-18496.-Upgrade-okhttp3-and-dependencies-due-t.patch b/hadoop/stackable/patches/3.3.6/0010-HADOOP-18496.-Upgrade-okhttp3-and-dependencies-due-t.patch
new file mode 100644
index 000000000..4699d88a2
--- /dev/null
+++ b/hadoop/stackable/patches/3.3.6/0010-HADOOP-18496.-Upgrade-okhttp3-and-dependencies-due-t.patch
@@ -0,0 +1,147 @@
+From 7380a6d8a531bf3191726dcba85ea2d6c166ed59 Mon Sep 17 00:00:00 2001
+From: PJ Fanning
+Date: Sat, 12 Nov 2022 15:14:19 +0100
+Subject: HADOOP-18496. Upgrade okhttp3 and dependencies due to kotlin CVEs
+ (#5035)
+
+Updates okhttp3 and okio so their transitive dependency on Kotlin
+stdlib is free from recent CVEs.
+
+okhttp3:okhttp => 4.10.0
+okio:okio => 3.2.0
+kotlin stdlib => 1.6.20
+
+kotlin CVEs fixed:
+ CVE-2022-24329
+ CVE-2020-29582
+
+Contributed by PJ Fanning.
+---
+ LICENSE-binary | 4 +--
+ .../hadoop-client-runtime/pom.xml | 2 ++
+ hadoop-common-project/hadoop-common/pom.xml | 5 ++++
+ .../hadoop-hdfs-client/pom.xml | 10 +++++++
+ hadoop-project/pom.xml | 28 ++++++++++++++++---
+ 5 files changed, 43 insertions(+), 6 deletions(-)
+
+diff --git a/LICENSE-binary b/LICENSE-binary
+index 834eeb2625..0fe5f74a71 100644
+--- a/LICENSE-binary
++++ b/LICENSE-binary
+@@ -243,8 +243,8 @@ com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava
+ com.google.j2objc:j2objc-annotations:1.3
+ com.microsoft.azure:azure-storage:7.0.1
+ com.nimbusds:nimbus-jose-jwt:9.8.1
+-com.squareup.okhttp3:okhttp:4.9.3
+-com.squareup.okio:okio:2.8.0
++com.squareup.okhttp3:okhttp:4.10.0
++com.squareup.okio:okio:3.2.0
+ com.yammer.metrics:metrics-core:2.2.0
+ com.zaxxer:HikariCP-java7:2.4.12
+ commons-beanutils:commons-beanutils:1.9.4
+diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml b/hadoop-client-modules/hadoop-client-runtime/pom.xml
+index 440bbfcdc0..a44d696dff 100644
+--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
++++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
+@@ -163,6 +163,8 @@
+ org.bouncycastle:*
+
+ org.xerial.snappy:*
++
++ org.jetbrains.kotlin:*
+
+
+
+diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
+index 9c7657b53a..2add8220c5 100644
+--- a/hadoop-common-project/hadoop-common/pom.xml
++++ b/hadoop-common-project/hadoop-common/pom.xml
+@@ -373,6 +373,11 @@
+       <artifactId>mockwebserver</artifactId>
+       <scope>test</scope>
+     </dependency>
++    <dependency>
++      <groupId>com.squareup.okio</groupId>
++      <artifactId>okio-jvm</artifactId>
++      <scope>test</scope>
++    </dependency>
+     <dependency>
+       <groupId>dnsjava</groupId>
+       <artifactId>dnsjava</artifactId>
+diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
+index 939da7643a..26475845d7 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
++++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
+@@ -37,6 +37,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+ com.squareup.okhttp3
+ okhttp
++
++
++ com.squareup.okio
++ okio-jvm
++
++
++
++
++ com.squareup.okio
++ okio-jvm
+
+
+ org.jetbrains.kotlin
+diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
+index da39c1e0ad..78c9bc4e9d 100644
+--- a/hadoop-project/pom.xml
++++ b/hadoop-project/pom.xml
+@@ -135,9 +135,10 @@
+ 2.4.12
+ 10.14.2.0
+ 6.2.1.jre7
+- 4.9.3
+- 1.4.10
+- 1.4.10
++ 4.10.0
++ 3.2.0
++ 1.6.20
++ 1.6.20
+ 1.1
+ 5.2.0
+ 2.9.0
+@@ -234,8 +235,17 @@
+ org.jetbrains.kotlin
+ kotlin-stdlib-common
+
++
++ com.squareup.okio
++ okio-jvm
++
+
+
++
++ com.squareup.okio
++ okio-jvm
++ ${okio.version}
++
+
+ org.jetbrains.kotlin
+ kotlin-stdlib
+@@ -255,8 +265,18 @@
+
+ com.squareup.okhttp3
+ mockwebserver
+- 4.9.3
++ ${okhttp3.version}
+ test
++
++
++ com.squareup.okio
++ okio-jvm
++
++
++ org.jetbrains.kotlin
++ kotlin-stdlib-jdk8
++
++
+
+
+ jdiff
diff --git a/hadoop/stackable/patches/3.3.6/0011-HADOOP-18837.-Upgrade-okio-to-3.4.0-due-to-CVE-2023-.patch b/hadoop/stackable/patches/3.3.6/0011-HADOOP-18837.-Upgrade-okio-to-3.4.0-due-to-CVE-2023-.patch
new file mode 100644
index 000000000..fb6beb09b
--- /dev/null
+++ b/hadoop/stackable/patches/3.3.6/0011-HADOOP-18837.-Upgrade-okio-to-3.4.0-due-to-CVE-2023-.patch
@@ -0,0 +1,37 @@
+From 592af8efd1ff709ddde6534e7cfb77fee6b6b2a9 Mon Sep 17 00:00:00 2001
+From: rohit-kb <115476286+rohit-kb@users.noreply.github.com>
+Date: Tue, 8 Aug 2023 18:07:20 +0530
+Subject: HADOOP-18837. Upgrade okio to 3.4.0 due to CVE-2023-3635. (#5914)
+
+Contributed by Rohit Kumar
+---
+ LICENSE-binary | 2 +-
+ hadoop-project/pom.xml | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/LICENSE-binary b/LICENSE-binary
+index 0fe5f74a71..ce9589a9e4 100644
+--- a/LICENSE-binary
++++ b/LICENSE-binary
+@@ -244,7 +244,7 @@ com.google.j2objc:j2objc-annotations:1.3
+ com.microsoft.azure:azure-storage:7.0.1
+ com.nimbusds:nimbus-jose-jwt:9.8.1
+ com.squareup.okhttp3:okhttp:4.10.0
+-com.squareup.okio:okio:3.2.0
++com.squareup.okio:okio:3.4.0
+ com.yammer.metrics:metrics-core:2.2.0
+ com.zaxxer:HikariCP-java7:2.4.12
+ commons-beanutils:commons-beanutils:1.9.4
+diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
+index 78c9bc4e9d..90b913755b 100644
+--- a/hadoop-project/pom.xml
++++ b/hadoop-project/pom.xml
+@@ -136,7 +136,7 @@
+ 10.14.2.0
+ 6.2.1.jre7
+ 4.10.0
+- 3.2.0
++ 3.4.0
+ 1.6.20
+ 1.6.20
+ 1.1
diff --git a/hadoop/stackable/patches/3.3.6/0012-Remove-Hadoop-benchmark.patch b/hadoop/stackable/patches/3.3.6/0012-Remove-Hadoop-benchmark.patch
new file mode 100644
index 000000000..f3a726de2
--- /dev/null
+++ b/hadoop/stackable/patches/3.3.6/0012-Remove-Hadoop-benchmark.patch
@@ -0,0 +1,21 @@
+From cd8588f77b1f603837a4e6a5686173308849be72 Mon Sep 17 00:00:00 2001
+From: dervoeti
+Date: Tue, 27 May 2025 16:43:48 +0200
+Subject: Remove Hadoop benchmark
+
+---
+ hadoop-tools/pom.xml | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml
+index 88b3da867b..08811db902 100644
+--- a/hadoop-tools/pom.xml
++++ b/hadoop-tools/pom.xml
+@@ -50,7 +50,6 @@
+     <module>hadoop-azure-datalake</module>
+     <module>hadoop-aliyun</module>
+     <module>hadoop-fs2img</module>
+-    <module>hadoop-benchmark</module>
+
+
+
diff --git a/hbase/Dockerfile b/hbase/Dockerfile
index 0d93e48b5..6f9b650c5 100644
--- a/hbase/Dockerfile
+++ b/hbase/Dockerfile
@@ -3,111 +3,20 @@
FROM stackable/image/hadoop AS hadoop-builder
+FROM stackable/image/hbase/hbase AS hbase-builder
+
FROM stackable/image/hbase/phoenix AS phoenix
FROM stackable/image/hbase/hbase-operator-tools AS hbase-operator-tools
FROM stackable/image/hbase/hbase-opa-authorizer AS hbase-opa-authorizer
-FROM stackable/image/java-devel AS hbase-builder
-
-ARG PRODUCT
-ARG HBASE_HBASE_OPERATOR_TOOLS
-ARG ASYNC_PROFILER
-ARG HBASE_PROFILE
-ARG HADOOP
-ARG TARGETARCH
-ARG TARGETOS
-ARG STACKABLE_USER_UID
-
-# Setting this to anything other than "true" will keep the cache folders around (e.g. for Maven, NPM etc.)
-# This can be used to speed up builds when disk space is of no concern.
-ARG DELETE_CACHES="true"
-
-COPY hbase/licenses /licenses
-
-USER ${STACKABLE_USER_UID}
-WORKDIR /stackable
-
-COPY --chown=${STACKABLE_USER_UID}:0 hbase/stackable/patches/patchable.toml /stackable/src/hbase/stackable/patches/patchable.toml
-COPY --chown=${STACKABLE_USER_UID}:0 hbase/stackable/patches/${PRODUCT} /stackable/src/hbase/stackable/patches/${PRODUCT}
-
-# Cache mounts are owned by root by default
-# We need to explicitly give the uid to use
-# And every cache needs its own id, we can't share them between stages because we might delete the caches
-# at the end of a run while other stages are still using it.
-# While this might work in theory it didn't in practice (FileNotFound exceptions etc.)
-
-# The cache id has to include the product version that we are building because otherwise
-# docker encounters race conditions when building multiple versions in parallel, as all
-# builder containers will share the same cache and the `rm -rf` commands will fail
-# with a "directory not empty" error on the first builder to finish, as other builders
-# are still working in the cache directory.
-RUN --mount=type=cache,id=maven-hbase-${PRODUCT},uid=${STACKABLE_USER_UID},target=/stackable/.m2/repository < /stackable/bin/export-snapshot-to-s3
+envsubst '${HBASE_HBASE}:${LIBS}' < /stackable/bin/export-snapshot-to-s3.env > /stackable/bin/export-snapshot-to-s3
chmod +x /stackable/bin/export-snapshot-to-s3
rm /stackable/bin/export-snapshot-to-s3.env
@@ -148,6 +57,7 @@ ARG PRODUCT
ARG RELEASE
ARG HADOOP
ARG HBASE_PROFILE
+ARG HBASE_HBASE
ARG HBASE_HBASE_OPERATOR_TOOLS
ARG HBASE_HBASE_OPA_AUTHORIZER
ARG HBASE_PHOENIX
@@ -175,31 +85,31 @@ LABEL io.openshift.tags="ubi9,stackable,hbase,sdp,nosql"
LABEL io.k8s.description="${DESCRIPTION}"
LABEL io.k8s.display-name="${NAME}"
-COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-builder /stackable/hbase-${PRODUCT} /stackable/hbase-${PRODUCT}/
-COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-builder /stackable/hbase-${PRODUCT}-src.tar.gz /stackable
+COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-builder /stackable/hbase-${HBASE_HBASE}-stackable${RELEASE} /stackable/hbase-${HBASE_HBASE}-stackable${RELEASE}/
+COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-builder /stackable/hbase-${HBASE_HBASE}-stackable${RELEASE}-src.tar.gz /stackable
COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-builder /stackable/async-profiler /stackable/async-profiler/
-COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-operator-tools /stackable/hbase-operator-tools-${HBASE_HBASE_OPERATOR_TOOLS} /stackable/hbase-operator-tools-${HBASE_HBASE_OPERATOR_TOOLS}/
-COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-operator-tools /stackable/hbase-operator-tools-${HBASE_HBASE_OPERATOR_TOOLS}-src.tar.gz /stackable
+COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-operator-tools /stackable/hbase-operator-tools-${HBASE_HBASE_OPERATOR_TOOLS}-stackable${RELEASE} /stackable/hbase-operator-tools-${HBASE_HBASE_OPERATOR_TOOLS}-stackable${RELEASE}/
+COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-operator-tools /stackable/hbase-operator-tools-${HBASE_HBASE_OPERATOR_TOOLS}-stackable${RELEASE}-src.tar.gz /stackable
COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-operator-tools /stackable/bin/hbck2 /stackable/bin/hbck2
-COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-operator-tools /stackable/bin/hbase-entrypoint.sh /stackable/hbase-${PRODUCT}/bin/hbase-entrypoint.sh
+COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-operator-tools /stackable/bin/hbase-entrypoint.sh /stackable/hbase-${HBASE_HBASE}-stackable${RELEASE}/bin/hbase-entrypoint.sh
COPY --chown=${STACKABLE_USER_UID}:0 --from=phoenix /stackable/phoenix /stackable/phoenix/
-COPY --chown=${STACKABLE_USER_UID}:0 --from=phoenix /stackable/phoenix-${HBASE_PHOENIX}-src.tar.gz /stackable
+COPY --chown=${STACKABLE_USER_UID}:0 --from=phoenix /stackable/phoenix-${HBASE_PHOENIX}-stackable${RELEASE}-src.tar.gz /stackable
COPY --chown=${STACKABLE_USER_UID}:0 --from=hadoop-s3-builder /stackable/bin/export-snapshot-to-s3 /stackable/bin/export-snapshot-to-s3
COPY --chown=${STACKABLE_USER_UID}:0 --from=hadoop-s3-builder /stackable/hadoop/share/hadoop/tools/lib/ /stackable/hadoop/share/hadoop/tools/lib/
# Copy the dependencies from Hadoop which are required for the Azure Data Lake
-# Storage (ADLS) to /stackable/hbase-${PRODUCT}/lib which is on the classpath.
+# Storage (ADLS) to /stackable/hbase-${HBASE_HBASE}-stackable${RELEASE}/lib which is on the classpath.
# hadoop-azure-${HADOOP}.jar contains the AzureBlobFileSystem which is required
# by hadoop-common-${HADOOP}.jar if the scheme of a file system is "abfs://".
COPY --chown=${STACKABLE_USER_UID}:0 --from=hadoop-builder \
- /stackable/hadoop/share/hadoop/tools/lib/hadoop-azure-${HADOOP}.jar \
- /stackable/hbase-${PRODUCT}/lib/
+ /stackable/hadoop/share/hadoop/tools/lib/hadoop-azure-${HADOOP}-stackable${RELEASE}.jar \
+ /stackable/hbase-${HBASE_HBASE}-stackable${RELEASE}/lib/
COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-opa-authorizer /stackable/hbase-opa-authorizer-${HBASE_HBASE_OPA_AUTHORIZER}-src.tar.gz /stackable
-COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-opa-authorizer /stackable/hbase-opa-authorizer/target/hbase-opa-authorizer*.jar /stackable/hbase-${PRODUCT}/lib
+COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-opa-authorizer /stackable/hbase-opa-authorizer/target/hbase-opa-authorizer*.jar /stackable/hbase-${HBASE_HBASE}-stackable${RELEASE}/lib
RUN < /stackable/bin/hbck2
+PATCHED_HBASE_OPERATOR_TOOLS_VERSION=${NEW_VERSION}
+export PATCHED_HBASE_VERSION FULL_HBASE_OPERATOR_TOOLS_VERSION PATCHED_HBASE_OPERATOR_TOOLS_VERSION
+envsubst '${PATCHED_HBASE_VERSION}:${FULL_HBASE_OPERATOR_TOOLS_VERSION}:${PATCHED_HBASE_OPERATOR_TOOLS_VERSION}' < /stackable/bin/hbck2.env > /stackable/bin/hbck2
chmod +x /stackable/bin/hbck2
rm /stackable/bin/hbck2.env
diff --git a/hbase/hbase-operator-tools/versions.py b/hbase/hbase-operator-tools/versions.py
index 22a6aaf41..9fbee3e66 100644
--- a/hbase/hbase-operator-tools/versions.py
+++ b/hbase/hbase-operator-tools/versions.py
@@ -1,15 +1,19 @@
versions = [
{
- "product": "1.2.0",
- "hbase_thirdparty": "4.1.5",
- "hbase_version": "2.4.18",
+ "product": "1.3.0-fd5a5fb-hbase2.6.1",
+ "hbase_operator_tools_version": "1.3.0-fd5a5fb",
+ "hadoop": "3.3.6",
+ "hbase_thirdparty": "4.1.9",
+ "hbase/hbase": "2.6.1",
"java-devel": "11",
"delete_caches": "true",
},
{
- "product": "1.3.0-fd5a5fb",
+ "product": "1.3.0-fd5a5fb-hbase2.6.2",
+ "hbase_operator_tools_version": "1.3.0-fd5a5fb",
+ "hadoop": "3.4.1",
"hbase_thirdparty": "4.1.9",
- "hbase_version": "2.6.1",
+ "hbase/hbase": "2.6.2",
"java-devel": "11",
"delete_caches": "true",
},
diff --git a/hbase/hbase/Dockerfile b/hbase/hbase/Dockerfile
new file mode 100644
index 000000000..62e347260
--- /dev/null
+++ b/hbase/hbase/Dockerfile
@@ -0,0 +1,104 @@
+FROM stackable/image/hadoop AS hadoop-builder
+
+FROM stackable/image/java-devel AS hbase-builder
+
+ARG PRODUCT
+ARG RELEASE
+ARG ASYNC_PROFILER
+ARG HADOOP
+ARG TARGETARCH
+ARG TARGETOS
+ARG STACKABLE_USER_UID
+
+# Setting this to anything other than "true" will keep the cache folders around (e.g. for Maven, NPM etc.)
+# This can be used to speed up builds when disk space is of no concern.
+ARG DELETE_CACHES="true"
+
+COPY hbase/licenses /licenses
+
+USER ${STACKABLE_USER_UID}
+WORKDIR /stackable
+
+COPY --chown=${STACKABLE_USER_UID}:0 hbase/hbase/stackable/patches/patchable.toml /stackable/src/hbase/hbase/stackable/patches/patchable.toml
+COPY --chown=${STACKABLE_USER_UID}:0 hbase/hbase/stackable/patches/${PRODUCT} /stackable/src/hbase/hbase/stackable/patches/${PRODUCT}
+
+COPY --from=hadoop-builder --chown=${STACKABLE_USER_UID}:0 /stackable/patched-libs /stackable/patched-libs
+# Cache mounts are owned by root by default
+# We need to explicitly give the uid to use
+# And every cache needs its own id, we can't share them between stages because we might delete the caches
+# at the end of a run while other stages are still using it.
+# While this might work in theory it didn't in practice (FileNotFound exceptions etc.)
+
+# The cache id has to include the product version that we are building because otherwise
+# docker encounters race conditions when building multiple versions in parallel, as all
+# builder containers will share the same cache and the `rm -rf` commands will fail
+# with a "directory not empty" error on the first builder to finish, as other builders
+# are still working in the cache directory.
+RUN --mount=type=cache,id=maven-hbase-${PRODUCT},uid=${STACKABLE_USER_UID},target=/stackable/.m2/repository <
+Date: Tue, 15 Apr 2025 11:11:36 +0200
+Subject: Adjust version regex to match custom stackable versions
+
+---
+ phoenix-core-client/pom.xml | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/phoenix-core-client/pom.xml b/phoenix-core-client/pom.xml
+index cbbebc9d5..ae0b37387 100644
+--- a/phoenix-core-client/pom.xml
++++ b/phoenix-core-client/pom.xml
+@@ -48,7 +48,7 @@
+ import java.util.regex.Pattern;
+ import java.lang.Integer;
+
+- versionPattern = Pattern.compile("(\\d+)\\.(\\d+)\\.(\\d+)[^.]*$");
++ versionPattern = Pattern.compile("(\\d+)\\.(\\d+)\\.(\\d+)[a-z0-9\\-\\.]*$");
+ versionMatcher = versionPattern.matcher("${hbase.version}");
+ versionMatcher.find();
+
diff --git a/hbase/phoenix/versions.py b/hbase/phoenix/versions.py
index d5949fc41..e66c11f00 100644
--- a/hbase/phoenix/versions.py
+++ b/hbase/phoenix/versions.py
@@ -1,8 +1,18 @@
versions = [
{
- "product": "5.2.1",
- "hbase_version": "2.6.1",
- "hadoop_version": "3.3.6",
+ "product": "5.2.1-hbase2.6.1",
+ "phoenix_version": "5.2.1",
+ "hbase/hbase": "2.6.1",
+ "hadoop": "3.3.6",
+ "java-devel": "11",
+ "hbase_profile": "2.6",
+ "delete_caches": "true",
+ },
+ {
+ "product": "5.2.1-hbase2.6.2",
+ "phoenix_version": "5.2.1",
+ "hbase/hbase": "2.6.2",
+ "hadoop": "3.4.1",
"java-devel": "11",
"hbase_profile": "2.6",
"delete_caches": "true",
diff --git a/hbase/stackable/bin/hbck2.env b/hbase/stackable/bin/hbck2.env
deleted file mode 100755
index 5049d0db8..000000000
--- a/hbase/stackable/bin/hbck2.env
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/sh
-
-exec /stackable/hbase-${PRODUCT}/bin/hbase hbck \
- -j /stackable/hbase-operator-tools-${HBASE_OPERATOR_TOOLS}/hbase-hbck2/hbase-hbck2-${HBASE_OPERATOR_TOOLS}.jar \
- "$@"
diff --git a/hbase/versions.py b/hbase/versions.py
index b787609c4..c2f9fdfbf 100644
--- a/hbase/versions.py
+++ b/hbase/versions.py
@@ -3,26 +3,26 @@
# hbase-thirdparty is used to build the hbase-operator-tools and should be set to the version defined in the POM of HBase.
{
"product": "2.6.1",
- "hbase/hbase-operator-tools": "1.3.0-fd5a5fb",
- "hbase/phoenix": "5.2.1",
+ "hbase/hbase": "2.6.1",
+ "hbase/hbase-operator-tools": "1.3.0-fd5a5fb-hbase2.6.1",
+ "hbase/phoenix": "5.2.1-hbase2.6.1",
"hbase/hbase-opa-authorizer": "0.1.0", # only for HBase 2.6.1
"hadoop": "3.3.6",
"java-base": "11",
"java-devel": "11",
"hbase_profile": "2.6",
- "async_profiler": "2.9",
"delete_caches": "true",
},
{
"product": "2.6.2",
- "hbase/hbase-operator-tools": "1.3.0-fd5a5fb",
- "hbase/phoenix": "5.2.1",
+ "hbase/hbase": "2.6.2",
+ "hbase/hbase-operator-tools": "1.3.0-fd5a5fb-hbase2.6.2",
+ "hbase/phoenix": "5.2.1-hbase2.6.2",
"hbase/hbase-opa-authorizer": "0.1.0", # only for HBase 2.6.1
"hadoop": "3.4.1",
"java-base": "11",
"java-devel": "11",
"hbase_profile": "2.6",
- "async_profiler": "2.9",
"delete_caches": "true",
},
]
diff --git a/hive/Dockerfile b/hive/Dockerfile
index 0fb56e4b8..9dc52d8b3 100644
--- a/hive/Dockerfile
+++ b/hive/Dockerfile
@@ -5,12 +5,14 @@ FROM stackable/image/hadoop AS hadoop-builder
FROM stackable/image/java-devel AS hive-builder
+
# Apache Hive up to 4.0.x(!) officially requires Java 8 (there is no distinction between building and running).
# As of 2024-04-15 we for sure need Java 8 for building, but we used a Java 11 runtime for months now without any problems.
# As we got weird TLS errors (https://stackable-workspace.slack.com/archives/C031A5BEFS7/p1713185172557459) with a
# Java 8 runtime we bumped the Runtime to Java 11 again.
ARG PRODUCT
+ARG RELEASE
ARG HADOOP
ARG JMX_EXPORTER
ARG AWS_JAVA_SDK_BUNDLE
@@ -28,7 +30,8 @@ COPY --chown=${STACKABLE_USER_UID}:0 hive/stackable/patches/${PRODUCT} /stackabl
# It is useful to see which version of Hadoop is used at a glance
# Therefore the use of the full name here
# TODO: Do we really need all of Hadoop in here?
-COPY --chown=${STACKABLE_USER_UID}:0 --from=hadoop-builder /stackable/hadoop /stackable/hadoop-${HADOOP}
+COPY --chown=${STACKABLE_USER_UID}:0 --from=hadoop-builder /stackable/hadoop /stackable/hadoop-${HADOOP}-stackable${RELEASE}
+COPY --from=hadoop-builder --chown=${STACKABLE_USER_UID}:0 /stackable/patched-libs /stackable/patched-libs
USER ${STACKABLE_USER_UID}
WORKDIR /stackable
@@ -39,26 +42,33 @@ RUN --mount=type=cache,id=maven-hive-${PRODUCT},uid=${STACKABLE_USER_UID},target
BUILD_SRC_DIR="$(/stackable/patchable --images-repo-root=src checkout hive ${PRODUCT})"
cd "$BUILD_SRC_DIR"
+# Make Maven aware of custom Stackable libraries
+cp -r /stackable/patched-libs/maven/* /stackable/.m2/repository
+
+ORIGINAL_VERSION=$(mvn help:evaluate -Dexpression=project.version -q -DforceStdout)
+NEW_VERSION="${PRODUCT}-stackable${RELEASE}"
+
+mvn versions:set -DnewVersion=$NEW_VERSION -DartifactId=* -DgroupId=*
+
# Create snapshot of the source code including custom patches
-tar -czf /stackable/hive-${PRODUCT}-src.tar.gz .
+tar -czf /stackable/hive-${NEW_VERSION}-src.tar.gz .
if [[ "${PRODUCT}" == "3.1.3" ]] ; then
mvn --batch-mode --no-transfer-progress clean package -DskipTests --projects standalone-metastore
- mv standalone-metastore/target/apache-hive-metastore-${PRODUCT}-bin/apache-hive-metastore-${PRODUCT}-bin /stackable
- mv standalone-metastore/target/bom.json /stackable/apache-hive-metastore-${PRODUCT}-bin/apache-hive-metastore-${PRODUCT}.cdx.json
+ mv standalone-metastore/target/apache-hive-metastore-${NEW_VERSION}-bin/apache-hive-metastore-${NEW_VERSION}-bin /stackable
+ mv standalone-metastore/target/bom.json /stackable/apache-hive-metastore-${NEW_VERSION}-bin/apache-hive-metastore-${NEW_VERSION}.cdx.json
else
(
# https://issues.apache.org/jira/browse/HIVE-20451 switched the metastore server packaging starting with 4.0.0
- cd standalone-metastore
- mvn --batch-mode --no-transfer-progress clean package -DskipTests --projects metastore-server
+ mvn --batch-mode --no-transfer-progress clean package -DskipTests -Dhadoop.version=${HADOOP}-stackable${RELEASE}
# We only seem to get a .tar.gz archive, so let's extract that to the correct location
- tar --extract --directory=/stackable -f metastore-server/target/apache-hive-standalone-metastore-server-${PRODUCT}-bin.tar.gz
- mv metastore-server/target/bom.json /stackable/apache-hive-metastore-${PRODUCT}-bin/apache-hive-metastore-${PRODUCT}.cdx.json
+ tar --extract --directory=/stackable -f standalone-metastore/metastore-server/target/apache-hive-standalone-metastore-server-${NEW_VERSION}-bin.tar.gz
+ mv standalone-metastore/metastore-server/target/bom.json /stackable/apache-hive-metastore-${NEW_VERSION}-bin/apache-hive-metastore-${NEW_VERSION}.cdx.json
# TODO: Remove once the fix https://github.com/apache/hive/pull/5419 is merged and released
# The schemaTool.sh is still pointing to the class location from Hive < 4.0.0, it seems like it was forgotten to update it
- sed -i -e 's/CLASS=org.apache.hadoop.hive.metastore.tools.MetastoreSchemaTool/CLASS=org.apache.hadoop.hive.metastore.tools.schematool.MetastoreSchemaTool/' /stackable/apache-hive-metastore-${PRODUCT}-bin/bin/ext/schemaTool.sh
+ sed -i -e 's/CLASS=org.apache.hadoop.hive.metastore.tools.MetastoreSchemaTool/CLASS=org.apache.hadoop.hive.metastore.tools.schematool.MetastoreSchemaTool/' /stackable/apache-hive-metastore-${NEW_VERSION}-bin/bin/ext/schemaTool.sh
)
fi
@@ -74,17 +84,17 @@ ln -s "/stackable/jmx/jmx_prometheus_javaagent-${JMX_EXPORTER}.jar" /stackable/j
# This way the build will fail should one of the files not be available anymore in a later Hadoop version!
# Add S3 Support for Hive (support for s3a://)
-cp /stackable/hadoop-${HADOOP}/share/hadoop/tools/lib/hadoop-aws-${HADOOP}.jar /stackable/apache-hive-metastore-${PRODUCT}-bin/lib/
+cp /stackable/hadoop-${HADOOP}-stackable${RELEASE}/share/hadoop/tools/lib/hadoop-aws-${HADOOP}-stackable${RELEASE}.jar /stackable/apache-hive-metastore-${PRODUCT}-stackable${RELEASE}-bin/lib/
# According to https://hadoop.apache.org/docs/stable/hadoop-aws/tools/hadoop-aws/aws_sdk_upgrade.html, the jar filename has changed from
# aws-java-sdk-bundle-${AWS_JAVA_SDK_BUNDLE}.jar to bundle-${AWS_JAVA_SDK_BUNDLE}.jar. In future, you might need to do:
-# cp /stackable/hadoop-${HADOOP}/share/hadoop/tools/lib/bundle-${AWS_JAVA_SDK_BUNDLE}.jar /stackable/apache-hive-metastore-${PRODUCT}-bin/lib/
-cp /stackable/hadoop-${HADOOP}/share/hadoop/tools/lib/aws-java-sdk-bundle-${AWS_JAVA_SDK_BUNDLE}.jar /stackable/apache-hive-metastore-${PRODUCT}-bin/lib/
+# cp /stackable/hadoop-${HADOOP}-stackable${RELEASE}/share/hadoop/tools/lib/bundle-${AWS_JAVA_SDK_BUNDLE}.jar /stackable/apache-hive-metastore-${PRODUCT}-stackable${RELEASE}-bin/lib/
+cp /stackable/hadoop-${HADOOP}-stackable${RELEASE}/share/hadoop/tools/lib/aws-java-sdk-bundle-${AWS_JAVA_SDK_BUNDLE}.jar /stackable/apache-hive-metastore-${PRODUCT}-stackable${RELEASE}-bin/lib/
# Add Azure ABFS support (support for abfs://)
-cp /stackable/hadoop-${HADOOP}/share/hadoop/tools/lib/hadoop-azure-${HADOOP}.jar /stackable/apache-hive-metastore-${PRODUCT}-bin/lib/
-cp /stackable/hadoop-${HADOOP}/share/hadoop/tools/lib/azure-storage-${AZURE_STORAGE}.jar /stackable/apache-hive-metastore-${PRODUCT}-bin/lib/
-cp /stackable/hadoop-${HADOOP}/share/hadoop/tools/lib/azure-keyvault-core-${AZURE_KEYVAULT_CORE}.jar /stackable/apache-hive-metastore-${PRODUCT}-bin/lib/
+cp /stackable/hadoop-${HADOOP}-stackable${RELEASE}/share/hadoop/tools/lib/hadoop-azure-${HADOOP}-stackable${RELEASE}.jar /stackable/apache-hive-metastore-${NEW_VERSION}-bin/lib/
+cp /stackable/hadoop-${HADOOP}-stackable${RELEASE}/share/hadoop/tools/lib/azure-storage-${AZURE_STORAGE}.jar /stackable/apache-hive-metastore-${NEW_VERSION}-bin/lib/
+cp /stackable/hadoop-${HADOOP}-stackable${RELEASE}/share/hadoop/tools/lib/azure-keyvault-core-${AZURE_KEYVAULT_CORE}.jar /stackable/apache-hive-metastore-${NEW_VERSION}-bin/lib/
# We're removing these to make the intermediate layer smaller
# This can be necessary even though it's only a builder image because the GitHub Action Runners only have very limited space available
@@ -133,13 +143,13 @@ LABEL io.k8s.display-name="${NAME}"
WORKDIR /stackable
-COPY --chown=${STACKABLE_USER_UID}:0 --from=hive-builder /stackable/apache-hive-metastore-${PRODUCT}-bin /stackable/apache-hive-metastore-${PRODUCT}-bin
-COPY --chown=${STACKABLE_USER_UID}:0 --from=hive-builder /stackable/hive-${PRODUCT}-src.tar.gz /stackable
-COPY --chown=${STACKABLE_USER_UID}:0 --from=hive-builder /stackable/hadoop-${HADOOP} /stackable/hadoop-${HADOOP}
-COPY --chown=${STACKABLE_USER_UID}:0 --from=hadoop-builder /stackable/hadoop-${HADOOP}-src.tar.gz /stackable
+COPY --chown=${STACKABLE_USER_UID}:0 --from=hive-builder /stackable/apache-hive-metastore-${PRODUCT}-stackable${RELEASE}-bin /stackable/apache-hive-metastore-${PRODUCT}-stackable${RELEASE}-bin
+COPY --chown=${STACKABLE_USER_UID}:0 --from=hive-builder /stackable/hive-${PRODUCT}-stackable${RELEASE}-src.tar.gz /stackable
+COPY --chown=${STACKABLE_USER_UID}:0 --from=hive-builder /stackable/hadoop-${HADOOP}-stackable${RELEASE} /stackable/hadoop-${HADOOP}-stackable${RELEASE}
+COPY --chown=${STACKABLE_USER_UID}:0 --from=hadoop-builder /stackable/hadoop-${HADOOP}-stackable${RELEASE}-src.tar.gz /stackable
COPY --chown=${STACKABLE_USER_UID}:0 --from=hive-builder /stackable/jmx /stackable/jmx
COPY --chown=${STACKABLE_USER_UID}:0 hive/stackable/jmx /stackable/jmx
-COPY --chown=${STACKABLE_USER_UID}:0 hive/stackable/bin/start-metastore /stackable/apache-hive-metastore-${PRODUCT}-bin/bin
+COPY --chown=${STACKABLE_USER_UID}:0 hive/stackable/bin/start-metastore /stackable/apache-hive-metastore-${PRODUCT}-stackable${RELEASE}-bin/bin
COPY hive/licenses /licenses
@@ -151,12 +161,12 @@ chown ${STACKABLE_USER_UID}:0 /stackable/package_manifest.txt
chmod g=u /stackable/package_manifest.txt
rm -rf /var/cache/yum
-chmod g=u /stackable/apache-hive-metastore-${PRODUCT}-bin/bin/start-metastore
+chmod g=u /stackable/apache-hive-metastore-${PRODUCT}-stackable${RELEASE}-bin/bin/start-metastore
-ln -s /stackable/apache-hive-metastore-${PRODUCT}-bin /stackable/hive-metastore
+ln -s /stackable/apache-hive-metastore-${PRODUCT}-stackable${RELEASE}-bin /stackable/hive-metastore
chown -h ${STACKABLE_USER_UID}:0 /stackable/hive-metastore
chmod g=u /stackable/hive-metastore
-ln -s /stackable/hadoop-${HADOOP} /stackable/hadoop
+ln -s /stackable/hadoop-${HADOOP}-stackable${RELEASE} /stackable/hadoop
chown -h ${STACKABLE_USER_UID}:0 /stackable/hadoop
chmod g=u /stackable/hadoop
chmod g=u /stackable/*-src.tar.gz
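
Copying each required jar by its full name in the hunk above doubles as an availability check: as the comment notes, the build should fail if a later Hadoop release drops or renames one of these files. A minimal shell sketch of that fail-fast pattern, assuming the build script aborts on error (e.g. via `set -euo pipefail`); paths and jar names mirror the diff but are meant as placeholders:

set -euo pipefail
# Copy the jars one by one; a missing source path makes cp fail and aborts the build.
for jar in \
  "hadoop-aws-${HADOOP}-stackable${RELEASE}.jar" \
  "hadoop-azure-${HADOOP}-stackable${RELEASE}.jar"; do
  cp "/stackable/hadoop-${HADOOP}-stackable${RELEASE}/share/hadoop/tools/lib/${jar}" \
     "/stackable/apache-hive-metastore-${PRODUCT}-stackable${RELEASE}-bin/lib/"
done
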
diff --git a/kafka/Dockerfile b/kafka/Dockerfile
index 21cfa10f1..b819c276d 100644
--- a/kafka/Dockerfile
+++ b/kafka/Dockerfile
@@ -6,6 +6,7 @@ FROM stackable/image/kafka/kcat AS kcat
FROM stackable/image/java-devel AS kafka-builder
ARG PRODUCT
+ARG RELEASE
ARG SCALA
ARG OPA_AUTHORIZER
ARG JMX_EXPORTER
@@ -21,22 +22,34 @@ COPY --chown=${STACKABLE_USER_UID}:0 kafka/stackable/patches/${PRODUCT} /stackab
RUN <<EOF
+sed -i "s/${ORIGINAL_VERSION}<\/kafka.version>/${NEW_VERSION}<\/kafka.version>/g" streams/quickstart/java/src/main/resources/archetype-resources/pom.xml
+sed -i "s/${ORIGINAL_VERSION}<\/version>/${NEW_VERSION}<\/version>/g" streams/quickstart/pom.xml
+sed -i "s/${ORIGINAL_VERSION}<\/version>/${NEW_VERSION}<\/version>/g" streams/quickstart/java/pom.xml
+
# Create snapshot of the source code including custom patches
-tar -czf /stackable/kafka-${PRODUCT}-src.tar.gz .
+tar -czf /stackable/kafka-${NEW_VERSION}-src.tar.gz .
# TODO: Try to install gradle via package manager (if possible) instead of fetching it from the internet
# We don't specify "-x test" to skip the tests, as we might bump some Kafka internal dependencies in the future and
# it's a good idea to run the tests in this case.
./gradlew clean releaseTarGz
./gradlew cyclonedxBom
-tar -xf core/build/distributions/kafka_${SCALA}-${PRODUCT}.tgz -C /stackable
-cp build/reports/bom.json /stackable/kafka_${SCALA}-${PRODUCT}.cdx.json
-rm -rf /stackable/kafka_${SCALA}-${PRODUCT}/site-docs/
+tar -xf core/build/distributions/kafka_${SCALA}-${NEW_VERSION}.tgz -C /stackable
+cp build/reports/bom.json /stackable/kafka_${SCALA}-${NEW_VERSION}.cdx.json
+rm -rf /stackable/kafka_${SCALA}-${NEW_VERSION}/site-docs/
(cd .. && rm -rf ${PRODUCT})
# TODO (@NickLarsenNZ): Compile from source: https://github.com/StyraInc/opa-kafka-plugin
curl https://repo.stackable.tech/repository/packages/kafka-opa-authorizer/opa-authorizer-${OPA_AUTHORIZER}-all.jar \
- -o /stackable/kafka_${SCALA}-${PRODUCT}/libs/opa-authorizer-${OPA_AUTHORIZER}-all.jar
+ -o /stackable/kafka_${SCALA}-${NEW_VERSION}/libs/opa-authorizer-${OPA_AUTHORIZER}-all.jar
# JMX exporter
curl https://repo.stackable.tech/repository/packages/jmx-exporter/jmx_prometheus_javaagent-${JMX_EXPORTER}.jar \
@@ -65,9 +78,9 @@ LABEL \
summary="The Stackable image for Apache Kafka." \
description="This image is deployed by the Stackable Operator for Apache Kafka."
-COPY --chown=${STACKABLE_USER_UID}:0 --from=kafka-builder /stackable/kafka_${SCALA}-${PRODUCT} /stackable/kafka_${SCALA}-${PRODUCT}
-COPY --chown=${STACKABLE_USER_UID}:0 --from=kafka-builder /stackable/kafka_${SCALA}-${PRODUCT}.cdx.json /stackable/kafka_${SCALA}-${PRODUCT}/kafka_${SCALA}-${PRODUCT}.cdx.json
-COPY --chown=${STACKABLE_USER_UID}:0 --from=kafka-builder /stackable/kafka-${PRODUCT}-src.tar.gz /stackable
+COPY --chown=${STACKABLE_USER_UID}:0 --from=kafka-builder /stackable/kafka_${SCALA}-${PRODUCT}-stackable${RELEASE} /stackable/kafka_${SCALA}-${PRODUCT}-stackable${RELEASE}
+COPY --chown=${STACKABLE_USER_UID}:0 --from=kafka-builder /stackable/kafka_${SCALA}-${PRODUCT}-stackable${RELEASE}.cdx.json /stackable/kafka_${SCALA}-${PRODUCT}-stackable${RELEASE}/kafka_${SCALA}-${PRODUCT}-stackable${RELEASE}.cdx.json
+COPY --chown=${STACKABLE_USER_UID}:0 --from=kafka-builder /stackable/kafka-${PRODUCT}-stackable${RELEASE}-src.tar.gz /stackable
COPY --chown=${STACKABLE_USER_UID}:0 --from=kafka-builder /stackable/jmx/ /stackable/jmx/
COPY --chown=${STACKABLE_USER_UID}:0 --from=kcat /stackable/kcat /stackable/bin/kcat-${KAFKA_KCAT}
COPY --chown=${STACKABLE_USER_UID}:0 --from=kcat /stackable/kcat-${KAFKA_KCAT}-src.tar.gz /stackable
@@ -94,13 +107,13 @@ chown -h ${STACKABLE_USER_UID}:0 /stackable/bin/kcat
# kcat was located in /stackable/kcat - legacy
ln -s /stackable/bin/kcat /stackable/kcat
chown -h ${STACKABLE_USER_UID}:0 /stackable/kcat
-ln -s /stackable/kafka_${SCALA}-${PRODUCT} /stackable/kafka
+ln -s /stackable/kafka_${SCALA}-${PRODUCT}-stackable${RELEASE} /stackable/kafka
chown -h ${STACKABLE_USER_UID}:0 /stackable/kafka
# fix missing permissions
chmod g=u /stackable/bin
chmod g=u /stackable/jmx
-chmod g=u /stackable/kafka_${SCALA}-${PRODUCT}
+chmod g=u /stackable/kafka_${SCALA}-${PRODUCT}-stackable${RELEASE}
chmod g=u /stackable/*-src.tar.gz
EOF
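
All of the renames in this change follow one scheme: the upstream version is suffixed with `-stackable${RELEASE}` so that patched artifacts are clearly distinguishable from vanilla releases. A small sketch of how the pieces compose; the concrete values are illustrative and not taken from this diff:

# Illustrative values; the real ones come from the PRODUCT, RELEASE and SCALA build args.
PRODUCT="3.9.0"
RELEASE="1.0.0"
SCALA="2.13"
NEW_VERSION="${PRODUCT}-stackable${RELEASE}"   # -> 3.9.0-stackable1.0.0
echo "kafka_${SCALA}-${NEW_VERSION}"           # directory name used in the final image stage
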
diff --git a/nifi/Dockerfile b/nifi/Dockerfile
index 6257daab0..6eb722818 100644
--- a/nifi/Dockerfile
+++ b/nifi/Dockerfile
@@ -8,6 +8,7 @@ FROM oci.stackable.tech/sdp/git-sync/git-sync:${GIT_SYNC} AS git-sync-image
FROM stackable/image/java-devel AS nifi-builder
ARG PRODUCT
+ARG RELEASE
ARG MAVEN_VERSION="3.9.8"
ARG STACKABLE_USER_UID
@@ -45,8 +46,13 @@ curl 'https://repo.stackable.tech/repository/m2/tech/stackable/nifi/stackable-bc
cd "$(/stackable/patchable --images-repo-root=src checkout nifi ${PRODUCT})"
+ORIGINAL_VERSION=$(mvn help:evaluate -Dexpression=project.version -q -DforceStdout)
+NEW_VERSION="${PRODUCT}-stackable${RELEASE}"
+
+mvn versions:set -DnewVersion=$NEW_VERSION
+
# Create snapshot of the source code including custom patches
-tar -czf /stackable/nifi-${PRODUCT}-src.tar.gz .
+tar -czf /stackable/nifi-${NEW_VERSION}-src.tar.gz .
# NOTE: Since NiFi 2.0.0 the PutIceberg processor and related services have been removed, so including the `include-iceberg` profile does nothing.
# Additionally, some modules were moved to optional build profiles, so we need to add `include-hadoop` to get `nifi-parquet-nar`, for example.
@@ -57,13 +63,14 @@ else
fi
# Copy the binaries to the /stackable folder
-mv nifi-assembly/target/nifi-${PRODUCT}-bin/nifi-${PRODUCT} /stackable/nifi-${PRODUCT}
+mv nifi-assembly/target/nifi-${NEW_VERSION}-bin/nifi-${NEW_VERSION} /stackable/nifi-${NEW_VERSION}
# Copy the SBOM as well
-mv nifi-assembly/target/bom.json /stackable/nifi-${PRODUCT}/nifi-${PRODUCT}.cdx.json
+sed -i "s/${NEW_VERSION}/${ORIGINAL_VERSION}/g" nifi-assembly/target/bom.json
+mv nifi-assembly/target/bom.json /stackable/nifi-${NEW_VERSION}/nifi-${NEW_VERSION}.cdx.json
# Get a list of NARs
-export NARS=$(ls /stackable/nifi-${PRODUCT}/lib/*.nar | awk -F '/' '{ print $5 }' | sed "s/\-${PRODUCT}.nar\$//g")
+export NARS=$(ls /stackable/nifi-${NEW_VERSION}/lib/*.nar | awk -F '/' '{ print $5 }' | sed "s/\-${NEW_VERSION}.nar\$//g")
# Get a list of SBOMs
find . -name bom.json > bomlist.txt
@@ -72,7 +79,7 @@ for nar in $NARS; do
match=$(grep "\/$nar\/target\/bom.json" bomlist.txt || true)
if [[ -n "$match" ]]; then
# Copy the SBOM of the NAR
- cp "$match" "/stackable/nifi-${PRODUCT}/$nar.cdx.json"
+ cp "$match" "/stackable/nifi-${NEW_VERSION}/$nar.cdx.json"
fi
done
@@ -80,7 +87,7 @@ done
(cd .. && rm -r ${PRODUCT})
# Remove generated docs in binary
-rm -rf /stackable/nifi-${PRODUCT}/docs
+rm -rf /stackable/nifi-${NEW_VERSION}/docs
# Set correct permissions
chmod -R g=u /stackable
@@ -182,15 +189,15 @@ LABEL name="Apache NiFi" \
summary="The Stackable image for Apache NiFi." \
description="This image is deployed by the Stackable Operator for Apache NiFi."
-COPY --chown=${STACKABLE_USER_UID}:0 --from=nifi-builder /stackable/nifi-${PRODUCT} /stackable/nifi-${PRODUCT}/
-COPY --chown=${STACKABLE_USER_UID}:0 --from=nifi-builder /stackable/nifi-${PRODUCT}-src.tar.gz /stackable
+COPY --chown=${STACKABLE_USER_UID}:0 --from=nifi-builder /stackable/nifi-${PRODUCT}-stackable${RELEASE} /stackable/nifi-${PRODUCT}-stackable${RELEASE}/
+COPY --chown=${STACKABLE_USER_UID}:0 --from=nifi-builder /stackable/nifi-${PRODUCT}-stackable${RELEASE}-src.tar.gz /stackable
COPY --chown=${STACKABLE_USER_UID}:0 --from=nifi-builder /stackable/stackable-bcrypt.jar /stackable/stackable-bcrypt.jar
-COPY --chown=${STACKABLE_USER_UID}:0 --from=nifi-iceberg-bundle-builder /stackable/*.nar /stackable/nifi-${PRODUCT}/lib/
-COPY --chown=${STACKABLE_USER_UID}:0 --from=nifi-iceberg-bundle-builder /stackable/*.cdx.json /stackable/nifi-${PRODUCT}/lib/
+COPY --chown=${STACKABLE_USER_UID}:0 --from=nifi-iceberg-bundle-builder /stackable/*.nar /stackable/nifi-${PRODUCT}-stackable${RELEASE}/lib/
+COPY --chown=${STACKABLE_USER_UID}:0 --from=nifi-iceberg-bundle-builder /stackable/*.cdx.json /stackable/nifi-${PRODUCT}-stackable${RELEASE}/lib/
COPY --chown=${STACKABLE_USER_UID}:0 --from=nifi-iceberg-bundle-builder /stackable/*-src.tar.gz /stackable
COPY --chown=${STACKABLE_USER_UID}:0 --from=nifi-builder /stackable/git-sync /stackable/git-sync
-COPY --chown=${STACKABLE_USER_UID}:0 --from=opa-authorizer-builder /stackable/opa-authorizer.nar /stackable/nifi-${PRODUCT}/extensions/opa-authorizer.nar
+COPY --chown=${STACKABLE_USER_UID}:0 --from=opa-authorizer-builder /stackable/opa-authorizer.nar /stackable/nifi-${PRODUCT}-stackable${RELEASE}/extensions/opa-authorizer.nar
COPY --chown=${STACKABLE_USER_UID}:0 --from=opa-authorizer-builder /stackable/nifi-opa-plugin-${NIFI_OPA_AUTHORIZER_PLUGIN}-src.tar.gz /stackable
COPY --chown=${STACKABLE_USER_UID}:0 --from=opa-authorizer-builder /stackable/LICENSE /licenses/NIFI_OPA_PLUGIN_LICENSE
COPY --chown=${STACKABLE_USER_UID}:0 nifi/stackable/bin /stackable/bin
@@ -219,13 +226,13 @@ pip install --no-cache-dir \
# This can be removed once older versions / operators using this are no longer supported
ln -s /stackable/stackable-bcrypt.jar /bin/stackable-bcrypt.jar
-ln -s /stackable/nifi-${PRODUCT} /stackable/nifi
+ln -s /stackable/nifi-${PRODUCT}-stackable${RELEASE} /stackable/nifi
# fix missing permissions / ownership
chown --no-dereference ${STACKABLE_USER_UID}:0 /stackable/nifi
chmod --recursive g=u /stackable/python
chmod --recursive g=u /stackable/bin
-chmod g=u /stackable/nifi-${PRODUCT}
+chmod g=u /stackable/nifi-${PRODUCT}-stackable${RELEASE}
chmod g=u /stackable/*-src.tar.gz
EOF
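
One subtlety in the NiFi hunks above: `mvn versions:set` rewrites every module to `${NEW_VERSION}`, so the generated `bom.json` would otherwise report the patched version; the `sed` maps it back to `${ORIGINAL_VERSION}` before the SBOM is placed next to the binaries. A hypothetical sanity check along those lines, not part of the build, with placeholder file name and versions:

ORIGINAL_VERSION="2.2.0"
NEW_VERSION="2.2.0-stackable1.0.0"
# The shipped SBOM should only reference the upstream version string.
if grep -q "${NEW_VERSION}" "nifi-${NEW_VERSION}.cdx.json"; then
  echo "SBOM still references the patched version string" >&2
  exit 1
fi
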
diff --git a/omid/Dockerfile b/omid/Dockerfile
index e1349fe11..fb7922b94 100644
--- a/omid/Dockerfile
+++ b/omid/Dockerfile
@@ -4,6 +4,7 @@
FROM stackable/image/java-devel AS builder
ARG PRODUCT
+ARG RELEASE
ARG DELETE_CACHES="true"
ARG STACKABLE_USER_UID
@@ -27,13 +28,21 @@ COPY --chown=${STACKABLE_USER_UID}:0 omid/stackable/patches/${PRODUCT} /stackabl
RUN --mount=type=cache,id=maven-omid-${PRODUCT},uid=${STACKABLE_USER_UID},target=/stackable/.m2/repository <<EOF
diff --git a/spark-k8s/Dockerfile b/spark-k8s/Dockerfile
# >>> Build spark
# Compiling the tests takes a lot of time, so we skip them
@@ -148,28 +161,37 @@ COPY --chown=${STACKABLE_USER_UID}:0 --from=spark-source-builder \
#
# This will download its own version of Maven because the UBI version is too old:
# 134.0 [ERROR] Detected Maven Version: 3.6.3 is not in the allowed range [3.8.8,)
-RUN export MAVEN_OPTS="-Xss64m -Xmx2g -XX:ReservedCodeCacheSize=1g" \
+RUN <<EOF
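
The spark hunk is cut off here, but the visible lines convert a chained `RUN export MAVEN_OPTS=... \` command into a heredoc-style `RUN` block, matching the style used in the other Dockerfiles touched by this change. A generic sketch of that Dockerfile pattern; the build steps are placeholders:

RUN <<EOF
export MAVEN_OPTS="-Xss64m -Xmx2g -XX:ReservedCodeCacheSize=1g"
# ... the actual Spark build steps would follow here ...
EOF
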