diff --git a/.github/workflows/build_hello-world.yaml b/.github/workflows/build_hello-world.yaml deleted file mode 100644 index 5bf41506e..000000000 --- a/.github/workflows/build_hello-world.yaml +++ /dev/null @@ -1,31 +0,0 @@ ---- -name: Build Hello-World -run-name: | - Build Hello-World (attempt #${{ github.run_attempt }}) - -on: - workflow_dispatch: - schedule: - - cron: '0 1 2/2 * *' # https://crontab.guru/#0_1_2/2_*_* - push: - branches: [main] - tags: ['*'] - paths: - # To check dependencies, run this ( you will need to consider transitive dependencies) - # bake --product PRODUCT -d | grep -v 'docker buildx bake' | jq '.target | keys[]' - - hello-world/** - - .github/actions/** - - .github/workflows/build_hello-world.yaml - - .github/workflows/reusable_build_image.yaml - -jobs: - build_image: - name: Reusable Workflow - uses: ./.github/workflows/reusable_build_image.yaml - secrets: - harbor-robot-secret: ${{ secrets.HARBOR_ROBOT_SDP_GITHUB_ACTION_BUILD_SECRET }} - slack-token: ${{ secrets.SLACK_CONTAINER_IMAGE_TOKEN }} - with: - product-name: hello-world - sdp-version: ${{ github.ref_type == 'tag' && github.ref_name || '0.0.0-dev' }} - registry-namespace: sdp diff --git a/.scripts/update_readme_badges.sh b/.scripts/update_readme_badges.sh index bf5944bac..88e16cc34 100755 --- a/.scripts/update_readme_badges.sh +++ b/.scripts/update_readme_badges.sh @@ -54,12 +54,15 @@ for BUILD_WORKFLOW_FILE in .github/workflows/build_*.yaml; do echo >> "$BADGES_TMP" fi done -# This needs to add the remaning empty columns of the last row in the table -# This is a hack to fix the status quo and make markdownlint happy. -for _ in $(seq 0 $((COLS - 1))); do - echo -n "| " >> "$BADGES_TMP" -done -echo "|" >> "$BADGES_TMP" + +# Add remaining empty columns to complete the last row if needed +# "if needed" is the first if here: It'll only run when we're NOT on the last column (0 indexed) +if [ ${CURRENT_COLUMN} -ne $((COLS - 1)) ]; then + for _ in $(seq $((CURRENT_COLUMN + 1)) $((COLS - 1))); do + echo -n "| " >> "$BADGES_TMP" + done + echo "|" >> "$BADGES_TMP" +fi echo -n "" >> "$BADGES_TMP" # Print the image and link shortcuts. Eg: diff --git a/CHANGELOG.md b/CHANGELOG.md index 2fdbc7f2c..8bf0d8695 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ All notable changes to this project will be documented in this file. `check-permissions-ownership.sh` provided in stackable-base image ([#1029]). - hbase: check for correct permissions and ownerships in /stackable folder via `check-permissions-ownership.sh` provided in stackable-base image ([#1028]). +- hbase: provide patches to implement listener endpoints ([#1159]). - hive: check for correct permissions and ownerships in /stackable folder via `check-permissions-ownership.sh` provided in stackable-base image ([#1040]). - spark-connect-client: A new image for Spark connect tests and demos ([#1034]) @@ -58,6 +59,7 @@ All notable changes to this project will be documented in this file. - zookeeper: bump jetty version for CVE-2024-13009 in 3.9.3 ([#1179]) - zookeeper: bump netty version for CVE-2025-24970 in 3.9.3 ([#1180]) - hadoop: backport HADOOP-19352, HADOOP-19335, HADOOP-19465, HADOOP-19456 and HADOOP-19225 to fix vulnerabilities in Hadoop `3.4.1` ([#1184]) +- hadoop: Backport HADOOP-18583 to make OpenSSL 3.x work with the native hadoop libraries ([#1209]). ### Changed @@ -91,7 +93,10 @@ All notable changes to this project will be documented in this file. - opa: Enable custom versions ([#1170]). 
- use custom product versions for Hadoop, HBase, Phoenix, hbase-operator-tools, Druid, Hive and Spark ([#1173]). - hbase: Bump dependencies to the latest patch level for HBase `2.6.1` and `2.6.2` ([#1185]). -- Changed default user & group IDs from 1000/1000 to 782252253/574654813 ([#1164]) +- hadoop: Separate Dockerfiles for Hadoop build and HDFS image ([#1186]). +- ubi-rust-builder: Bump Rust toolchain to 1.87.0, cargo-auditable to 0.7.0 and protoc to 31.1 ([#1197]). +- stackable-base, stackable-devel, ubi-rust-builder: Update `ubi-minimal` base image ([#1197]). +- testing-tools: Update `python` 3.12-slim-bullseye base image ([#1197]). ### Fixed @@ -199,8 +204,8 @@ All notable changes to this project will be documented in this file. [#1151]: https://github.com/stackabletech/docker-images/pull/1151 [#1152]: https://github.com/stackabletech/docker-images/pull/1152 [#1156]: https://github.com/stackabletech/docker-images/pull/1156 +[#1159]: https://github.com/stackabletech/docker-images/pull/1159 [#1163]: https://github.com/stackabletech/docker-images/pull/1163 -[#1164]: https://github.com/stackabletech/docker-images/pull/1164 [#1165]: https://github.com/stackabletech/docker-images/pull/1165 [#1168]: https://github.com/stackabletech/docker-images/pull/1168 [#1169]: https://github.com/stackabletech/docker-images/pull/1169 @@ -213,8 +218,11 @@ All notable changes to this project will be documented in this file. [#1180]: https://github.com/stackabletech/docker-images/pull/1180 [#1184]: https://github.com/stackabletech/docker-images/pull/1184 [#1185]: https://github.com/stackabletech/docker-images/pull/1185 +[#1186]: https://github.com/stackabletech/docker-images/pull/1186 [#1188]: https://github.com/stackabletech/docker-images/pull/1188 [#1189]: https://github.com/stackabletech/docker-images/pull/1189 +[#1197]: https://github.com/stackabletech/docker-images/pull/1197 +[#1209]: https://github.com/stackabletech/docker-images/pull/1209 ## [25.3.0] - 2025-03-21 diff --git a/README.md b/README.md index d0df4da04..1d8ee7a44 100644 --- a/README.md +++ b/README.md @@ -6,12 +6,11 @@ This repository contains Dockerfiles and scripts to build base images for use wi | | | | | | -: | -: | -: | -: | | [![Build Airflow]][build_airflow.yaml] | [![Build Druid]][build_druid.yaml] | [![Build Hadoop]][build_hadoop.yaml] | [![Build HBase]][build_hbase.yaml] | -| [![Build Hello-World]][build_hello-world.yaml] | [![Build Hive]][build_hive.yaml] | [![Build Java Base]][build_java-base.yaml] | [![Build Java Development]][build_java-devel.yaml] | -| [![Build Kafka Testing Tools]][build_kafka-testing-tools.yaml] | [![Build Kafka]][build_kafka.yaml] | [![Build Krb5]][build_krb5.yaml] | [![Build NiFi]][build_nifi.yaml] | -| [![Build Omid]][build_omid.yaml] | [![Build OPA]][build_opa.yaml] | [![Build Spark Connect Client]][build_spark-connect-client.yaml] | [![Build Spark K8s]][build_spark-k8s.yaml] | -| [![Build Stackable Base]][build_stackable-base.yaml] | [![Build Superset]][build_superset.yaml] | [![Build Testing Tools]][build_testing-tools.yaml] | [![Build Tools]][build_tools.yaml] | -| [![Build Trino CLI]][build_trino-cli.yaml] | [![Build Trino]][build_trino.yaml] | [![Build Vector]][build_vector.yaml] | [![Build ZooKeeper]][build_zookeeper.yaml] | -| | | | | +| [![Build Hive]][build_hive.yaml] | [![Build Java Base]][build_java-base.yaml] | [![Build Java Development]][build_java-devel.yaml] | [![Build Kafka Testing Tools]][build_kafka-testing-tools.yaml] | +| [![Build Kafka]][build_kafka.yaml] | [![Build 
Krb5]][build_krb5.yaml] | [![Build NiFi]][build_nifi.yaml] | [![Build Omid]][build_omid.yaml] | +| [![Build OPA]][build_opa.yaml] | [![Build Spark Connect Client]][build_spark-connect-client.yaml] | [![Build Spark K8s]][build_spark-k8s.yaml] | [![Build Stackable Base]][build_stackable-base.yaml] | +| [![Build Superset]][build_superset.yaml] | [![Build Testing Tools]][build_testing-tools.yaml] | [![Build Tools]][build_tools.yaml] | [![Build Trino CLI]][build_trino-cli.yaml] | +| [![Build Trino]][build_trino.yaml] | [![Build Vector]][build_vector.yaml] | [![Build ZooKeeper]][build_zookeeper.yaml] | | ## Prerequisites @@ -222,8 +221,6 @@ ENTRYPOINT ["/stackable-zookeeper-operator"] [build_hadoop.yaml]: https://github.com/stackabletech/docker-images/actions/workflows/build_hadoop.yaml [Build HBase]: https://github.com/stackabletech/docker-images/actions/workflows/build_hbase.yaml/badge.svg [build_hbase.yaml]: https://github.com/stackabletech/docker-images/actions/workflows/build_hbase.yaml -[Build Hello-World]: https://github.com/stackabletech/docker-images/actions/workflows/build_hello-world.yaml/badge.svg -[build_hello-world.yaml]: https://github.com/stackabletech/docker-images/actions/workflows/build_hello-world.yaml [Build Hive]: https://github.com/stackabletech/docker-images/actions/workflows/build_hive.yaml/badge.svg [build_hive.yaml]: https://github.com/stackabletech/docker-images/actions/workflows/build_hive.yaml [Build Java Base]: https://github.com/stackabletech/docker-images/actions/workflows/build_java-base.yaml/badge.svg diff --git a/conf.py b/conf.py index ae57a1c84..ef31294b3 100644 --- a/conf.py +++ b/conf.py @@ -13,12 +13,12 @@ airflow = importlib.import_module("airflow.versions") druid = importlib.import_module("druid.versions") hadoop = importlib.import_module("hadoop.versions") +hadoop_jars = importlib.import_module("hadoop.hadoop.versions") hbase = importlib.import_module("hbase.versions") hbase_jars = importlib.import_module("hbase.hbase.versions") hbase_phoenix = importlib.import_module("hbase.phoenix.versions") hbase_opa_authorizer = importlib.import_module("hbase.hbase-opa-authorizer.versions") hbase_operator_tools = importlib.import_module("hbase.hbase-operator-tools.versions") -hello_world = importlib.import_module("hello-world.versions") hive = importlib.import_module("hive.versions") java_base = importlib.import_module("java-base.versions") java_devel = importlib.import_module("java-devel.versions") @@ -49,12 +49,12 @@ {"name": "airflow", "versions": airflow.versions}, {"name": "druid", "versions": druid.versions}, {"name": "hadoop", "versions": hadoop.versions}, + {"name": "hadoop/hadoop", "versions": hadoop_jars.versions}, {"name": "hbase", "versions": hbase.versions}, {"name": "hbase/hbase", "versions": hbase_jars.versions}, {"name": "hbase/phoenix", "versions": hbase_phoenix.versions}, {"name": "hbase/hbase-opa-authorizer", "versions": hbase_opa_authorizer.versions}, {"name": "hbase/hbase-operator-tools", "versions": hbase_operator_tools.versions}, - {"name": "hello-world", "versions": hello_world.versions}, {"name": "hive", "versions": hive.versions}, {"name": "java-base", "versions": java_base.versions}, {"name": "java-devel", "versions": java_devel.versions}, @@ -110,7 +110,7 @@ args = { "STACKABLE_USER_NAME": "stackable", - "STACKABLE_USER_UID": "782252253", # This is a random high id to not conflict with any existing user - "STACKABLE_USER_GID": "574654813", # This is a random high id to not conflict with any existing group + "STACKABLE_USER_UID": "1000", 
+ "STACKABLE_USER_GID": "1000", "DELETE_CACHES": "true", } diff --git a/druid/Dockerfile b/druid/Dockerfile index ba5638b9f..f4caf5645 100644 --- a/druid/Dockerfile +++ b/druid/Dockerfile @@ -1,7 +1,7 @@ # syntax=docker/dockerfile:1.16.0@sha256:e2dd261f92e4b763d789984f6eab84be66ab4f5f08052316d8eb8f173593acf7 # check=error=true -FROM stackable/image/hadoop AS hadoop-builder +FROM stackable/image/hadoop/hadoop AS hadoop-builder FROM stackable/image/java-devel AS druid-builder @@ -12,7 +12,9 @@ ARG STAX2_API ARG WOODSTOX_CORE ARG AUTHORIZER ARG STACKABLE_USER_UID -ARG HADOOP +ARG HADOOP_HADOOP +# Reassign the arg to `HADOOP_VERSION` for better readability. +ENV HADOOP_VERSION=${HADOOP_HADOOP} # Setting this to anything other than "true" will keep the cache folders around (e.g. for Maven, NPM etc.) # This can be used to speed up builds when disk space is of no concern. @@ -41,7 +43,7 @@ COPY --chown=${STACKABLE_USER_UID}:0 druid/stackable/patches/${PRODUCT} /stackab COPY --from=hadoop-builder --chown=${STACKABLE_USER_UID}:0 /stackable/patched-libs /stackable/patched-libs # Cache mounts are owned by root by default -# We need to explicitly give the uid to use which is hardcoded to "1000" in stackable-base +# We need to explicitly give the uid to use. # The cache id has to include the product version that we are building because otherwise # docker encounters race conditions when building multiple versions in parallel, as all # builder containers will share the same cache and the `rm -rf` commands will fail @@ -75,7 +77,7 @@ mvn \ --no-transfer-progress \ clean install \ -Pdist,stackable-bundle-contrib-exts \ - -Dhadoop.compile.version=${HADOOP}-stackable${RELEASE} \ + -Dhadoop.compile.version=${HADOOP_VERSION}-stackable${RELEASE} \ -DskipTests `# Skip test execution` \ -Dcheckstyle.skip `# Skip checkstyle checks. 
We dont care if the code is properly formatted, it just wastes time` \ -Dmaven.javadoc.skip=true `# Dont generate javadoc` \ diff --git a/druid/versions.py b/druid/versions.py index 323868f1c..e380f7c81 100644 --- a/druid/versions.py +++ b/druid/versions.py @@ -4,7 +4,7 @@ # https://druid.apache.org/docs/30.0.1/operations/java/ "java-base": "17", "java-devel": "17", - "hadoop": "3.3.6", + "hadoop/hadoop": "3.3.6", "authorizer": "0.7.0", }, { @@ -12,7 +12,7 @@ # https://druid.apache.org/docs/31.0.1/operations/java/ "java-base": "17", "java-devel": "17", - "hadoop": "3.3.6", + "hadoop/hadoop": "3.3.6", "authorizer": "0.7.0", }, { @@ -20,7 +20,7 @@ # https://druid.apache.org/docs/33.0.0/operations/java/ "java-base": "17", "java-devel": "17", - "hadoop": "3.3.6", + "hadoop/hadoop": "3.3.6", "authorizer": "0.7.0", }, ] diff --git a/hadoop/Dockerfile b/hadoop/Dockerfile index 3997fb1fb..5647d6c57 100644 --- a/hadoop/Dockerfile +++ b/hadoop/Dockerfile @@ -1,145 +1,19 @@ # syntax=docker/dockerfile:1.16.0@sha256:e2dd261f92e4b763d789984f6eab84be66ab4f5f08052316d8eb8f173593acf7 # check=error=true -FROM stackable/image/java-devel AS hadoop-builder - -ARG PRODUCT -ARG RELEASE -ARG ASYNC_PROFILER -ARG JMX_EXPORTER -ARG PROTOBUF -ARG TARGETARCH -ARG TARGETOS -ARG STACKABLE_USER_UID - -WORKDIR /stackable - -COPY --chown=${STACKABLE_USER_UID}:0 shared/protobuf/stackable/patches/patchable.toml /stackable/src/shared/protobuf/stackable/patches/patchable.toml -COPY --chown=${STACKABLE_USER_UID}:0 shared/protobuf/stackable/patches/${PROTOBUF} /stackable/src/shared/protobuf/stackable/patches/${PROTOBUF} - -RUN <hadoop-pipes<\/artifactId>/,/<\/dependency>/ { s/.*<\/version>/'"$ORIGINAL_VERSION"'<\/version>/ }' -i hadoop-tools/hadoop-tools-dist/pom.xml - -# Create snapshot of the source code including custom patches -tar -czf /stackable/hadoop-${NEW_VERSION}-src.tar.gz . - -mvn \ - --batch-mode \ - --no-transfer-progress \ - clean package install \ - -Pdist,native \ - -pl '!hadoop-tools/hadoop-pipes' \ - -Dhadoop.version=${NEW_VERSION} \ - -Drequire.fuse=true \ - -DskipTests \ - -Dmaven.javadoc.skip=true - -mkdir -p /stackable/patched-libs/maven/org/apache -cp -r /stackable/.m2/repository/org/apache/hadoop /stackable/patched-libs/maven/org/apache - -cp -r hadoop-dist/target/hadoop-${NEW_VERSION} /stackable/hadoop-${NEW_VERSION} -sed -i "s/${NEW_VERSION}/${ORIGINAL_VERSION}/g" hadoop-dist/target/bom.json -mv hadoop-dist/target/bom.json /stackable/hadoop-${NEW_VERSION}/hadoop-${NEW_VERSION}.cdx.json - -# HDFS fuse-dfs is not part of the regular dist output, so we need to copy it in ourselves -cp hadoop-hdfs-project/hadoop-hdfs-native-client/target/main/native/fuse-dfs/fuse_dfs /stackable/hadoop-${NEW_VERSION}/bin - -# Remove source code -(cd .. && rm -r ${PRODUCT}) - -ln -s /stackable/hadoop-${NEW_VERSION} /stackable/hadoop - -mv /build/fuse_dfs_wrapper /stackable/hadoop/bin - -# Remove unneeded binaries: -# - code sources -# - mapreduce/yarn binaries that were built as cross-project dependencies -# - minicluster (only used for testing) and test .jars -# - json-io: this is a transitive dependency pulled in by cedarsoft/java-utils/json-io and is excluded in 3.4.0. See CVE-2023-34610. 
-rm -rf /stackable/hadoop/share/hadoop/common/sources/ -rm -rf /stackable/hadoop/share/hadoop/hdfs/sources/ -rm -rf /stackable/hadoop/share/hadoop/tools/sources/ -rm -rf /stackable/hadoop/share/hadoop/tools/lib/json-io-*.jar -rm -rf /stackable/hadoop/share/hadoop/tools/lib/hadoop-mapreduce-client-*.jar -rm -rf /stackable/hadoop/share/hadoop/tools/lib/hadoop-yarn-server*.jar -find /stackable/hadoop -name 'hadoop-minicluster-*.jar' -type f -delete -find /stackable/hadoop -name 'hadoop-client-minicluster-*.jar' -type f -delete -find /stackable/hadoop -name 'hadoop-*tests.jar' -type f -delete -rm -rf /stackable/.m2 - -# Set correct groups; make sure only required artifacts for the final image are located in /stackable -chmod -R g=u /stackable -EOF +FROM stackable/image/hadoop/hadoop AS hadoop-builder FROM stackable/image/java-devel AS hdfs-utils-builder ARG HDFS_UTILS ARG PRODUCT +ARG RELEASE ARG STACKABLE_USER_UID +ARG HADOOP_HADOOP +# Reassign the arg to `HADOOP_VERSION` for better readability. +# It is passed as `HADOOP_HADOOP`, because versions.py has to contain `hadoop/hadoop` to establish a dependency on the Hadoop builder. +# The value of `hadoop/hadoop` is transformed by `bake` and automatically passed as `HADOOP_HADOOP` arg. +ENV HADOOP_VERSION=${HADOOP_HADOOP} # Starting with hdfs-utils 0.4.0 we need to use Java 17 for compilation. # We can not simply use java-devel with Java 17, as it is also used to compile Hadoop in this @@ -161,6 +35,8 @@ WORKDIR /stackable COPY --chown=${STACKABLE_USER_UID}:0 hadoop/hdfs-utils/stackable/patches/patchable.toml /stackable/src/hadoop/hdfs-utils/stackable/patches/patchable.toml COPY --chown=${STACKABLE_USER_UID}:0 hadoop/hdfs-utils/stackable/patches/${HDFS_UTILS} /stackable/src/hadoop/hdfs-utils/stackable/patches/${HDFS_UTILS} +COPY --from=hadoop-builder --chown=${STACKABLE_USER_UID}:0 /stackable/patched-libs /stackable/patched-libs + # The Stackable HDFS utils contain an OPA authorizer, group mapper & topology provider. # The topology provider provides rack awareness functionality for HDFS by allowing users to specify Kubernetes # labels to build a rackID from. 
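The `ARG HADOOP_HADOOP` / `ENV HADOOP_VERSION` comments above state that `bake` derives the build-arg name from the `hadoop/hadoop` key in versions.py, but the derivation itself is not part of this diff. Below is a minimal sketch of the assumed mapping (upper-case the key, replace `/` and `-` with `_`), which is consistent with arg names used elsewhere in these Dockerfiles such as `HADOOP_HADOOP` and `HBASE_HBASE_OPERATOR_TOOLS`:

```python
# Assumed rule for how `bake` turns a versions.py dependency key into a Dockerfile
# build-arg name; the real implementation lives in the bake tooling, not in this diff.
def build_arg_name(version_key: str) -> str:
    return version_key.upper().replace("/", "_").replace("-", "_")


# Example entry modelled on hadoop/versions.py after this change.
versions_entry = {
    "product": "3.4.1",
    "hadoop/hadoop": "3.4.1",  # establishes the dependency on the hadoop/hadoop builder
}

build_args = {build_arg_name(key): value for key, value in versions_entry.items()}
print(build_args)
# {'PRODUCT': '3.4.1', 'HADOOP_HADOOP': '3.4.1'}
```

This is also why the Dockerfiles immediately reassign `HADOOP_HADOOP` to `HADOOP_VERSION`: the arg name is dictated by the versions.py key, not chosen for readability.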
@@ -168,6 +44,10 @@ COPY --chown=${STACKABLE_USER_UID}:0 hadoop/hdfs-utils/stackable/patches/${HDFS_ RUN < /etc/fuse.conf + +ln -s "/stackable/hadoop-${HADOOP_VERSION}-stackable${RELEASE}" /stackable/hadoop + +# async-profiler +ARCH="${TARGETARCH/amd64/x64}" +curl "https://repo.stackable.tech/repository/packages/async-profiler/async-profiler-${ASYNC_PROFILER}-${TARGETOS}-${ARCH}.tar.gz" | tar -xzC /stackable +ln -s "/stackable/async-profiler-${ASYNC_PROFILER}-${TARGETOS}-${ARCH}" /stackable/async-profiler + +# JMX Exporter +curl "https://repo.stackable.tech/repository/packages/jmx-exporter/jmx_prometheus_javaagent-${JMX_EXPORTER}.jar" -o "/stackable/jmx/jmx_prometheus_javaagent-${JMX_EXPORTER}.jar" +chmod -x "/stackable/jmx/jmx_prometheus_javaagent-${JMX_EXPORTER}.jar" +ln -s "/stackable/jmx/jmx_prometheus_javaagent-${JMX_EXPORTER}.jar" /stackable/jmx/jmx_prometheus_javaagent.jar + +# Set correct permissions and ownerships +chown --recursive ${STACKABLE_USER_UID}:0 /stackable/hadoop /stackable/jmx /stackable/async-profiler "/stackable/async-profiler-${ASYNC_PROFILER}-${TARGETOS}-${ARCH}" +chmod --recursive g=u /stackable/jmx /stackable/async-profiler "/stackable/hadoop-${HADOOP_VERSION}-stackable${RELEASE}" + +# Workaround for https://issues.apache.org/jira/browse/HADOOP-12845 +# The problem is that our stackable-devel image does contain the openssl-devel package +# That package creates a symlink from /usr/lib/libcrypto.so to the real libcrypto +# The non -devel package, which is used in this image, does NOT create this symlink. +# That's why the Hadoop build works even with the 'require.openssl' flag but in the production +# image the 'hadoop checknative' tool still fails because it can't find the 'libcrypto.so' symlink. +# Therefore we create this symlink here. +ln -s /usr/lib64/libcrypto.so.3 /usr/lib64/libcrypto.so EOF +RUN <hadoop-pipes<\/artifactId>/,/<\/dependency>/ { s/.*<\/version>/'"$ORIGINAL_VERSION"'<\/version>/ }' -i hadoop-tools/hadoop-tools-dist/pom.xml + +# Create snapshot of the source code including custom patches +tar -czf /stackable/hadoop-${NEW_VERSION}-src.tar.gz . + +# We do not pass require.snappy because that is only built in to the MapReduce client and we don't need that +# +# Passing require.openssl SHOULD make the build fail if OpenSSL is not present. +# This does not work properly however because this builder image contains the openssl-devel package which creates a symlink from /usr/lib64/libcrypto.so to the real version. +# Therefore, this build does work but the final image does NOT contain the openssl-devel package which is why it fails there which is why we have to create the symlink over there manually. +# We still leave this flag in to automatically fail should anything with the packages or symlinks ever fail. 
+mvn \ + --batch-mode \ + --no-transfer-progress \ + clean package install \ + -Pdist,native \ + -pl '!hadoop-tools/hadoop-pipes' \ + -Dhadoop.version=${NEW_VERSION} \ + -Drequire.fuse=true \ + -Drequire.openssl=true \ + -DskipTests \ + -Dmaven.javadoc.skip=true + +mkdir -p /stackable/patched-libs/maven/org/apache +cp -r /stackable/.m2/repository/org/apache/hadoop /stackable/patched-libs/maven/org/apache + +rm -rf hadoop-dist/target/hadoop-${NEW_VERSION}/share/hadoop/yarn +rm -rf hadoop-dist/target/hadoop-${NEW_VERSION}/share/hadoop/mapreduce +rm hadoop-dist/target/hadoop-${NEW_VERSION}/share/hadoop/client/hadoop-client-minicluster-*.jar +rm hadoop-dist/target/hadoop-${NEW_VERSION}/share/hadoop/tools/lib/hadoop-minicluster-*.jar + +cp -r hadoop-dist/target/hadoop-${NEW_VERSION} /stackable/hadoop-${NEW_VERSION} +sed -i "s/${NEW_VERSION}/${ORIGINAL_VERSION}/g" hadoop-dist/target/bom.json +mv hadoop-dist/target/bom.json /stackable/hadoop-${NEW_VERSION}/hadoop-${NEW_VERSION}.cdx.json + +# HDFS fuse-dfs is not part of the regular dist output, so we need to copy it in ourselves +cp hadoop-hdfs-project/hadoop-hdfs-native-client/target/main/native/fuse-dfs/fuse_dfs /stackable/hadoop-${NEW_VERSION}/bin + +# Remove source code +(cd .. && rm -r ${PRODUCT}) + +ln -s /stackable/hadoop-${NEW_VERSION} /stackable/hadoop + +mv /build/fuse_dfs_wrapper /stackable/hadoop/bin + +# Remove unneeded binaries: +# - code sources +# - mapreduce/yarn binaries that were built as cross-project dependencies +# - minicluster (only used for testing) and test .jars +# - json-io: this is a transitive dependency pulled in by cedarsoft/java-utils/json-io and is excluded in 3.4.0. See CVE-2023-34610. +rm -rf /stackable/hadoop/share/hadoop/common/sources/ +rm -rf /stackable/hadoop/share/hadoop/hdfs/sources/ +rm -rf /stackable/hadoop/share/hadoop/tools/sources/ +rm -rf /stackable/hadoop/share/hadoop/tools/lib/json-io-*.jar +rm -rf /stackable/hadoop/share/hadoop/tools/lib/hadoop-mapreduce-client-*.jar +rm -rf /stackable/hadoop/share/hadoop/tools/lib/hadoop-yarn-server*.jar +find /stackable/hadoop -name 'hadoop-minicluster-*.jar' -type f -delete +find /stackable/hadoop -name 'hadoop-client-minicluster-*.jar' -type f -delete +find /stackable/hadoop -name 'hadoop-*tests.jar' -type f -delete +rm -rf /stackable/.m2 + +# Set correct groups; make sure only required artifacts for the final image are located in /stackable +chmod -R g=u /stackable +EOF diff --git a/hadoop/stackable/fuse_dfs_wrapper b/hadoop/hadoop/stackable/fuse_dfs_wrapper similarity index 100% rename from hadoop/stackable/fuse_dfs_wrapper rename to hadoop/hadoop/stackable/fuse_dfs_wrapper diff --git a/hadoop/stackable/patches/3.3.6/0001-YARN-11527-Update-node.js.patch b/hadoop/hadoop/stackable/patches/3.3.6/0001-YARN-11527-Update-node.js.patch similarity index 100% rename from hadoop/stackable/patches/3.3.6/0001-YARN-11527-Update-node.js.patch rename to hadoop/hadoop/stackable/patches/3.3.6/0001-YARN-11527-Update-node.js.patch diff --git a/hadoop/stackable/patches/3.3.6/0002-Allow-overriding-datanode-registration-addresses.patch b/hadoop/hadoop/stackable/patches/3.3.6/0002-Allow-overriding-datanode-registration-addresses.patch similarity index 100% rename from hadoop/stackable/patches/3.3.6/0002-Allow-overriding-datanode-registration-addresses.patch rename to hadoop/hadoop/stackable/patches/3.3.6/0002-Allow-overriding-datanode-registration-addresses.patch diff --git a/hadoop/stackable/patches/3.3.6/0003-HADOOP-18055-Add-async-profiler.patch 
b/hadoop/hadoop/stackable/patches/3.3.6/0003-HADOOP-18055-Add-async-profiler.patch similarity index 100% rename from hadoop/stackable/patches/3.3.6/0003-HADOOP-18055-Add-async-profiler.patch rename to hadoop/hadoop/stackable/patches/3.3.6/0003-HADOOP-18055-Add-async-profiler.patch diff --git a/hadoop/stackable/patches/3.3.6/0004-Backport-HADOOP-18077.patch b/hadoop/hadoop/stackable/patches/3.3.6/0004-Backport-HADOOP-18077.patch similarity index 100% rename from hadoop/stackable/patches/3.3.6/0004-Backport-HADOOP-18077.patch rename to hadoop/hadoop/stackable/patches/3.3.6/0004-Backport-HADOOP-18077.patch diff --git a/hadoop/stackable/patches/3.3.6/0005-Async-profiler-also-grab-itimer-events.patch b/hadoop/hadoop/stackable/patches/3.3.6/0005-Async-profiler-also-grab-itimer-events.patch similarity index 100% rename from hadoop/stackable/patches/3.3.6/0005-Async-profiler-also-grab-itimer-events.patch rename to hadoop/hadoop/stackable/patches/3.3.6/0005-Async-profiler-also-grab-itimer-events.patch diff --git a/hadoop/stackable/patches/3.3.6/0006-HDFS-17378-Fix-missing-operationType-for-some-operat.patch b/hadoop/hadoop/stackable/patches/3.3.6/0006-HDFS-17378-Fix-missing-operationType-for-some-operat.patch similarity index 100% rename from hadoop/stackable/patches/3.3.6/0006-HDFS-17378-Fix-missing-operationType-for-some-operat.patch rename to hadoop/hadoop/stackable/patches/3.3.6/0006-HDFS-17378-Fix-missing-operationType-for-some-operat.patch diff --git a/hadoop/stackable/patches/3.3.6/0007-Bump-Snappy-version-to-fix-CVEs.patch b/hadoop/hadoop/stackable/patches/3.3.6/0007-Bump-Snappy-version-to-fix-CVEs.patch similarity index 100% rename from hadoop/stackable/patches/3.3.6/0007-Bump-Snappy-version-to-fix-CVEs.patch rename to hadoop/hadoop/stackable/patches/3.3.6/0007-Bump-Snappy-version-to-fix-CVEs.patch diff --git a/hadoop/stackable/patches/3.3.6/0008-Update-CycloneDX-plugin.patch b/hadoop/hadoop/stackable/patches/3.3.6/0008-Update-CycloneDX-plugin.patch similarity index 100% rename from hadoop/stackable/patches/3.3.6/0008-Update-CycloneDX-plugin.patch rename to hadoop/hadoop/stackable/patches/3.3.6/0008-Update-CycloneDX-plugin.patch diff --git a/hadoop/stackable/patches/3.3.6/0009-HADOOP-18516-ABFS-Authentication-Support-Fixed-SAS-T.patch b/hadoop/hadoop/stackable/patches/3.3.6/0009-HADOOP-18516-ABFS-Authentication-Support-Fixed-SAS-T.patch similarity index 100% rename from hadoop/stackable/patches/3.3.6/0009-HADOOP-18516-ABFS-Authentication-Support-Fixed-SAS-T.patch rename to hadoop/hadoop/stackable/patches/3.3.6/0009-HADOOP-18516-ABFS-Authentication-Support-Fixed-SAS-T.patch diff --git a/hadoop/stackable/patches/3.3.6/0010-Build-hadoop-client-modules-before-hadoop-dist.patch b/hadoop/hadoop/stackable/patches/3.3.6/0010-Build-hadoop-client-modules-before-hadoop-dist.patch similarity index 100% rename from hadoop/stackable/patches/3.3.6/0010-Build-hadoop-client-modules-before-hadoop-dist.patch rename to hadoop/hadoop/stackable/patches/3.3.6/0010-Build-hadoop-client-modules-before-hadoop-dist.patch diff --git a/hadoop/stackable/patches/3.3.6/0011-Remove-Hadoop-benchmark.patch b/hadoop/hadoop/stackable/patches/3.3.6/0011-Remove-Hadoop-benchmark.patch similarity index 100% rename from hadoop/stackable/patches/3.3.6/0011-Remove-Hadoop-benchmark.patch rename to hadoop/hadoop/stackable/patches/3.3.6/0011-Remove-Hadoop-benchmark.patch diff --git a/hadoop/hadoop/stackable/patches/3.3.6/0012-HADOOP-18583.-Fix-loading-of-OpenSSL-3.x-symbols-525.patch 
b/hadoop/hadoop/stackable/patches/3.3.6/0012-HADOOP-18583.-Fix-loading-of-OpenSSL-3.x-symbols-525.patch new file mode 100644 index 000000000..97c1eac8d --- /dev/null +++ b/hadoop/hadoop/stackable/patches/3.3.6/0012-HADOOP-18583.-Fix-loading-of-OpenSSL-3.x-symbols-525.patch @@ -0,0 +1,115 @@ +From baa7ec826f3f6d044f5307efe4b5d3bdd111bf4e Mon Sep 17 00:00:00 2001 +From: Sebastian Klemke <3669903+packet23@users.noreply.github.com> +Date: Thu, 7 Nov 2024 19:14:13 +0100 +Subject: HADOOP-18583. Fix loading of OpenSSL 3.x symbols (#5256) (#7149) + +Contributed by Sebastian Klemke +--- + .../org/apache/hadoop/crypto/OpensslCipher.c | 68 +++++++++++++++++-- + 1 file changed, 64 insertions(+), 4 deletions(-) + +diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c +index abff7ea5f1..f17169dec2 100644 +--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c ++++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c +@@ -24,6 +24,57 @@ + + #include "org_apache_hadoop_crypto_OpensslCipher.h" + ++/* ++ # OpenSSL ABI Symbols ++ ++ Available on all OpenSSL versions: ++ ++ | Function | 1.0 | 1.1 | 3.0 | ++ |--------------------------------|-----|-----|-----| ++ | EVP_CIPHER_CTX_new | YES | YES | YES | ++ | EVP_CIPHER_CTX_free | YES | YES | YES | ++ | EVP_CIPHER_CTX_set_padding | YES | YES | YES | ++ | EVP_CIPHER_CTX_test_flags | YES | YES | YES | ++ | EVP_CipherInit_ex | YES | YES | YES | ++ | EVP_CipherUpdate | YES | YES | YES | ++ | EVP_CipherFinal_ex | YES | YES | YES | ++ | ENGINE_by_id | YES | YES | YES | ++ | ENGINE_free | YES | YES | YES | ++ | EVP_aes_256_ctr | YES | YES | YES | ++ | EVP_aes_128_ctr | YES | YES | YES | ++ ++ Available on old versions: ++ ++ | Function | 1.0 | 1.1 | 3.0 | ++ |--------------------------------|-----|-----|-----| ++ | EVP_CIPHER_CTX_cleanup | YES | --- | --- | ++ | EVP_CIPHER_CTX_init | YES | --- | --- | ++ | EVP_CIPHER_CTX_block_size | YES | YES | --- | ++ | EVP_CIPHER_CTX_encrypting | --- | YES | --- | ++ ++ Available on new versions: ++ ++ | Function | 1.0 | 1.1 | 3.0 | ++ |--------------------------------|-----|-----|-----| ++ | OPENSSL_init_crypto | --- | YES | YES | ++ | EVP_CIPHER_CTX_reset | --- | YES | YES | ++ | EVP_CIPHER_CTX_get_block_size | --- | --- | YES | ++ | EVP_CIPHER_CTX_is_encrypting | --- | --- | YES | ++ ++ Optionally available on new versions: ++ ++ | Function | 1.0 | 1.1 | 3.0 | ++ |--------------------------------|-----|-----|-----| ++ | EVP_sm4_ctr | --- | opt | opt | ++ ++ Name changes: ++ ++ | < 3.0 name | >= 3.0 name | ++ |----------------------------|--------------------------------| ++ | EVP_CIPHER_CTX_block_size | EVP_CIPHER_CTX_get_block_size | ++ | EVP_CIPHER_CTX_encrypting | EVP_CIPHER_CTX_is_encrypting | ++ */ ++ + #ifdef UNIX + static EVP_CIPHER_CTX * (*dlsym_EVP_CIPHER_CTX_new)(void); + static void (*dlsym_EVP_CIPHER_CTX_free)(EVP_CIPHER_CTX *); +@@ -87,6 +138,15 @@ static __dlsym_EVP_aes_128_ctr dlsym_EVP_aes_128_ctr; + static HMODULE openssl; + #endif + ++// names changed in OpenSSL 3 ABI - see History section in EVP_EncryptInit(3) ++#if OPENSSL_VERSION_NUMBER >= 0x30000000L ++#define CIPHER_CTX_BLOCK_SIZE "EVP_CIPHER_CTX_get_block_size" ++#define CIPHER_CTX_ENCRYPTING "EVP_CIPHER_CTX_is_encrypting" ++#else ++#define CIPHER_CTX_BLOCK_SIZE "EVP_CIPHER_CTX_block_size" ++#define 
CIPHER_CTX_ENCRYPTING "EVP_CIPHER_CTX_encrypting" ++#endif /* OPENSSL_VERSION_NUMBER >= 0x30000000L */ ++ + static void loadAesCtr(JNIEnv *env) + { + #ifdef UNIX +@@ -142,10 +202,10 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_initIDs + LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_test_flags, env, openssl, \ + "EVP_CIPHER_CTX_test_flags"); + LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_block_size, env, openssl, \ +- "EVP_CIPHER_CTX_block_size"); ++ CIPHER_CTX_BLOCK_SIZE); + #if OPENSSL_VERSION_NUMBER >= 0x10100000L + LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_encrypting, env, openssl, \ +- "EVP_CIPHER_CTX_encrypting"); ++ CIPHER_CTX_ENCRYPTING); + #endif + LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CipherInit_ex, env, openssl, \ + "EVP_CipherInit_ex"); +@@ -173,11 +233,11 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_initIDs + openssl, "EVP_CIPHER_CTX_test_flags"); + LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CIPHER_CTX_block_size, \ + dlsym_EVP_CIPHER_CTX_block_size, env, \ +- openssl, "EVP_CIPHER_CTX_block_size"); ++ openssl, CIPHER_CTX_BLOCK_SIZE); + #if OPENSSL_VERSION_NUMBER >= 0x10100000L + LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CIPHER_CTX_encrypting, \ + dlsym_EVP_CIPHER_CTX_encrypting, env, \ +- openssl, "EVP_CIPHER_CTX_encrypting"); ++ openssl, CIPHER_CTX_ENCRYPTING); + #endif + LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CipherInit_ex, dlsym_EVP_CipherInit_ex, \ + env, openssl, "EVP_CipherInit_ex"); diff --git a/hadoop/stackable/patches/3.3.6/patchable.toml b/hadoop/hadoop/stackable/patches/3.3.6/patchable.toml similarity index 100% rename from hadoop/stackable/patches/3.3.6/patchable.toml rename to hadoop/hadoop/stackable/patches/3.3.6/patchable.toml diff --git a/hadoop/stackable/patches/3.4.1/0001-YARN-11527-Update-node.js.patch b/hadoop/hadoop/stackable/patches/3.4.1/0001-YARN-11527-Update-node.js.patch similarity index 100% rename from hadoop/stackable/patches/3.4.1/0001-YARN-11527-Update-node.js.patch rename to hadoop/hadoop/stackable/patches/3.4.1/0001-YARN-11527-Update-node.js.patch diff --git a/hadoop/stackable/patches/3.4.1/0002-Allow-overriding-datanode-registration-addresses.patch b/hadoop/hadoop/stackable/patches/3.4.1/0002-Allow-overriding-datanode-registration-addresses.patch similarity index 100% rename from hadoop/stackable/patches/3.4.1/0002-Allow-overriding-datanode-registration-addresses.patch rename to hadoop/hadoop/stackable/patches/3.4.1/0002-Allow-overriding-datanode-registration-addresses.patch diff --git a/hadoop/stackable/patches/3.4.1/0003-Async-profiler-also-grab-itimer-events.patch b/hadoop/hadoop/stackable/patches/3.4.1/0003-Async-profiler-also-grab-itimer-events.patch similarity index 100% rename from hadoop/stackable/patches/3.4.1/0003-Async-profiler-also-grab-itimer-events.patch rename to hadoop/hadoop/stackable/patches/3.4.1/0003-Async-profiler-also-grab-itimer-events.patch diff --git a/hadoop/stackable/patches/3.4.1/0004-HDFS-17378-Fix-missing-operationType-for-some-operat.patch b/hadoop/hadoop/stackable/patches/3.4.1/0004-HDFS-17378-Fix-missing-operationType-for-some-operat.patch similarity index 100% rename from hadoop/stackable/patches/3.4.1/0004-HDFS-17378-Fix-missing-operationType-for-some-operat.patch rename to hadoop/hadoop/stackable/patches/3.4.1/0004-HDFS-17378-Fix-missing-operationType-for-some-operat.patch diff --git a/hadoop/stackable/patches/3.4.1/0005-Update-CycloneDX-plugin.patch b/hadoop/hadoop/stackable/patches/3.4.1/0005-Update-CycloneDX-plugin.patch similarity index 100% rename from 
hadoop/stackable/patches/3.4.1/0005-Update-CycloneDX-plugin.patch rename to hadoop/hadoop/stackable/patches/3.4.1/0005-Update-CycloneDX-plugin.patch diff --git a/hadoop/stackable/patches/3.4.1/0006-HADOOP-19352.-Hadoop-OSS-Connector-adds-support-for-.patch b/hadoop/hadoop/stackable/patches/3.4.1/0006-HADOOP-19352.-Hadoop-OSS-Connector-adds-support-for-.patch similarity index 100% rename from hadoop/stackable/patches/3.4.1/0006-HADOOP-19352.-Hadoop-OSS-Connector-adds-support-for-.patch rename to hadoop/hadoop/stackable/patches/3.4.1/0006-HADOOP-19352.-Hadoop-OSS-Connector-adds-support-for-.patch diff --git a/hadoop/stackable/patches/3.4.1/0007-HADOOP-19335.-Bump-netty-to-4.1.116-due-to-CVE-2024-.patch b/hadoop/hadoop/stackable/patches/3.4.1/0007-HADOOP-19335.-Bump-netty-to-4.1.116-due-to-CVE-2024-.patch similarity index 100% rename from hadoop/stackable/patches/3.4.1/0007-HADOOP-19335.-Bump-netty-to-4.1.116-due-to-CVE-2024-.patch rename to hadoop/hadoop/stackable/patches/3.4.1/0007-HADOOP-19335.-Bump-netty-to-4.1.116-due-to-CVE-2024-.patch diff --git a/hadoop/stackable/patches/3.4.1/0008-HADOOP-19465.-Upgrade-to-netty-4.1.118-due-to-CVE-20.patch b/hadoop/hadoop/stackable/patches/3.4.1/0008-HADOOP-19465.-Upgrade-to-netty-4.1.118-due-to-CVE-20.patch similarity index 100% rename from hadoop/stackable/patches/3.4.1/0008-HADOOP-19465.-Upgrade-to-netty-4.1.118-due-to-CVE-20.patch rename to hadoop/hadoop/stackable/patches/3.4.1/0008-HADOOP-19465.-Upgrade-to-netty-4.1.118-due-to-CVE-20.patch diff --git a/hadoop/stackable/patches/3.4.1/0009-HADOOP-19456.-Upgrade-kafka-to-3.9.0-to-fix-CVE-2024.patch b/hadoop/hadoop/stackable/patches/3.4.1/0009-HADOOP-19456.-Upgrade-kafka-to-3.9.0-to-fix-CVE-2024.patch similarity index 100% rename from hadoop/stackable/patches/3.4.1/0009-HADOOP-19456.-Upgrade-kafka-to-3.9.0-to-fix-CVE-2024.patch rename to hadoop/hadoop/stackable/patches/3.4.1/0009-HADOOP-19456.-Upgrade-kafka-to-3.9.0-to-fix-CVE-2024.patch diff --git a/hadoop/stackable/patches/3.4.1/0010-HADOOP-19225.-Upgrade-Jetty-to-9.4.57.v20241219-due-.patch b/hadoop/hadoop/stackable/patches/3.4.1/0010-HADOOP-19225.-Upgrade-Jetty-to-9.4.57.v20241219-due-.patch similarity index 100% rename from hadoop/stackable/patches/3.4.1/0010-HADOOP-19225.-Upgrade-Jetty-to-9.4.57.v20241219-due-.patch rename to hadoop/hadoop/stackable/patches/3.4.1/0010-HADOOP-19225.-Upgrade-Jetty-to-9.4.57.v20241219-due-.patch diff --git a/hadoop/hadoop/stackable/patches/3.4.1/0011-HADOOP-18583.-Fix-loading-of-OpenSSL-3.x-symbols-525.patch b/hadoop/hadoop/stackable/patches/3.4.1/0011-HADOOP-18583.-Fix-loading-of-OpenSSL-3.x-symbols-525.patch new file mode 100644 index 000000000..bc331db1f --- /dev/null +++ b/hadoop/hadoop/stackable/patches/3.4.1/0011-HADOOP-18583.-Fix-loading-of-OpenSSL-3.x-symbols-525.patch @@ -0,0 +1,115 @@ +From cd1c23ea5bddd2796caf2590fef467e488c3bcbf Mon Sep 17 00:00:00 2001 +From: Sebastian Klemke <3669903+packet23@users.noreply.github.com> +Date: Thu, 7 Nov 2024 19:14:13 +0100 +Subject: HADOOP-18583. 
Fix loading of OpenSSL 3.x symbols (#5256) (#7149) + +Contributed by Sebastian Klemke +--- + .../org/apache/hadoop/crypto/OpensslCipher.c | 68 +++++++++++++++++-- + 1 file changed, 64 insertions(+), 4 deletions(-) + +diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c +index 976bf135ce..33be4a394f 100644 +--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c ++++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c +@@ -24,6 +24,57 @@ + + #include "org_apache_hadoop_crypto_OpensslCipher.h" + ++/* ++ # OpenSSL ABI Symbols ++ ++ Available on all OpenSSL versions: ++ ++ | Function | 1.0 | 1.1 | 3.0 | ++ |--------------------------------|-----|-----|-----| ++ | EVP_CIPHER_CTX_new | YES | YES | YES | ++ | EVP_CIPHER_CTX_free | YES | YES | YES | ++ | EVP_CIPHER_CTX_set_padding | YES | YES | YES | ++ | EVP_CIPHER_CTX_test_flags | YES | YES | YES | ++ | EVP_CipherInit_ex | YES | YES | YES | ++ | EVP_CipherUpdate | YES | YES | YES | ++ | EVP_CipherFinal_ex | YES | YES | YES | ++ | ENGINE_by_id | YES | YES | YES | ++ | ENGINE_free | YES | YES | YES | ++ | EVP_aes_256_ctr | YES | YES | YES | ++ | EVP_aes_128_ctr | YES | YES | YES | ++ ++ Available on old versions: ++ ++ | Function | 1.0 | 1.1 | 3.0 | ++ |--------------------------------|-----|-----|-----| ++ | EVP_CIPHER_CTX_cleanup | YES | --- | --- | ++ | EVP_CIPHER_CTX_init | YES | --- | --- | ++ | EVP_CIPHER_CTX_block_size | YES | YES | --- | ++ | EVP_CIPHER_CTX_encrypting | --- | YES | --- | ++ ++ Available on new versions: ++ ++ | Function | 1.0 | 1.1 | 3.0 | ++ |--------------------------------|-----|-----|-----| ++ | OPENSSL_init_crypto | --- | YES | YES | ++ | EVP_CIPHER_CTX_reset | --- | YES | YES | ++ | EVP_CIPHER_CTX_get_block_size | --- | --- | YES | ++ | EVP_CIPHER_CTX_is_encrypting | --- | --- | YES | ++ ++ Optionally available on new versions: ++ ++ | Function | 1.0 | 1.1 | 3.0 | ++ |--------------------------------|-----|-----|-----| ++ | EVP_sm4_ctr | --- | opt | opt | ++ ++ Name changes: ++ ++ | < 3.0 name | >= 3.0 name | ++ |----------------------------|--------------------------------| ++ | EVP_CIPHER_CTX_block_size | EVP_CIPHER_CTX_get_block_size | ++ | EVP_CIPHER_CTX_encrypting | EVP_CIPHER_CTX_is_encrypting | ++ */ ++ + #ifdef UNIX + static EVP_CIPHER_CTX * (*dlsym_EVP_CIPHER_CTX_new)(void); + static void (*dlsym_EVP_CIPHER_CTX_free)(EVP_CIPHER_CTX *); +@@ -106,6 +157,15 @@ static __dlsym_ENGINE_free dlsym_ENGINE_free; + static HMODULE openssl; + #endif + ++// names changed in OpenSSL 3 ABI - see History section in EVP_EncryptInit(3) ++#if OPENSSL_VERSION_NUMBER >= 0x30000000L ++#define CIPHER_CTX_BLOCK_SIZE "EVP_CIPHER_CTX_get_block_size" ++#define CIPHER_CTX_ENCRYPTING "EVP_CIPHER_CTX_is_encrypting" ++#else ++#define CIPHER_CTX_BLOCK_SIZE "EVP_CIPHER_CTX_block_size" ++#define CIPHER_CTX_ENCRYPTING "EVP_CIPHER_CTX_encrypting" ++#endif /* OPENSSL_VERSION_NUMBER >= 0x30000000L */ ++ + static void loadAesCtr(JNIEnv *env) + { + #ifdef UNIX +@@ -170,10 +230,10 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_initIDs + LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_test_flags, env, openssl, \ + "EVP_CIPHER_CTX_test_flags"); + LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_block_size, env, openssl, \ +- "EVP_CIPHER_CTX_block_size"); ++ CIPHER_CTX_BLOCK_SIZE); + #if 
OPENSSL_VERSION_NUMBER >= 0x10100000L + LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_encrypting, env, openssl, \ +- "EVP_CIPHER_CTX_encrypting"); ++ CIPHER_CTX_ENCRYPTING); + #endif + LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CipherInit_ex, env, openssl, \ + "EVP_CipherInit_ex"); +@@ -209,11 +269,11 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_initIDs + openssl, "EVP_CIPHER_CTX_test_flags"); + LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CIPHER_CTX_block_size, \ + dlsym_EVP_CIPHER_CTX_block_size, env, \ +- openssl, "EVP_CIPHER_CTX_block_size"); ++ openssl, CIPHER_CTX_BLOCK_SIZE); + #if OPENSSL_VERSION_NUMBER >= 0x10100000L + LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CIPHER_CTX_encrypting, \ + dlsym_EVP_CIPHER_CTX_encrypting, env, \ +- openssl, "EVP_CIPHER_CTX_encrypting"); ++ openssl, CIPHER_CTX_ENCRYPTING); + #endif + LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CipherInit_ex, dlsym_EVP_CipherInit_ex, \ + env, openssl, "EVP_CipherInit_ex"); diff --git a/hadoop/stackable/patches/3.4.1/patchable.toml b/hadoop/hadoop/stackable/patches/3.4.1/patchable.toml similarity index 100% rename from hadoop/stackable/patches/3.4.1/patchable.toml rename to hadoop/hadoop/stackable/patches/3.4.1/patchable.toml diff --git a/hadoop/stackable/patches/patchable.toml b/hadoop/hadoop/stackable/patches/patchable.toml similarity index 100% rename from hadoop/stackable/patches/patchable.toml rename to hadoop/hadoop/stackable/patches/patchable.toml diff --git a/hadoop/hadoop/versions.py b/hadoop/hadoop/versions.py new file mode 100644 index 000000000..d6907a86f --- /dev/null +++ b/hadoop/hadoop/versions.py @@ -0,0 +1,13 @@ +versions = [ + { + # Not part of SDP 25.7.0, but still required for hbase, hive, spark-k8s + "product": "3.3.6", + "java-devel": "11", + "protobuf": "3.7.1", + }, + { + "product": "3.4.1", + "java-devel": "11", + "protobuf": "3.7.1", + }, +] diff --git a/hadoop/versions.py b/hadoop/versions.py index f1653a05f..e88be1aca 100644 --- a/hadoop/versions.py +++ b/hadoop/versions.py @@ -2,20 +2,20 @@ { # Not part of SDP 25.7.0, but still required for hbase, hive, spark-k8s "product": "3.3.6", + "hadoop/hadoop": "3.3.6", "java-base": "11", "java-devel": "11", "async_profiler": "2.9", "jmx_exporter": "1.3.0", - "protobuf": "3.7.1", "hdfs_utils": "0.4.0", }, { "product": "3.4.1", + "hadoop/hadoop": "3.4.1", "java-base": "11", "java-devel": "11", "async_profiler": "2.9", "jmx_exporter": "1.3.0", - "protobuf": "3.7.1", "hdfs_utils": "0.4.1", }, ] diff --git a/hbase/Dockerfile b/hbase/Dockerfile index cf255a29d..e7a97557d 100644 --- a/hbase/Dockerfile +++ b/hbase/Dockerfile @@ -1,7 +1,7 @@ # syntax=docker/dockerfile:1.16.0@sha256:e2dd261f92e4b763d789984f6eab84be66ab4f5f08052316d8eb8f173593acf7 # check=error=true -FROM stackable/image/hadoop AS hadoop-builder +FROM stackable/image/hadoop/hadoop AS hadoop-builder FROM stackable/image/hbase/hbase AS hbase-builder @@ -17,8 +17,14 @@ FROM stackable/image/java-devel AS hadoop-s3-builder ARG PRODUCT ARG RELEASE -ARG HADOOP +ARG HADOOP_HADOOP +# Reassign the arg to `HADOOP_VERSION` for better readability. +ENV HADOOP_VERSION=${HADOOP_HADOOP} ARG HBASE_HBASE +# Reassign the arg to `HBASE_VERSION` for better readability. +# It is passed as `HBASE_HBASE`, because versions.py has to contain `hbase/hbase` to establish a dependency on the HBase builder. +# The value of `hbase/hbase` is transformed by `bake` and automatically passed as `HBASE_HBASE` arg. 
+ENV HBASE_VERSION=${HBASE_HBASE} ARG STACKABLE_USER_UID USER ${STACKABLE_USER_UID} @@ -31,7 +37,7 @@ COPY --from=hadoop-builder --chown=${STACKABLE_USER_UID}:0 \ # So we try to copy both and if one of them doesn't exist buildx will just ignore it :) /stackable/hadoop/share/hadoop/tools/lib/bundle-*.jar \ /stackable/hadoop/share/hadoop/tools/lib/aws-java-sdk-bundle-*.jar \ - /stackable/hadoop/share/hadoop/tools/lib/hadoop-aws-${HADOOP}-stackable${RELEASE}.jar \ + /stackable/hadoop/share/hadoop/tools/lib/hadoop-aws-${HADOOP_VERSION}-stackable${RELEASE}.jar \ /stackable/hadoop/share/hadoop/tools/lib/ COPY --chown=${STACKABLE_USER_UID}:0 hbase/hbase/stackable/bin/export-snapshot-to-s3.env /stackable/bin/ @@ -43,7 +49,7 @@ export LIBS=$(find /stackable/hadoop/share/hadoop -name '*.jar' -printf '%p:' | # The variable names are intentionally passed to envsubst in single-quotes, # so that they are not expanded. Disabling ShellCheck rules in a Dockerfile # does not work, so please ignore the according warning (SC2016). -envsubst '${HBASE_HBASE}:${RELEASE}:${LIBS}' < /stackable/bin/export-snapshot-to-s3.env > /stackable/bin/export-snapshot-to-s3 +envsubst '${HBASE_VERSION}:${RELEASE}:${LIBS}' < /stackable/bin/export-snapshot-to-s3.env > /stackable/bin/export-snapshot-to-s3 chmod +x /stackable/bin/export-snapshot-to-s3 rm /stackable/bin/export-snapshot-to-s3.env @@ -56,9 +62,13 @@ FROM stackable/image/java-base AS final ARG PRODUCT ARG RELEASE -ARG HADOOP +ARG HADOOP_HADOOP +# Reassign the arg to `HADOOP_VERSION` for better readability. +ENV HADOOP_VERSION=${HADOOP_HADOOP} ARG HBASE_PROFILE ARG HBASE_HBASE +# Reassign the arg to `HBASE_VERSION` for better readability. +ENV HBASE_VERSION=${HBASE_HBASE} ARG HBASE_HBASE_OPERATOR_TOOLS ARG HBASE_HBASE_OPA_AUTHORIZER ARG HBASE_PHOENIX @@ -86,14 +96,14 @@ LABEL io.openshift.tags="ubi9,stackable,hbase,sdp,nosql" LABEL io.k8s.description="${DESCRIPTION}" LABEL io.k8s.display-name="${NAME}" -COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-builder /stackable/hbase-${HBASE_HBASE}-stackable${RELEASE} /stackable/hbase-${HBASE_HBASE}-stackable${RELEASE}/ -COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-builder /stackable/hbase-${HBASE_HBASE}-stackable${RELEASE}-src.tar.gz /stackable +COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-builder /stackable/hbase-${HBASE_VERSION}-stackable${RELEASE} /stackable/hbase-${HBASE_VERSION}-stackable${RELEASE}/ +COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-builder /stackable/hbase-${HBASE_VERSION}-stackable${RELEASE}-src.tar.gz /stackable COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-builder /stackable/async-profiler /stackable/async-profiler/ COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-operator-tools /stackable/hbase-operator-tools-${HBASE_HBASE_OPERATOR_TOOLS}-stackable${RELEASE} /stackable/hbase-operator-tools-${HBASE_HBASE_OPERATOR_TOOLS}-stackable${RELEASE}/ COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-operator-tools /stackable/hbase-operator-tools-${HBASE_HBASE_OPERATOR_TOOLS}-stackable${RELEASE}-src.tar.gz /stackable COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-operator-tools /stackable/bin/hbck2 /stackable/bin/hbck2 -COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-operator-tools /stackable/bin/hbase-entrypoint.sh /stackable/hbase-${HBASE_HBASE}-stackable${RELEASE}/bin/hbase-entrypoint.sh +COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-operator-tools /stackable/bin/hbase-entrypoint.sh /stackable/hbase-${HBASE_VERSION}-stackable${RELEASE}/bin/hbase-entrypoint.sh COPY 
--chown=${STACKABLE_USER_UID}:0 --from=phoenix /stackable/phoenix /stackable/phoenix/ COPY --chown=${STACKABLE_USER_UID}:0 --from=phoenix /stackable/phoenix-${HBASE_PHOENIX}-stackable${RELEASE}-src.tar.gz /stackable @@ -102,15 +112,15 @@ COPY --chown=${STACKABLE_USER_UID}:0 --from=hadoop-s3-builder /stackable/bin/exp COPY --chown=${STACKABLE_USER_UID}:0 --from=hadoop-s3-builder /stackable/hadoop/share/hadoop/tools/lib/ /stackable/hadoop/share/hadoop/tools/lib/ # Copy the dependencies from Hadoop which are required for the Azure Data Lake -# Storage (ADLS) to /stackable/hbase-${HBASE_HBASE}/lib which is on the classpath. +# Storage (ADLS) to /stackable/hbase-${HBASE_VERSION}/lib which is on the classpath. # hadoop-azure-${HADOOP}.jar contains the AzureBlobFileSystem which is required # by hadoop-common-${HADOOP}.jar if the scheme of a file system is "abfs://". COPY --chown=${STACKABLE_USER_UID}:0 --from=hadoop-builder \ - /stackable/hadoop/share/hadoop/tools/lib/hadoop-azure-${HADOOP}-stackable${RELEASE}.jar \ - /stackable/hbase-${HBASE_HBASE}-stackable${RELEASE}/lib/ + /stackable/hadoop/share/hadoop/tools/lib/hadoop-azure-${HADOOP_VERSION}-stackable${RELEASE}.jar \ + /stackable/hbase-${HBASE_VERSION}-stackable${RELEASE}/lib/ COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-opa-authorizer /stackable/hbase-opa-authorizer-${HBASE_HBASE_OPA_AUTHORIZER}-src.tar.gz /stackable -COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-opa-authorizer /stackable/hbase-opa-authorizer/target/hbase-opa-authorizer*.jar /stackable/hbase-${HBASE_HBASE}-stackable${RELEASE}/lib +COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-opa-authorizer /stackable/hbase-opa-authorizer/target/hbase-opa-authorizer*.jar /stackable/hbase-${HBASE_VERSION}-stackable${RELEASE}/lib RUN </{ # `hbase snapshot export` which results in the error # 'No FileSystem for scheme "hdfs"'. Passsing the argument # `--internal-classpath` solves this problem. -/stackable/hbase-${HBASE_HBASE}-stackable${RELEASE}/bin/hbase \ +/stackable/hbase-${HBASE_VERSION}-stackable${RELEASE}/bin/hbase \ --config "$CONF_DIR" \ --internal-classpath \ snapshot export "$@" diff --git a/hbase/hbase/stackable/bin/hbase-entrypoint.sh b/hbase/hbase/stackable/bin/hbase-entrypoint.sh index 3b5192016..04a9cc16f 100755 --- a/hbase/hbase/stackable/bin/hbase-entrypoint.sh +++ b/hbase/hbase/stackable/bin/hbase-entrypoint.sh @@ -11,15 +11,17 @@ set -euo pipefail # master, regionserver, rest HBASE_ROLE_NAME="$1" -# k8s service name for this role+group combo -# ..svc.cluster.local -HBASE_ROLE_SERVICE_NAME="$2" # 16010 for master, 16020 for regionservers etc. 
-HBASE_ROLE_SERVICE_PORT="$3" +HBASE_ROLE_SERVICE_PORT="$2" +# master, regionserver, rest_http, rest_https +HBASE_PORT_NAME="$3" +# ui-http or ui-https +HBASE_UI_PORT_NAME="$4" -HBASE_ROLE_SERVICE_HOST="${HOSTNAME}.${HBASE_ROLE_SERVICE_NAME}" +# Needed for regionmover service and for hbase-site.xml (see below) +HBASE_SERVICE_HOST=$(cat /stackable/listener/default-address/address) -REGION_MOVER_OPTS="--regionserverhost ${HBASE_ROLE_SERVICE_HOST}:${HBASE_ROLE_SERVICE_PORT} --operation unload ${REGION_MOVER_OPTS}" +REGION_MOVER_OPTS="--regionserverhost ${HBASE_SERVICE_HOST}:${HBASE_ROLE_SERVICE_PORT} --operation unload ${REGION_MOVER_OPTS}" prepare_signal_handlers() { unset term_child_pid @@ -64,6 +66,7 @@ cp /stackable/tmp/hdfs/core-site.xml /stackable/conf cp /stackable/tmp/hbase/* /stackable/conf cp /stackable/tmp/log_config/log4j* /stackable/conf +# Kerberos if [ -f /stackable/kerberos/krb5.conf ]; then KERBEROS_REALM=$(grep -oP 'default_realm = \K.*' /stackable/kerberos/krb5.conf) export KERBEROS_REALM @@ -72,6 +75,16 @@ if [ -f /stackable/kerberos/krb5.conf ]; then sed -i -e s/\$\{env\.KERBEROS_REALM\}/"${KERBEROS_REALM}"/g /stackable/conf/hdfs-site.xml fi +# Service endpoints +HBASE_SERVICE_PORT=$(cat /stackable/listener/default-address/ports/"${HBASE_PORT_NAME}") +HBASE_INFO_PORT=$(cat /stackable/listener/default-address/ports/"${HBASE_UI_PORT_NAME}") +HBASE_LISTENER_ENDPOINT="$HBASE_SERVICE_HOST:$HBASE_INFO_PORT" + +sed -i -e s/\$\{HBASE_SERVICE_HOST\}/"${HBASE_SERVICE_HOST}"/g /stackable/conf/hbase-site.xml +sed -i -e s/\$\{HBASE_SERVICE_PORT\}/"${HBASE_SERVICE_PORT}"/g /stackable/conf/hbase-site.xml +sed -i -e s/\$\{HBASE_LISTENER_ENDPOINT\}/"${HBASE_LISTENER_ENDPOINT}"/g /stackable/conf/hbase-site.xml +sed -i -e s/\$\{HBASE_INFO_PORT\}/"${HBASE_INFO_PORT}"/g /stackable/conf/hbase-site.xml + rm -f "${STACKABLE_LOG_DIR}/_vector/shutdown" prepare_signal_handlers /stackable/containerdebug --output="${STACKABLE_LOG_DIR}/containerdebug-state.json" --loop & diff --git a/hbase/hbase/stackable/patches/2.6.1/0005-Allow-overriding-ipc-bind-port-and-use-alternative-p.patch b/hbase/hbase/stackable/patches/2.6.1/0005-Allow-overriding-ipc-bind-port-and-use-alternative-p.patch new file mode 100644 index 000000000..76ef0961b --- /dev/null +++ b/hbase/hbase/stackable/patches/2.6.1/0005-Allow-overriding-ipc-bind-port-and-use-alternative-p.patch @@ -0,0 +1,310 @@ +From e84ed39191101b7dac7a6970afafc00dcec0f135 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Natalie=20Klestrup=20R=C3=B6ijezon?= +Date: Fri, 30 May 2025 14:26:26 +0200 +Subject: Allow overriding ipc bind port and use alternative port from listener + +--- + .../org/apache/hadoop/hbase/HConstants.java | 29 +++++++++++-- + .../apache/hadoop/hbase/master/HMaster.java | 20 +++++++-- + .../hbase/regionserver/HRegionServer.java | 41 +++++++++++++++---- + .../hbase/regionserver/RSRpcServices.java | 8 +++- + 4 files changed, 80 insertions(+), 18 deletions(-) + +diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +index 3b2a58827f..ea96ff8fce 100644 +--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java ++++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +@@ -194,18 +194,27 @@ public final class HConstants { + /** default host address */ + public static final String DEFAULT_HOST = "0.0.0.0"; + +- /** Parameter name for port master listens on. */ ++ /** Parameter name for port master advertises as listening on. 
*/ + public static final String MASTER_PORT = "hbase.master.port"; + ++ /** Parameter name for IPC address that master listens on. (Defaults to hostname.) */ ++ public static final String MASTER_IPC_ADDRESS = "hbase.master.ipc.address"; ++ ++ /** Parameter name for IPC port that master listens on. (Defaults to MASTER_PORT.) */ ++ public static final String MASTER_IPC_PORT = "hbase.master.ipc.port"; ++ + /** default port that the master listens on */ + public static final int DEFAULT_MASTER_PORT = 16000; + + /** default port for master web api */ + public static final int DEFAULT_MASTER_INFOPORT = 16010; + +- /** Configuration key for master web API port */ ++ /** Configuration key for advertised master web API port */ + public static final String MASTER_INFO_PORT = "hbase.master.info.port"; + ++ /** Configuration key for bound master web API port. (Defaults to MASTER_INFO_PORT.) */ ++ public static final String MASTER_BOUND_INFO_PORT = "hbase.master.bound.info.port"; ++ + /** Configuration key for the list of master host:ports **/ + public static final String MASTER_ADDRS_KEY = "hbase.masters"; + +@@ -313,18 +322,27 @@ public final class HConstants { + /** Default value for ZooKeeper session timeout */ + public static final int DEFAULT_ZK_SESSION_TIMEOUT = 90 * 1000; + +- /** Parameter name for port region server listens on. */ ++ /** Parameter name for port region server advertises as listening on. */ + public static final String REGIONSERVER_PORT = "hbase.regionserver.port"; + ++ /** Parameter name for IPC address that region server listens on. (Defaults to hostname.) */ ++ public static final String REGIONSERVER_IPC_ADDRESS = "hbase.regionserver.ipc.address"; ++ ++ /** Parameter name for IPC port that region server listens on. (Defaults to REGIONSERVER_PORT.) */ ++ public static final String REGIONSERVER_IPC_PORT = "hbase.regionserver.ipc.port"; ++ + /** Default port region server listens on. */ + public static final int DEFAULT_REGIONSERVER_PORT = 16020; + + /** default port for region server web api */ + public static final int DEFAULT_REGIONSERVER_INFOPORT = 16030; + +- /** A configuration key for regionserver info port */ ++ /** Configuration key for advertised region server web API port */ + public static final String REGIONSERVER_INFO_PORT = "hbase.regionserver.info.port"; + ++ /** Configuration key for bound region server web API port. (Defaults to REGIONSERVER_INFO_PORT.) */ ++ public static final String REGIONSERVER_BOUND_INFO_PORT = "hbase.regionserver.bound.info.port"; ++ + /** A flag that enables automatic selection of regionserver info port */ + public static final String REGIONSERVER_INFO_PORT_AUTO = REGIONSERVER_INFO_PORT + ".auto"; + +@@ -1392,6 +1410,9 @@ public final class HConstants { + /** Configuration key for setting RPC codec class name */ + public static final String RPC_CODEC_CONF_KEY = "hbase.client.rpc.codec"; + ++ /** Configuration key for setting that the RPC client should bind the client address. This forces outgoing RPC traffic to happen from the same network interface that the RPC server is bound on. 
*/ ++ public static final String RPC_CLIENT_BIND_ADDRESS = "hbase.client.rpc.bind.address"; ++ + /** Configuration key for setting replication codec class name */ + public static final String REPLICATION_CODEC_CONF_KEY = "hbase.replication.rpc.codec"; + +diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +index 3fe5abac27..2f323518da 100644 +--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java ++++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +@@ -20,6 +20,8 @@ package org.apache.hadoop.hbase.master; + import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK; + import static org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS; + import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK; ++import static org.apache.hadoop.hbase.HConstants.MASTER_BOUND_INFO_PORT; ++import static org.apache.hadoop.hbase.HConstants.MASTER_PORT; + import static org.apache.hadoop.hbase.master.cleaner.HFileCleaner.CUSTOM_POOL_SIZE; + import static org.apache.hadoop.hbase.util.DNS.MASTER_HOSTNAME_KEY; + +@@ -559,6 +561,18 @@ public class HMaster extends HRegionServer implements MasterServices { + return conf.get(MASTER_HOSTNAME_KEY); + } + ++ @Override ++ protected int getUseThisPortInstead(Configuration conf) { ++ int port = conf.getInt(MASTER_PORT, 0); ++ return port != 0 ? port : this.rpcServices.getSocketAddress().getPort(); ++ } ++ ++ @Override ++ protected int getUseThisInfoPortInstead(Configuration conf) { ++ int port = conf.getInt(MASTER_BOUND_INFO_PORT, 0); ++ return port != 0 ? port : this.infoServer != null ? this.infoServer.getPort() : -1; ++ } ++ + private void registerConfigurationObservers() { + configurationManager.registerObserver(this.rpcServices); + configurationManager.registerObserver(this); +@@ -586,8 +600,8 @@ public class HMaster extends HRegionServer implements MasterServices { + registerConfigurationObservers(); + Threads.setDaemonThreadRunning(new Thread(() -> TraceUtil.trace(() -> { + try { +- int infoPort = putUpJettyServer(); +- startActiveMasterManager(infoPort); ++ putUpJettyServer(); ++ startActiveMasterManager(useThisInfoPortInstead); + } catch (Throwable t) { + // Make sure we log the exception. 
+ String error = "Failed to become Active Master"; +@@ -2991,7 +3005,7 @@ public class HMaster extends HRegionServer implements MasterServices { + } + case MASTER_INFO_PORT: { + if (infoServer != null) { +- builder.setMasterInfoPort(infoServer.getPort()); ++ builder.setMasterInfoPort(useThisInfoPortInstead); + } + break; + } +diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +index 27bcef2f06..11bd1e58b5 100644 +--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java ++++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +@@ -24,6 +24,9 @@ import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_WAL_MAX_SPL + import static org.apache.hadoop.hbase.HConstants.DEFAULT_SLOW_LOG_SYS_TABLE_CHORE_DURATION; + import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK; + import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER; ++import static org.apache.hadoop.hbase.HConstants.REGIONSERVER_BOUND_INFO_PORT; ++import static org.apache.hadoop.hbase.HConstants.REGIONSERVER_PORT; ++import static org.apache.hadoop.hbase.HConstants.RPC_CLIENT_BIND_ADDRESS; + import static org.apache.hadoop.hbase.master.waleventtracker.WALEventTrackerTableCreator.WAL_EVENT_TRACKER_ENABLED_DEFAULT; + import static org.apache.hadoop.hbase.master.waleventtracker.WALEventTrackerTableCreator.WAL_EVENT_TRACKER_ENABLED_KEY; + import static org.apache.hadoop.hbase.namequeues.NamedQueueServiceChore.NAMED_QUEUE_CHORE_DURATION_DEFAULT; +@@ -505,6 +508,10 @@ public class HRegionServer extends Thread + */ + protected String useThisHostnameInstead; + ++ protected int useThisPortInstead; ++ ++ protected int useThisInfoPortInstead; ++ + /** + * @deprecated since 2.4.0 and will be removed in 4.0.0. Use + * {@link HRegionServer#UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY} instead. +@@ -669,6 +676,8 @@ public class HRegionServer extends Thread + this.namedQueueRecorder = NamedQueueRecorder.getInstance(this.conf); + rpcServices = createRpcServices(); + useThisHostnameInstead = getUseThisHostnameInstead(conf); ++ useThisPortInstead = getUseThisPortInstead(conf); ++ useThisInfoPortInstead = getUseThisInfoPortInstead(conf); + + // if use-ip is enabled, we will use ip to expose Master/RS service for client, + // see HBASE-27304 for details. +@@ -678,7 +687,7 @@ public class HRegionServer extends Thread + useIp ? rpcServices.isa.getAddress().getHostAddress() : rpcServices.isa.getHostName(); + String hostName = + StringUtils.isBlank(useThisHostnameInstead) ? isaHostName : useThisHostnameInstead; +- serverName = ServerName.valueOf(hostName, this.rpcServices.isa.getPort(), this.startcode); ++ serverName = ServerName.valueOf(hostName, useThisPortInstead, this.startcode); + + rpcControllerFactory = RpcControllerFactory.instantiate(this.conf); + rpcRetryingCallerFactory = RpcRetryingCallerFactory.instantiate(this.conf, +@@ -715,7 +724,7 @@ public class HRegionServer extends Thread + + // Some unit tests don't need a cluster, so no zookeeper at all + // Open connection to zookeeper and set primary watcher +- zooKeeper = new ZKWatcher(conf, getProcessName() + ":" + rpcServices.isa.getPort(), this, ++ zooKeeper = new ZKWatcher(conf, getProcessName() + ":" + useThisPortInstead, this, + canCreateBaseZNode()); + // If no master in cluster, skip trying to track one or look for a cluster status. 
+ if (!this.masterless) { +@@ -776,6 +785,16 @@ public class HRegionServer extends Thread + } + } + ++ protected int getUseThisPortInstead(Configuration conf) { ++ int port = conf.getInt(REGIONSERVER_PORT, 0); ++ return port != 0 ? port : this.rpcServices.isa.getPort(); ++ } ++ ++ protected int getUseThisInfoPortInstead(Configuration conf) { ++ int port = conf.getInt(REGIONSERVER_BOUND_INFO_PORT, 0); ++ return port != 0 ? port : this.infoServer != null ? this.infoServer.getPort() : -1; ++ } ++ + private void setupSignalHandlers() { + if (!SystemUtils.IS_OS_WINDOWS) { + HBasePlatformDependent.handle("HUP", (number, name) -> { +@@ -957,8 +976,7 @@ public class HRegionServer extends Thread + bootstrapNodeManager = new BootstrapNodeManager(clusterConnection, masterAddressTracker); + } + // Setup RPC client for master communication +- this.rpcClient = RpcClientFactory.createClient(conf, clusterId, +- new InetSocketAddress(this.rpcServices.isa.getAddress(), 0), ++ this.rpcClient = RpcClientFactory.createClient(conf, clusterId, getInetSocketAddress(this.conf), + clusterConnection.getConnectionMetrics(), Collections.emptyMap()); + span.setStatus(StatusCode.OK); + } catch (Throwable t) { +@@ -972,6 +990,11 @@ public class HRegionServer extends Thread + } + } + ++ private InetSocketAddress getInetSocketAddress(Configuration conf) { ++ return conf.getBoolean(RPC_CLIENT_BIND_ADDRESS, true) ? ++ new InetSocketAddress(this.rpcServices.isa.getAddress(), 0) : new InetSocketAddress(0); ++ } ++ + /** + * Bring up connection to zk ensemble and then wait until a master for this cluster and then after + * that, wait until cluster 'up' flag has been set. This is the order in which master does things. +@@ -1533,6 +1556,7 @@ public class HRegionServer extends Thread + } else { + serverLoad.setInfoServerPort(-1); + } ++ serverLoad.setInfoServerPort(useThisInfoPortInstead); + MetricsUserAggregateSource userSource = + metricsRegionServer.getMetricsUserAggregate().getSource(); + if (userSource != null) { +@@ -1688,7 +1712,7 @@ public class HRegionServer extends Thread + if (key.equals(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER)) { + String hostnameFromMasterPOV = e.getValue(); + this.serverName = ServerName.valueOf(hostnameFromMasterPOV, +- rpcServices.getSocketAddress().getPort(), this.startcode); ++ useThisPortInstead, this.startcode); + String expectedHostName = rpcServices.getSocketAddress().getHostName(); + // if Master use-ip is enabled, RegionServer use-ip will be enabled by default even if it + // is set to disable. so we will use the ip of the RegionServer to compare with the +@@ -1814,7 +1838,7 @@ public class HRegionServer extends Thread + + private void createMyEphemeralNode() throws KeeperException { + RegionServerInfo.Builder rsInfo = RegionServerInfo.newBuilder(); +- rsInfo.setInfoPort(infoServer != null ? infoServer.getPort() : -1); ++ rsInfo.setInfoPort(infoServer != null ? 
useThisInfoPortInstead : -1); + rsInfo.setVersionInfo(ProtobufUtil.getVersionInfo()); + byte[] data = ProtobufUtil.prependPBMagic(rsInfo.build().toByteArray()); + ZKUtil.createEphemeralNodeAndWatch(this.zooKeeper, getMyEphemeralNodePath(), data); +@@ -2479,7 +2503,7 @@ public class HRegionServer extends Thread + LOG.info("Retry starting http info server with port: " + port); + } + } +- port = this.infoServer.getPort(); ++ port = useThisInfoPortInstead; + conf.setInt(HConstants.REGIONSERVER_INFO_PORT, port); + int masterInfoPort = + conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT); +@@ -3073,12 +3097,11 @@ public class HRegionServer extends Thread + LOG.info("reportForDuty to master=" + masterServerName + " with isa=" + rpcServices.isa + + ", startcode=" + this.startcode); + long now = EnvironmentEdgeManager.currentTime(); +- int port = rpcServices.isa.getPort(); + RegionServerStartupRequest.Builder request = RegionServerStartupRequest.newBuilder(); + if (!StringUtils.isBlank(useThisHostnameInstead)) { + request.setUseThisHostnameInstead(useThisHostnameInstead); + } +- request.setPort(port); ++ request.setPort(useThisPortInstead); + request.setServerStartCode(this.startcode); + request.setServerCurrentTime(now); + result = rss.regionServerStartup(null, request.build()); +diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +index b77fcf338a..a86cd273ff 100644 +--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java ++++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +@@ -280,6 +280,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescr + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor; ++import static org.apache.hadoop.hbase.HConstants.MASTER_IPC_ADDRESS; ++import static org.apache.hadoop.hbase.HConstants.MASTER_IPC_PORT; ++import static org.apache.hadoop.hbase.HConstants.REGIONSERVER_IPC_ADDRESS; ++import static org.apache.hadoop.hbase.HConstants.REGIONSERVER_IPC_PORT; + + /** + * Implements the regionserver RPC services. +@@ -1270,14 +1274,14 @@ public class RSRpcServices implements HBaseRPCErrorHandler, AdminService.Blockin + int port = conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT); + // Creation of a HSA will force a resolve. + initialIsa = new InetSocketAddress(hostname, port); +- bindAddress = new InetSocketAddress(conf.get("hbase.master.ipc.address", hostname), port); ++ bindAddress = new InetSocketAddress(conf.get(MASTER_IPC_ADDRESS, hostname), conf.getInt(MASTER_IPC_PORT, port)); + } else { + String hostname = DNS.getHostname(conf, DNS.ServerType.REGIONSERVER); + int port = conf.getInt(HConstants.REGIONSERVER_PORT, HConstants.DEFAULT_REGIONSERVER_PORT); + // Creation of a HSA will force a resolve. 
+ initialIsa = new InetSocketAddress(hostname, port); + bindAddress = +- new InetSocketAddress(conf.get("hbase.regionserver.ipc.address", hostname), port); ++ new InetSocketAddress(conf.get(REGIONSERVER_IPC_ADDRESS, hostname), conf.getInt(REGIONSERVER_IPC_PORT, port)); + } + if (initialIsa.getAddress() == null) { + throw new IllegalArgumentException("Failed resolve of " + initialIsa); diff --git a/hbase/hbase/stackable/patches/2.6.1/0006-Update-property-usage-for-bound-ports.patch b/hbase/hbase/stackable/patches/2.6.1/0006-Update-property-usage-for-bound-ports.patch new file mode 100644 index 000000000..2b6c3d582 --- /dev/null +++ b/hbase/hbase/stackable/patches/2.6.1/0006-Update-property-usage-for-bound-ports.patch @@ -0,0 +1,155 @@ +From 5cc38b12c2bfc5fa3850e13e3eb87086d5f1737a Mon Sep 17 00:00:00 2001 +From: Andrew Kenworthy +Date: Thu, 26 Jun 2025 14:59:01 +0200 +Subject: Update property usage for bound ports + +--- + .../org/apache/hadoop/hbase/HConstants.java | 4 ++-- + .../hadoop/hbase/LocalHBaseCluster.java | 12 +++++------ + .../apache/hadoop/hbase/master/HMaster.java | 6 +++--- + .../hbase/regionserver/HRegionServer.java | 21 +++++++++++++------ + 4 files changed, 26 insertions(+), 17 deletions(-) + +diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +index ea96ff8fce..054beb10d3 100644 +--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java ++++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +@@ -212,7 +212,7 @@ public final class HConstants { + /** Configuration key for advertised master web API port */ + public static final String MASTER_INFO_PORT = "hbase.master.info.port"; + +- /** Configuration key for bound master web API port. (Defaults to MASTER_INFO_PORT.) */ ++ /** Configuration key for bound master web API port */ + public static final String MASTER_BOUND_INFO_PORT = "hbase.master.bound.info.port"; + + /** Configuration key for the list of master host:ports **/ +@@ -340,7 +340,7 @@ public final class HConstants { + /** Configuration key for advertised region server web API port */ + public static final String REGIONSERVER_INFO_PORT = "hbase.regionserver.info.port"; + +- /** Configuration key for bound region server web API port. (Defaults to REGIONSERVER_INFO_PORT.) */ ++ /** Configuration key for bound region server web API port */ + public static final String REGIONSERVER_BOUND_INFO_PORT = "hbase.regionserver.bound.info.port"; + + /** A flag that enables automatic selection of regionserver info port */ +diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java +index 816ef997cb..2114725986 100644 +--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java ++++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java +@@ -144,20 +144,20 @@ public class LocalHBaseCluster { + // treat info ports special; expressly don't change '-1' (keep off) + // in case we make that the default behavior. 
+ if ( +- conf.getInt(HConstants.REGIONSERVER_INFO_PORT, 0) != -1 +- && conf.getInt(HConstants.REGIONSERVER_INFO_PORT, ++ conf.getInt(HConstants.REGIONSERVER_BOUND_INFO_PORT, 0) != -1 ++ && conf.getInt(HConstants.REGIONSERVER_BOUND_INFO_PORT, + HConstants.DEFAULT_REGIONSERVER_INFOPORT) == HConstants.DEFAULT_REGIONSERVER_INFOPORT + ) { + LOG.debug("Setting RS InfoServer Port to random."); +- conf.set(HConstants.REGIONSERVER_INFO_PORT, "0"); ++ conf.set(HConstants.REGIONSERVER_BOUND_INFO_PORT, "0"); + } + if ( +- conf.getInt(HConstants.MASTER_INFO_PORT, 0) != -1 +- && conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT) ++ conf.getInt(HConstants.MASTER_BOUND_INFO_PORT, 0) != -1 ++ && conf.getInt(HConstants.MASTER_BOUND_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT) + == HConstants.DEFAULT_MASTER_INFOPORT + ) { + LOG.debug("Setting Master InfoServer Port to random."); +- conf.set(HConstants.MASTER_INFO_PORT, "0"); ++ conf.set(HConstants.MASTER_BOUND_INFO_PORT, "0"); + } + } + +diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +index 2f323518da..37cd7b3afd 100644 +--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java ++++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +@@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.master; + import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK; + import static org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS; + import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK; +-import static org.apache.hadoop.hbase.HConstants.MASTER_BOUND_INFO_PORT; ++import static org.apache.hadoop.hbase.HConstants.MASTER_INFO_PORT; + import static org.apache.hadoop.hbase.HConstants.MASTER_PORT; + import static org.apache.hadoop.hbase.master.cleaner.HFileCleaner.CUSTOM_POOL_SIZE; + import static org.apache.hadoop.hbase.util.DNS.MASTER_HOSTNAME_KEY; +@@ -569,7 +569,7 @@ public class HMaster extends HRegionServer implements MasterServices { + + @Override + protected int getUseThisInfoPortInstead(Configuration conf) { +- int port = conf.getInt(MASTER_BOUND_INFO_PORT, 0); ++ int port = conf.getInt(MASTER_INFO_PORT, 0); + return port != 0 ? port : this.infoServer != null ? this.infoServer.getPort() : -1; + } + +@@ -3143,7 +3143,7 @@ public class HMaster extends HRegionServer implements MasterServices { + public int getRegionServerInfoPort(final ServerName sn) { + int port = this.serverManager.getInfoPort(sn); + return port == 0 +- ? conf.getInt(HConstants.REGIONSERVER_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT) ++ ? 
conf.getInt(HConstants.REGIONSERVER_BOUND_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT) + : port; + } + +diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +index 11bd1e58b5..358ce486f1 100644 +--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java ++++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +@@ -25,6 +25,7 @@ import static org.apache.hadoop.hbase.HConstants.DEFAULT_SLOW_LOG_SYS_TABLE_CHOR + import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK; + import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER; + import static org.apache.hadoop.hbase.HConstants.REGIONSERVER_BOUND_INFO_PORT; ++import static org.apache.hadoop.hbase.HConstants.REGIONSERVER_INFO_PORT; + import static org.apache.hadoop.hbase.HConstants.REGIONSERVER_PORT; + import static org.apache.hadoop.hbase.HConstants.RPC_CLIENT_BIND_ADDRESS; + import static org.apache.hadoop.hbase.master.waleventtracker.WALEventTrackerTableCreator.WAL_EVENT_TRACKER_ENABLED_DEFAULT; +@@ -791,7 +792,7 @@ public class HRegionServer extends Thread + } + + protected int getUseThisInfoPortInstead(Configuration conf) { +- int port = conf.getInt(REGIONSERVER_BOUND_INFO_PORT, 0); ++ int port = conf.getInt(REGIONSERVER_INFO_PORT, 0); + return port != 0 ? port : this.infoServer != null ? this.infoServer.getPort() : -1; + } + +@@ -2463,11 +2464,13 @@ public class HRegionServer extends Thread + */ + private void putUpWebUI() throws IOException { + int port = +- this.conf.getInt(HConstants.REGIONSERVER_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT); ++ this.conf.getInt(REGIONSERVER_BOUND_INFO_PORT, ++ this.conf.getInt(REGIONSERVER_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT)); + String addr = this.conf.get("hbase.regionserver.info.bindAddress", "0.0.0.0"); + + if (this instanceof HMaster) { +- port = conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT); ++ port = conf.getInt(HConstants.MASTER_BOUND_INFO_PORT, ++ this.conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT)); + addr = this.conf.get("hbase.master.info.bindAddress", "0.0.0.0"); + } + // -1 is for disabling info server +@@ -2503,12 +2506,18 @@ public class HRegionServer extends Thread + LOG.info("Retry starting http info server with port: " + port); + } + } +- port = useThisInfoPortInstead; +- conf.setInt(HConstants.REGIONSERVER_INFO_PORT, port); ++ ++ // update bound ports ++ port = this.infoServer.getPort(); ++ conf.setInt(REGIONSERVER_BOUND_INFO_PORT, port); ++ conf.setInt(HConstants.MASTER_BOUND_INFO_PORT, port); ++ ++ // set advertised ports ++ conf.setInt(REGIONSERVER_INFO_PORT, useThisInfoPortInstead); + int masterInfoPort = + conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT); + conf.setInt("hbase.master.info.port.orig", masterInfoPort); +- conf.setInt(HConstants.MASTER_INFO_PORT, port); ++ conf.setInt(HConstants.MASTER_INFO_PORT, useThisInfoPortInstead); + } + + /* diff --git a/hbase/hbase/stackable/patches/2.6.2/0005-Allow-overriding-ipc-bind-port-and-use-alternative-p.patch b/hbase/hbase/stackable/patches/2.6.2/0005-Allow-overriding-ipc-bind-port-and-use-alternative-p.patch new file mode 100644 index 000000000..7f9dc023e --- /dev/null +++ b/hbase/hbase/stackable/patches/2.6.2/0005-Allow-overriding-ipc-bind-port-and-use-alternative-p.patch @@ 
-0,0 +1,314 @@ +From d8b2c245ad1ee6f79060875cc76049e97f7b459b Mon Sep 17 00:00:00 2001 +From: Andrew Kenworthy +Date: Mon, 16 Jun 2025 14:44:32 +0200 +Subject: Allow overriding ipc bind port and use alternative port from listener + +--- + .../org/apache/hadoop/hbase/HConstants.java | 29 ++++++++++-- + .../apache/hadoop/hbase/master/HMaster.java | 20 +++++++-- + .../hbase/regionserver/HRegionServer.java | 45 +++++++++++++------ + .../hbase/regionserver/RSRpcServices.java | 8 +++- + 4 files changed, 80 insertions(+), 22 deletions(-) + +diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +index 12f8bc6df0..4d892755d2 100644 +--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java ++++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +@@ -194,18 +194,27 @@ public final class HConstants { + /** default host address */ + public static final String DEFAULT_HOST = "0.0.0.0"; + +- /** Parameter name for port master listens on. */ ++ /** Parameter name for port master advertises as listening on. */ + public static final String MASTER_PORT = "hbase.master.port"; + ++ /** Parameter name for IPC address that master listens on. (Defaults to hostname.) */ ++ public static final String MASTER_IPC_ADDRESS = "hbase.master.ipc.address"; ++ ++ /** Parameter name for IPC port that master listens on. (Defaults to MASTER_PORT.) */ ++ public static final String MASTER_IPC_PORT = "hbase.master.ipc.port"; ++ + /** default port that the master listens on */ + public static final int DEFAULT_MASTER_PORT = 16000; + + /** default port for master web api */ + public static final int DEFAULT_MASTER_INFOPORT = 16010; + +- /** Configuration key for master web API port */ ++ /** Configuration key for advertised master web API port */ + public static final String MASTER_INFO_PORT = "hbase.master.info.port"; + ++ /** Configuration key for bound master web API port. (Defaults to MASTER_INFO_PORT.) */ ++ public static final String MASTER_BOUND_INFO_PORT = "hbase.master.bound.info.port"; ++ + /** Configuration key for the list of master host:ports **/ + public static final String MASTER_ADDRS_KEY = "hbase.masters"; + +@@ -313,18 +322,27 @@ public final class HConstants { + /** Default value for ZooKeeper session timeout */ + public static final int DEFAULT_ZK_SESSION_TIMEOUT = 90 * 1000; + +- /** Parameter name for port region server listens on. */ ++ /** Parameter name for port region server advertises as listening on. */ + public static final String REGIONSERVER_PORT = "hbase.regionserver.port"; + ++ /** Parameter name for IPC address that region server listens on. (Defaults to hostname.) */ ++ public static final String REGIONSERVER_IPC_ADDRESS = "hbase.regionserver.ipc.address"; ++ ++ /** Parameter name for IPC port that region server listens on. (Defaults to REGIONSERVER_PORT.) */ ++ public static final String REGIONSERVER_IPC_PORT = "hbase.regionserver.ipc.port"; ++ + /** Default port region server listens on. */ + public static final int DEFAULT_REGIONSERVER_PORT = 16020; + + /** default port for region server web api */ + public static final int DEFAULT_REGIONSERVER_INFOPORT = 16030; + +- /** A configuration key for regionserver info port */ ++ /** Configuration key for advertised region server web API port */ + public static final String REGIONSERVER_INFO_PORT = "hbase.regionserver.info.port"; + ++ /** Configuration key for bound region server web API port. 
(Defaults to REGIONSERVER_INFO_PORT.) */ ++ public static final String REGIONSERVER_BOUND_INFO_PORT = "hbase.regionserver.bound.info.port"; ++ + /** A flag that enables automatic selection of regionserver info port */ + public static final String REGIONSERVER_INFO_PORT_AUTO = REGIONSERVER_INFO_PORT + ".auto"; + +@@ -1397,6 +1415,9 @@ public final class HConstants { + /** Configuration key for setting RPC codec class name */ + public static final String RPC_CODEC_CONF_KEY = "hbase.client.rpc.codec"; + ++ /** Configuration key for setting that the RPC client should bind the client address. This forces outgoing RPC traffic to happen from the same network interface that the RPC server is bound on. */ ++ public static final String RPC_CLIENT_BIND_ADDRESS = "hbase.client.rpc.bind.address"; ++ + /** Configuration key for setting replication codec class name */ + public static final String REPLICATION_CODEC_CONF_KEY = "hbase.replication.rpc.codec"; + +diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +index 9cafbb7cbf..313124d1d0 100644 +--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java ++++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +@@ -17,6 +17,8 @@ + */ + package org.apache.hadoop.hbase.master; + ++import static org.apache.hadoop.hbase.HConstants.MASTER_BOUND_INFO_PORT; ++import static org.apache.hadoop.hbase.HConstants.MASTER_PORT; + import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK; + import static org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS; + import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK; +@@ -570,6 +572,18 @@ public class HMaster extends HRegionServer implements MasterServices { + return conf.get(MASTER_HOSTNAME_KEY); + } + ++ @Override ++ protected int getUseThisPortInstead(Configuration conf) { ++ int port = conf.getInt(MASTER_PORT, 0); ++ return port != 0 ? port : this.rpcServices.getSocketAddress().getPort(); ++ } ++ ++ @Override ++ protected int getUseThisInfoPortInstead(Configuration conf) { ++ int port = conf.getInt(MASTER_BOUND_INFO_PORT, 0); ++ return port != 0 ? port : this.infoServer != null ? this.infoServer.getPort() : -1; ++ } ++ + private void registerConfigurationObservers() { + configurationManager.registerObserver(this.rpcServices); + configurationManager.registerObserver(this); +@@ -597,8 +611,8 @@ public class HMaster extends HRegionServer implements MasterServices { + registerConfigurationObservers(); + Threads.setDaemonThreadRunning(new Thread(() -> TraceUtil.trace(() -> { + try { +- int infoPort = putUpJettyServer(); +- startActiveMasterManager(infoPort); ++ putUpJettyServer(); ++ startActiveMasterManager(useThisInfoPortInstead); + } catch (Throwable t) { + // Make sure we log the exception. 
+ String error = "Failed to become Active Master"; +@@ -3006,7 +3020,7 @@ public class HMaster extends HRegionServer implements MasterServices { + } + case MASTER_INFO_PORT: { + if (infoServer != null) { +- builder.setMasterInfoPort(infoServer.getPort()); ++ builder.setMasterInfoPort(useThisInfoPortInstead); + } + break; + } +diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +index 351b4fef19..68f56ab796 100644 +--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java ++++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +@@ -24,6 +24,9 @@ import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_WAL_MAX_SPL + import static org.apache.hadoop.hbase.HConstants.DEFAULT_SLOW_LOG_SYS_TABLE_CHORE_DURATION; + import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK; + import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER; ++import static org.apache.hadoop.hbase.HConstants.REGIONSERVER_BOUND_INFO_PORT; ++import static org.apache.hadoop.hbase.HConstants.REGIONSERVER_PORT; ++import static org.apache.hadoop.hbase.HConstants.RPC_CLIENT_BIND_ADDRESS; + import static org.apache.hadoop.hbase.master.waleventtracker.WALEventTrackerTableCreator.WAL_EVENT_TRACKER_ENABLED_DEFAULT; + import static org.apache.hadoop.hbase.master.waleventtracker.WALEventTrackerTableCreator.WAL_EVENT_TRACKER_ENABLED_KEY; + import static org.apache.hadoop.hbase.namequeues.NamedQueueServiceChore.NAMED_QUEUE_CHORE_DURATION_DEFAULT; +@@ -505,6 +508,10 @@ public class HRegionServer extends Thread + */ + protected String useThisHostnameInstead; + ++ protected int useThisPortInstead; ++ ++ protected int useThisInfoPortInstead; ++ + /** + * @deprecated since 2.4.0 and will be removed in 4.0.0. Use + * {@link HRegionServer#UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY} instead. +@@ -669,6 +676,8 @@ public class HRegionServer extends Thread + this.namedQueueRecorder = NamedQueueRecorder.getInstance(this.conf); + rpcServices = createRpcServices(); + useThisHostnameInstead = getUseThisHostnameInstead(conf); ++ useThisPortInstead = getUseThisPortInstead(conf); ++ useThisInfoPortInstead = getUseThisInfoPortInstead(conf); + + // if use-ip is enabled, we will use ip to expose Master/RS service for client, + // see HBASE-27304 for details. +@@ -678,7 +687,7 @@ public class HRegionServer extends Thread + useIp ? rpcServices.isa.getAddress().getHostAddress() : rpcServices.isa.getHostName(); + String hostName = + StringUtils.isBlank(useThisHostnameInstead) ? isaHostName : useThisHostnameInstead; +- serverName = ServerName.valueOf(hostName, this.rpcServices.isa.getPort(), this.startcode); ++ serverName = ServerName.valueOf(hostName, useThisPortInstead, this.startcode); + + rpcControllerFactory = RpcControllerFactory.instantiate(this.conf); + rpcRetryingCallerFactory = RpcRetryingCallerFactory.instantiate(this.conf, +@@ -715,7 +724,7 @@ public class HRegionServer extends Thread + + // Some unit tests don't need a cluster, so no zookeeper at all + // Open connection to zookeeper and set primary watcher +- zooKeeper = new ZKWatcher(conf, getProcessName() + ":" + rpcServices.isa.getPort(), this, ++ zooKeeper = new ZKWatcher(conf, getProcessName() + ":" + useThisPortInstead, this, + canCreateBaseZNode()); + // If no master in cluster, skip trying to track one or look for a cluster status. 
+ if (!this.masterless) { +@@ -776,6 +785,16 @@ public class HRegionServer extends Thread + } + } + ++ protected int getUseThisPortInstead(Configuration conf) { ++ int port = conf.getInt(REGIONSERVER_PORT, 0); ++ return port != 0 ? port : this.rpcServices.isa.getPort(); ++ } ++ ++ protected int getUseThisInfoPortInstead(Configuration conf) { ++ int port = conf.getInt(REGIONSERVER_BOUND_INFO_PORT, 0); ++ return port != 0 ? port : this.infoServer != null ? this.infoServer.getPort() : -1; ++ } ++ + private void setupSignalHandlers() { + if (!SystemUtils.IS_OS_WINDOWS) { + HBasePlatformDependent.handle("HUP", (number, name) -> { +@@ -958,7 +977,7 @@ public class HRegionServer extends Thread + } + // Setup RPC client for master communication + this.rpcClient = RpcClientFactory.createClient(conf, clusterId, +- new InetSocketAddress(this.rpcServices.isa.getAddress(), 0), ++ getInetSocketAddress(this.conf), + clusterConnection.getConnectionMetrics(), Collections.emptyMap()); + span.setStatus(StatusCode.OK); + } catch (Throwable t) { +@@ -972,6 +991,11 @@ public class HRegionServer extends Thread + } + } + ++ private InetSocketAddress getInetSocketAddress(Configuration conf) { ++ return conf.getBoolean(RPC_CLIENT_BIND_ADDRESS, true) ? ++ new InetSocketAddress(this.rpcServices.isa.getAddress(), 0) : new InetSocketAddress(0); ++ } ++ + /** + * Bring up connection to zk ensemble and then wait until a master for this cluster and then after + * that, wait until cluster 'up' flag has been set. This is the order in which master does things. +@@ -1528,11 +1552,7 @@ public class HRegionServer extends Thread + + serverLoad.setReportStartTime(reportStartTime); + serverLoad.setReportEndTime(reportEndTime); +- if (this.infoServer != null) { +- serverLoad.setInfoServerPort(this.infoServer.getPort()); +- } else { +- serverLoad.setInfoServerPort(-1); +- } ++ serverLoad.setInfoServerPort(useThisInfoPortInstead); + MetricsUserAggregateSource userSource = + metricsRegionServer.getMetricsUserAggregate().getSource(); + if (userSource != null) { +@@ -1688,7 +1708,7 @@ public class HRegionServer extends Thread + if (key.equals(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER)) { + String hostnameFromMasterPOV = e.getValue(); + this.serverName = ServerName.valueOf(hostnameFromMasterPOV, +- rpcServices.getSocketAddress().getPort(), this.startcode); ++ useThisPortInstead, this.startcode); + String expectedHostName = rpcServices.getSocketAddress().getHostName(); + // if Master use-ip is enabled, RegionServer use-ip will be enabled by default even if it + // is set to disable. so we will use the ip of the RegionServer to compare with the +@@ -1814,7 +1834,7 @@ public class HRegionServer extends Thread + + private void createMyEphemeralNode() throws KeeperException { + RegionServerInfo.Builder rsInfo = RegionServerInfo.newBuilder(); +- rsInfo.setInfoPort(infoServer != null ? infoServer.getPort() : -1); ++ rsInfo.setInfoPort(infoServer != null ? 
useThisInfoPortInstead : -1); + rsInfo.setVersionInfo(ProtobufUtil.getVersionInfo()); + byte[] data = ProtobufUtil.prependPBMagic(rsInfo.build().toByteArray()); + ZKUtil.createEphemeralNodeAndWatch(this.zooKeeper, getMyEphemeralNodePath(), data); +@@ -2481,7 +2501,7 @@ public class HRegionServer extends Thread + LOG.info("Retry starting http info server with port: " + port); + } + } +- port = this.infoServer.getPort(); ++ port = useThisInfoPortInstead; + conf.setInt(HConstants.REGIONSERVER_INFO_PORT, port); + int masterInfoPort = + conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT); +@@ -3075,12 +3095,11 @@ public class HRegionServer extends Thread + LOG.info("reportForDuty to master=" + masterServerName + " with isa=" + rpcServices.isa + + ", startcode=" + this.startcode); + long now = EnvironmentEdgeManager.currentTime(); +- int port = rpcServices.isa.getPort(); + RegionServerStartupRequest.Builder request = RegionServerStartupRequest.newBuilder(); + if (!StringUtils.isBlank(useThisHostnameInstead)) { + request.setUseThisHostnameInstead(useThisHostnameInstead); + } +- request.setPort(port); ++ request.setPort(useThisPortInstead); + request.setServerStartCode(this.startcode); + request.setServerCurrentTime(now); + result = rss.regionServerStartup(null, request.build()); +diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +index b77fcf338a..a86cd273ff 100644 +--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java ++++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +@@ -280,6 +280,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescr + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor; ++import static org.apache.hadoop.hbase.HConstants.MASTER_IPC_ADDRESS; ++import static org.apache.hadoop.hbase.HConstants.MASTER_IPC_PORT; ++import static org.apache.hadoop.hbase.HConstants.REGIONSERVER_IPC_ADDRESS; ++import static org.apache.hadoop.hbase.HConstants.REGIONSERVER_IPC_PORT; + + /** + * Implements the regionserver RPC services. +@@ -1270,14 +1274,14 @@ public class RSRpcServices implements HBaseRPCErrorHandler, AdminService.Blockin + int port = conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT); + // Creation of a HSA will force a resolve. + initialIsa = new InetSocketAddress(hostname, port); +- bindAddress = new InetSocketAddress(conf.get("hbase.master.ipc.address", hostname), port); ++ bindAddress = new InetSocketAddress(conf.get(MASTER_IPC_ADDRESS, hostname), conf.getInt(MASTER_IPC_PORT, port)); + } else { + String hostname = DNS.getHostname(conf, DNS.ServerType.REGIONSERVER); + int port = conf.getInt(HConstants.REGIONSERVER_PORT, HConstants.DEFAULT_REGIONSERVER_PORT); + // Creation of a HSA will force a resolve. 
+ initialIsa = new InetSocketAddress(hostname, port); + bindAddress = +- new InetSocketAddress(conf.get("hbase.regionserver.ipc.address", hostname), port); ++ new InetSocketAddress(conf.get(REGIONSERVER_IPC_ADDRESS, hostname), conf.getInt(REGIONSERVER_IPC_PORT, port)); + } + if (initialIsa.getAddress() == null) { + throw new IllegalArgumentException("Failed resolve of " + initialIsa); diff --git a/hbase/hbase/stackable/patches/2.6.2/0006-Update-property-usage-for-bound-ports.patch b/hbase/hbase/stackable/patches/2.6.2/0006-Update-property-usage-for-bound-ports.patch new file mode 100644 index 000000000..9a5cfae42 --- /dev/null +++ b/hbase/hbase/stackable/patches/2.6.2/0006-Update-property-usage-for-bound-ports.patch @@ -0,0 +1,156 @@ +From a36c936d8132bad255bb2be40e4b1dde2a44f478 Mon Sep 17 00:00:00 2001 +From: Andrew Kenworthy +Date: Thu, 26 Jun 2025 16:58:47 +0200 +Subject: Update property usage for bound ports + +--- + .../org/apache/hadoop/hbase/HConstants.java | 4 ++-- + .../hadoop/hbase/LocalHBaseCluster.java | 12 +++++------ + .../apache/hadoop/hbase/master/HMaster.java | 6 +++--- + .../hbase/regionserver/HRegionServer.java | 21 +++++++++++++------ + 4 files changed, 26 insertions(+), 17 deletions(-) + +diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +index 4d892755d2..3f852e7acc 100644 +--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java ++++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +@@ -212,7 +212,7 @@ public final class HConstants { + /** Configuration key for advertised master web API port */ + public static final String MASTER_INFO_PORT = "hbase.master.info.port"; + +- /** Configuration key for bound master web API port. (Defaults to MASTER_INFO_PORT.) */ ++ /** Configuration key for bound master web API port */ + public static final String MASTER_BOUND_INFO_PORT = "hbase.master.bound.info.port"; + + /** Configuration key for the list of master host:ports **/ +@@ -340,7 +340,7 @@ public final class HConstants { + /** Configuration key for advertised region server web API port */ + public static final String REGIONSERVER_INFO_PORT = "hbase.regionserver.info.port"; + +- /** Configuration key for bound region server web API port. (Defaults to REGIONSERVER_INFO_PORT.) */ ++ /** Configuration key for bound region server web API port */ + public static final String REGIONSERVER_BOUND_INFO_PORT = "hbase.regionserver.bound.info.port"; + + /** A flag that enables automatic selection of regionserver info port */ +diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java +index 816ef997cb..2114725986 100644 +--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java ++++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java +@@ -144,20 +144,20 @@ public class LocalHBaseCluster { + // treat info ports special; expressly don't change '-1' (keep off) + // in case we make that the default behavior. 
+ if ( +- conf.getInt(HConstants.REGIONSERVER_INFO_PORT, 0) != -1 +- && conf.getInt(HConstants.REGIONSERVER_INFO_PORT, ++ conf.getInt(HConstants.REGIONSERVER_BOUND_INFO_PORT, 0) != -1 ++ && conf.getInt(HConstants.REGIONSERVER_BOUND_INFO_PORT, + HConstants.DEFAULT_REGIONSERVER_INFOPORT) == HConstants.DEFAULT_REGIONSERVER_INFOPORT + ) { + LOG.debug("Setting RS InfoServer Port to random."); +- conf.set(HConstants.REGIONSERVER_INFO_PORT, "0"); ++ conf.set(HConstants.REGIONSERVER_BOUND_INFO_PORT, "0"); + } + if ( +- conf.getInt(HConstants.MASTER_INFO_PORT, 0) != -1 +- && conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT) ++ conf.getInt(HConstants.MASTER_BOUND_INFO_PORT, 0) != -1 ++ && conf.getInt(HConstants.MASTER_BOUND_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT) + == HConstants.DEFAULT_MASTER_INFOPORT + ) { + LOG.debug("Setting Master InfoServer Port to random."); +- conf.set(HConstants.MASTER_INFO_PORT, "0"); ++ conf.set(HConstants.MASTER_BOUND_INFO_PORT, "0"); + } + } + +diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +index 313124d1d0..00e01c116e 100644 +--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java ++++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +@@ -17,7 +17,7 @@ + */ + package org.apache.hadoop.hbase.master; + +-import static org.apache.hadoop.hbase.HConstants.MASTER_BOUND_INFO_PORT; ++import static org.apache.hadoop.hbase.HConstants.MASTER_INFO_PORT; + import static org.apache.hadoop.hbase.HConstants.MASTER_PORT; + import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK; + import static org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS; +@@ -580,7 +580,7 @@ public class HMaster extends HRegionServer implements MasterServices { + + @Override + protected int getUseThisInfoPortInstead(Configuration conf) { +- int port = conf.getInt(MASTER_BOUND_INFO_PORT, 0); ++ int port = conf.getInt(MASTER_INFO_PORT, 0); + return port != 0 ? port : this.infoServer != null ? this.infoServer.getPort() : -1; + } + +@@ -3158,7 +3158,7 @@ public class HMaster extends HRegionServer implements MasterServices { + public int getRegionServerInfoPort(final ServerName sn) { + int port = this.serverManager.getInfoPort(sn); + return port == 0 +- ? conf.getInt(HConstants.REGIONSERVER_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT) ++ ? 
conf.getInt(HConstants.REGIONSERVER_BOUND_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT) + : port; + } + +diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +index 68f56ab796..b610d11651 100644 +--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java ++++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +@@ -25,6 +25,7 @@ import static org.apache.hadoop.hbase.HConstants.DEFAULT_SLOW_LOG_SYS_TABLE_CHOR + import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK; + import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER; + import static org.apache.hadoop.hbase.HConstants.REGIONSERVER_BOUND_INFO_PORT; ++import static org.apache.hadoop.hbase.HConstants.REGIONSERVER_INFO_PORT; + import static org.apache.hadoop.hbase.HConstants.REGIONSERVER_PORT; + import static org.apache.hadoop.hbase.HConstants.RPC_CLIENT_BIND_ADDRESS; + import static org.apache.hadoop.hbase.master.waleventtracker.WALEventTrackerTableCreator.WAL_EVENT_TRACKER_ENABLED_DEFAULT; +@@ -791,7 +792,7 @@ public class HRegionServer extends Thread + } + + protected int getUseThisInfoPortInstead(Configuration conf) { +- int port = conf.getInt(REGIONSERVER_BOUND_INFO_PORT, 0); ++ int port = conf.getInt(REGIONSERVER_INFO_PORT, 0); + return port != 0 ? port : this.infoServer != null ? this.infoServer.getPort() : -1; + } + +@@ -2459,12 +2460,14 @@ public class HRegionServer extends Thread + */ + private void putUpWebUI() throws IOException { + int port = +- this.conf.getInt(HConstants.REGIONSERVER_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT); ++ this.conf.getInt(REGIONSERVER_BOUND_INFO_PORT, ++ this.conf.getInt(REGIONSERVER_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT)); + String addr = this.conf.get("hbase.regionserver.info.bindAddress", "0.0.0.0"); + + boolean isMaster = false; + if (this instanceof HMaster) { +- port = conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT); ++ port = conf.getInt(HConstants.MASTER_BOUND_INFO_PORT, ++ this.conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT)); + addr = this.conf.get("hbase.master.info.bindAddress", "0.0.0.0"); + isMaster = true; + } +@@ -2501,12 +2504,18 @@ public class HRegionServer extends Thread + LOG.info("Retry starting http info server with port: " + port); + } + } +- port = useThisInfoPortInstead; +- conf.setInt(HConstants.REGIONSERVER_INFO_PORT, port); ++ ++ // update bound ports ++ port = this.infoServer.getPort(); ++ conf.setInt(REGIONSERVER_BOUND_INFO_PORT, port); ++ conf.setInt(HConstants.MASTER_BOUND_INFO_PORT, port); ++ ++ // set advertised ports ++ conf.setInt(REGIONSERVER_INFO_PORT, useThisInfoPortInstead); + int masterInfoPort = + conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT); + conf.setInt("hbase.master.info.port.orig", masterInfoPort); +- conf.setInt(HConstants.MASTER_INFO_PORT, port); ++ conf.setInt(HConstants.MASTER_INFO_PORT, useThisInfoPortInstead); + } + + /* diff --git a/hbase/hbase/versions.py b/hbase/hbase/versions.py index 136179d58..205cedacb 100644 --- a/hbase/hbase/versions.py +++ b/hbase/hbase/versions.py @@ -3,7 +3,7 @@ # hbase-thirdparty is used to build the hbase-operator-tools and should be set to the version defined in the POM of HBase. 
{ "product": "2.6.1", - "hadoop": "3.3.6", + "hadoop/hadoop": "3.3.6", "java-base": "11", "java-devel": "11", "async_profiler": "2.9", @@ -11,7 +11,7 @@ }, { "product": "2.6.2", - "hadoop": "3.4.1", + "hadoop/hadoop": "3.4.1", "java-base": "11", "java-devel": "11", "async_profiler": "2.9", diff --git a/hbase/phoenix/Dockerfile b/hbase/phoenix/Dockerfile index faff4faa6..9205a76c4 100644 --- a/hbase/phoenix/Dockerfile +++ b/hbase/phoenix/Dockerfile @@ -1,4 +1,4 @@ -FROM stackable/image/hadoop AS hadoop-builder +FROM stackable/image/hadoop/hadoop AS hadoop-builder FROM stackable/image/hbase/hbase AS hbase-builder @@ -8,8 +8,12 @@ ARG PRODUCT ARG RELEASE ARG ASYNC_PROFILER ARG HBASE_HBASE +# Reassign the arg to `HBASE_VERSION` for better readability. +ENV HBASE_VERSION=${HBASE_HBASE} ARG HBASE_PROFILE -ARG HADOOP +ARG HADOOP_HADOOP +# Reassign the arg to `HADOOP_VERSION` for better readability. +ENV HADOOP_VERSION=${HADOOP_HADOOP} ARG STACKABLE_USER_UID ARG PHOENIX_VERSION @@ -45,9 +49,9 @@ tar -czf /stackable/phoenix-${PRODUCT}-stackable${RELEASE}-src.tar.gz . mvn \ --batch-mode \ --no-transfer-progress \ - -Dhbase.version=${HBASE_HBASE}-stackable${RELEASE} \ + -Dhbase.version=${HBASE_VERSION}-stackable${RELEASE} \ -Dhbase.profile=${HBASE_PROFILE} \ - -Dhadoop.version=${HADOOP}-stackable${RELEASE} \ + -Dhadoop.version=${HADOOP_VERSION}-stackable${RELEASE} \ -DskipTests \ -Dcheckstyle.skip=true \ clean \ diff --git a/hbase/phoenix/versions.py b/hbase/phoenix/versions.py index e66c11f00..ed0e304e2 100644 --- a/hbase/phoenix/versions.py +++ b/hbase/phoenix/versions.py @@ -3,7 +3,7 @@ "product": "5.2.1-hbase2.6.1", "phoenix_version": "5.2.1", "hbase/hbase": "2.6.1", - "hadoop": "3.3.6", + "hadoop/hadoop": "3.3.6", "java-devel": "11", "hbase_profile": "2.6", "delete_caches": "true", @@ -12,7 +12,7 @@ "product": "5.2.1-hbase2.6.2", "phoenix_version": "5.2.1", "hbase/hbase": "2.6.2", - "hadoop": "3.4.1", + "hadoop/hadoop": "3.4.1", "java-devel": "11", "hbase_profile": "2.6", "delete_caches": "true", diff --git a/hbase/versions.py b/hbase/versions.py index c2f9fdfbf..3ea6dcdbf 100644 --- a/hbase/versions.py +++ b/hbase/versions.py @@ -7,7 +7,7 @@ "hbase/hbase-operator-tools": "1.3.0-fd5a5fb-hbase2.6.1", "hbase/phoenix": "5.2.1-hbase2.6.1", "hbase/hbase-opa-authorizer": "0.1.0", # only for HBase 2.6.1 - "hadoop": "3.3.6", + "hadoop/hadoop": "3.3.6", "java-base": "11", "java-devel": "11", "hbase_profile": "2.6", @@ -19,7 +19,7 @@ "hbase/hbase-operator-tools": "1.3.0-fd5a5fb-hbase2.6.2", "hbase/phoenix": "5.2.1-hbase2.6.2", "hbase/hbase-opa-authorizer": "0.1.0", # only for HBase 2.6.1 - "hadoop": "3.4.1", + "hadoop/hadoop": "3.4.1", "java-base": "11", "java-devel": "11", "hbase_profile": "2.6", diff --git a/hello-world/Dockerfile b/hello-world/Dockerfile deleted file mode 100644 index f664612ee..000000000 --- a/hello-world/Dockerfile +++ /dev/null @@ -1,41 +0,0 @@ -# syntax=docker/dockerfile:1.16.0@sha256:e2dd261f92e4b763d789984f6eab84be66ab4f5f08052316d8eb8f173593acf7 -# check=error=true - -FROM stackable/image/java-base - -ARG PRODUCT -ARG RELEASE -ARG STACKABLE_USER_UID - -LABEL name="Hello World" \ - maintainer="info@stackable.tech" \ - vendor="Stackable GmbH" \ - version="${PRODUCT}" \ - release="${RELEASE}" \ - summary="The Stackable image for the Stackable Hello World Operator" \ - description="This image is deployed by the Stackable Hello World Operator." - -RUN < /dev/null; then - echo "unzip not found. unzip is required to run this script." - exit 1 -fi - -if ! 
command -v zgrep &> /dev/null; then - echo "zgrep not found. zgrep is required to run this script." - exit 1 -fi - -if ! command -v zip &> /dev/null; then - echo "zip not found. zip is required to run this script." - exit 1 -fi - -for targetdir in ${1} -do - echo "Removing JNDI from jar files in $targetdir" - delete_jndi_from_jar_files "$targetdir" -done - -for targetdir in ${1} -do - echo "Removing JNDI from tar.gz files in $targetdir" - # shellcheck disable=SC2044 # the local function is not available when using find with -exec - for targzfile in $(find "$targetdir" -name '*.tar.gz') ; do - delete_jndi_from_targz_file "$targzfile" - done -done - -echo "Run successful" diff --git a/shared/log4shell_1.6.1-log4shell_Linux_aarch64 b/shared/log4shell_1.6.1-log4shell_Linux_aarch64 deleted file mode 100755 index 2726de411..000000000 Binary files a/shared/log4shell_1.6.1-log4shell_Linux_aarch64 and /dev/null differ diff --git a/shared/log4shell_1.6.1-log4shell_Linux_x86_64 b/shared/log4shell_1.6.1-log4shell_Linux_x86_64 deleted file mode 100755 index c5897f357..000000000 Binary files a/shared/log4shell_1.6.1-log4shell_Linux_x86_64 and /dev/null differ diff --git a/shared/log4shell_scanner b/shared/log4shell_scanner deleted file mode 100755 index 1fa82e653..000000000 --- a/shared/log4shell_scanner +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh -ARCH="$(uname --machine)" -/bin/log4shell_scanner_"$ARCH" "$@" diff --git a/spark-connect-client/versions.py b/spark-connect-client/versions.py index 2311806bc..98188f344 100644 --- a/spark-connect-client/versions.py +++ b/spark-connect-client/versions.py @@ -1,7 +1,13 @@ versions = [ { "product": "3.5.6", - "spark-k8s": "3.5.5", + "spark-k8s": "3.5.6", + "java-base": "17", + "python": "3.11", + }, + { + "product": "4.0.0", + "spark-k8s": "4.0.0", "java-base": "17", "python": "3.11", }, diff --git a/spark-k8s/Dockerfile b/spark-k8s/Dockerfile index 9e198757d..87cd5217a 100644 --- a/spark-k8s/Dockerfile +++ b/spark-k8s/Dockerfile @@ -2,7 +2,7 @@ # check=error=true # hadoop-builder: Provides Hadoop libraries -FROM stackable/image/hadoop AS hadoop-builder +FROM stackable/image/hadoop/hadoop AS hadoop-builder # hbase-builder: Provides HBase libraries FROM stackable/image/hbase AS hbase-builder @@ -37,7 +37,9 @@ FROM stackable/image/java-devel AS hbase-connectors-builder ARG PRODUCT ARG RELEASE -ARG HADOOP +ARG HADOOP_HADOOP +# Reassign the arg to `HADOOP_VERSION` for better readability. +ENV HADOOP_VERSION=${HADOOP_HADOOP} ARG HBASE ARG HBASE_CONNECTOR ARG STACKABLE_USER_UID @@ -59,6 +61,17 @@ COPY --chown=${STACKABLE_USER_UID}:0 spark-k8s/hbase-connectors/stackable/patche COPY --chown=${STACKABLE_USER_UID}:0 spark-k8s/hbase-connectors/stackable/patches/${HBASE_CONNECTOR} /stackable/src/spark-k8s/hbase-connectors/stackable/patches/${HBASE_CONNECTOR} RUN <>> Build spark -# Compiling the tests takes a lot of time, so we skip them -# -Dmaven.test.skip=true skips both the compilation and execution of tests -# -DskipTests skips only the execution RUN <]' '{print $3}') mkdir -p dist/connect cd dist/connect - cp "/stackable/spark-${PRODUCT}-stackable${RELEASE}/connector/connect/server/target/spark-connect_${SCALA_BINARY_VERSION}-${PRODUCT}-stackable${RELEASE}.jar" . - cp "/stackable/spark-${PRODUCT}-stackable${RELEASE}/connector/connect/common/target/spark-connect-common_${SCALA_BINARY_VERSION}-${PRODUCT}-stackable${RELEASE}.jar" . 
- cp "/stackable/spark-${PRODUCT}-stackable${RELEASE}/connector/connect/client/jvm/target/spark-connect-client-jvm_${SCALA_BINARY_VERSION}-${PRODUCT}-stackable${RELEASE}.jar" . - - # The Spark operator expects a file named spark-connect_${SCALA_BINARY_VERSION}-${PRODUCT}.jar without the -stackable${RELEASE} suffix. + case "${PRODUCT}" in + 4*) + cp "/stackable/spark-${PRODUCT}-stackable${RELEASE}/sql/connect/server/target/spark-connect_${SCALA_BINARY_VERSION}-${PRODUCT}-stackable${RELEASE}.jar" . + cp "/stackable/spark-${PRODUCT}-stackable${RELEASE}/sql/connect/common/target/spark-connect-common_${SCALA_BINARY_VERSION}-${PRODUCT}-stackable${RELEASE}.jar" . + cp "/stackable/spark-${PRODUCT}-stackable${RELEASE}/sql/connect/client/jvm/target/spark-connect-client-jvm_${SCALA_BINARY_VERSION}-${PRODUCT}-stackable${RELEASE}.jar" . + ;; + *) + cp "/stackable/spark-${PRODUCT}-stackable${RELEASE}/connector/connect/server/target/spark-connect_${SCALA_BINARY_VERSION}-${PRODUCT}-stackable${RELEASE}.jar" . + cp "/stackable/spark-${PRODUCT}-stackable${RELEASE}/connector/connect/common/target/spark-connect-common_${SCALA_BINARY_VERSION}-${PRODUCT}-stackable${RELEASE}.jar" . + cp "/stackable/spark-${PRODUCT}-stackable${RELEASE}/connector/connect/client/jvm/target/spark-connect-client-jvm_${SCALA_BINARY_VERSION}-${PRODUCT}-stackable${RELEASE}.jar" . + ;; + esac + + # This link is needed by the operator and is kept for backwards compatibility. + # TODO: remove it at some time in the future. ln -s "spark-connect_${SCALA_BINARY_VERSION}-${PRODUCT}-stackable${RELEASE}.jar" "spark-connect_${SCALA_BINARY_VERSION}-${PRODUCT}.jar" + # Link to the spark-connect jar without the stackable suffix and scala version. + # This link supersedes the previous link. + ln -s "spark-connect_${SCALA_BINARY_VERSION}-${PRODUCT}-stackable${RELEASE}.jar" "spark-connect-${PRODUCT}.jar" EOF # <<< Build spark @@ -218,13 +259,13 @@ WORKDIR /stackable/spark-${PRODUCT}-stackable${RELEASE}/dist/jars # Copy modules required for s3a:// COPY --from=hadoop-builder --chown=${STACKABLE_USER_UID}:0 \ - /stackable/hadoop/share/hadoop/tools/lib/hadoop-aws-${HADOOP}-stackable${RELEASE}.jar \ + /stackable/hadoop/share/hadoop/tools/lib/hadoop-aws-${HADOOP_VERSION}-stackable${RELEASE}.jar \ /stackable/hadoop/share/hadoop/tools/lib/bundle-${AWS_JAVA_SDK_BUNDLE}.jar \ ./ # Copy modules required for abfs:// COPY --from=hadoop-builder --chown=${STACKABLE_USER_UID}:0 \ - /stackable/hadoop/share/hadoop/tools/lib/hadoop-azure-${HADOOP}-stackable${RELEASE}.jar \ + /stackable/hadoop/share/hadoop/tools/lib/hadoop-azure-${HADOOP_VERSION}-stackable${RELEASE}.jar \ /stackable/hadoop/share/hadoop/tools/lib/azure-storage-${AZURE_STORAGE}.jar \ /stackable/hadoop/share/hadoop/tools/lib/azure-keyvault-core-${AZURE_KEYVAULT_CORE}.jar \ ./ diff --git a/spark-k8s/stackable/patches/4.0.0/0001-Update-CycloneDX-plugin.patch b/spark-k8s/stackable/patches/4.0.0/0001-Update-CycloneDX-plugin.patch new file mode 100644 index 000000000..db7b12530 --- /dev/null +++ b/spark-k8s/stackable/patches/4.0.0/0001-Update-CycloneDX-plugin.patch @@ -0,0 +1,38 @@ +From 2da5608928018dd017c91b904eb8f84a4f6df78a Mon Sep 17 00:00:00 2001 +From: Razvan-Daniel Mihai <84674+razvan@users.noreply.github.com> +Date: Fri, 4 Jul 2025 15:54:55 +0200 +Subject: Update CycloneDX plugin + +--- + dev/make-distribution.sh | 1 - + pom.xml | 5 +++++ + 2 files changed, 5 insertions(+), 1 deletion(-) + +diff --git a/dev/make-distribution.sh b/dev/make-distribution.sh +index 16607e45ae..44e345a245 100755 +--- 
diff --git a/spark-k8s/stackable/patches/4.0.0/0001-Update-CycloneDX-plugin.patch b/spark-k8s/stackable/patches/4.0.0/0001-Update-CycloneDX-plugin.patch
new file mode 100644
index 000000000..db7b12530
--- /dev/null
+++ b/spark-k8s/stackable/patches/4.0.0/0001-Update-CycloneDX-plugin.patch
@@ -0,0 +1,38 @@
+From 2da5608928018dd017c91b904eb8f84a4f6df78a Mon Sep 17 00:00:00 2001
+From: Razvan-Daniel Mihai <84674+razvan@users.noreply.github.com>
+Date: Fri, 4 Jul 2025 15:54:55 +0200
+Subject: Update CycloneDX plugin
+
+---
+ dev/make-distribution.sh | 1 -
+ pom.xml                  | 5 +++++
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/dev/make-distribution.sh b/dev/make-distribution.sh
+index 16607e45ae..44e345a245 100755
+--- a/dev/make-distribution.sh
++++ b/dev/make-distribution.sh
+@@ -176,7 +176,6 @@ BUILD_COMMAND=("$MVN" clean package \
+     -Dmaven.javadoc.skip=true \
+     -Dmaven.scaladoc.skip=true \
+     -Dmaven.source.skip \
+-    -Dcyclonedx.skip=true \
+     $@)
+
+ # Actually build the jar
+diff --git a/pom.xml b/pom.xml
+index 443d46a430..632920f100 100644
+--- a/pom.xml
++++ b/pom.xml
+@@ -3327,6 +3327,11 @@
+         <groupId>org.cyclonedx</groupId>
+         <artifactId>cyclonedx-maven-plugin</artifactId>
+         <version>2.8.0</version>
++        <configuration>
++          <projectType>application</projectType>
++          <schemaVersion>1.5</schemaVersion>
++          <skipNotDeployed>false</skipNotDeployed>
++        </configuration>
+         <executions>
+           <execution>
+             <phase>package</phase>
diff --git a/spark-k8s/stackable/patches/4.0.0/patchable.toml b/spark-k8s/stackable/patches/4.0.0/patchable.toml
new file mode 100644
index 000000000..24d7204e9
--- /dev/null
+++ b/spark-k8s/stackable/patches/4.0.0/patchable.toml
@@ -0,0 +1,2 @@
+base = "fa33ea000a0bda9e5a3fa1af98e8e85b8cc5e4d4"
+mirror = "https://github.com/stackabletech/spark.git"
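Taken together, the patch above removes -Dcyclonedx.skip=true from make-distribution.sh and configures the cyclonedx-maven-plugin (application project type, schema 1.5), so an SBOM is produced during the normal package phase. As a hedged illustration, the same plugin goal can also be run on its own; the module selection and the use of Spark's build/mvn wrapper are assumptions for the example, not something this patch adds:

```bash
# Illustrative only: invokes the upstream CycloneDX Maven goal for a single module.
./build/mvn -pl core org.cyclonedx:cyclonedx-maven-plugin:2.8.0:makeBom
# The plugin writes the SBOM next to the module's build output,
# e.g. core/target/bom.json and core/target/bom.xml by default.
```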
"aws_java_sdk_bundle": "2.24.6", # https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-aws/3.4.1 "azure_storage": "7.0.1", # https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-azure/3.4.1 @@ -35,4 +35,22 @@ "tini": "0.19.0", "hbase_connector": "1.0.1", }, + { + "product": "4.0.0", + "java-base": "17", + "java-devel": "17", + "python": "3.11", + "hadoop": "3.4.1", + "hbase": "2.6.2", + "aws_java_sdk_bundle": "2.24.6", + "azure_storage": "7.0.1", # https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-azure/3.3.4 + "azure_keyvault_core": "1.0.0", # https://mvnrepository.com/artifact/com.microsoft.azure/azure-storage/7.0.1 + "jackson_dataformat_xml": "2.15.2", # https://mvnrepository.com/artifact/org.apache.spark/spark-core_2.13/3.5.1 + "stax2_api": "4.2.1", # https://mvnrepository.com/artifact/com.fasterxml.jackson.dataformat/jackson-dataformat-xml/2.15.2 + "woodstox_core": "6.5.1", # https://mvnrepository.com/artifact/com.fasterxml.jackson.dataformat/jackson-dataformat-xml/2.15.2 + "vector": "0.47.0", + "jmx_exporter": "1.3.0", + "tini": "0.19.0", + "hbase_connector": "1.0.1", + }, ] diff --git a/stackable-base/Dockerfile b/stackable-base/Dockerfile index 47d65e467..7ad3e5c65 100644 --- a/stackable-base/Dockerfile +++ b/stackable-base/Dockerfile @@ -36,7 +36,7 @@ EOF # Find the latest version at https://catalog.redhat.com/software/containers/ubi9/ubi-minimal/615bd9b4075b022acc111bf5?container-tabs=gti # IMPORTANT: Make sure to use the "Manifest List Digest" that references the images for multiple architectures # rather than just the "Image Digest" that references the image for the selected architecture. -FROM registry.access.redhat.com/ubi9/ubi-minimal@sha256:f172b3082a3d1bbe789a1057f03883c1113243564f01cd3020e27548b911d3f8 AS final +FROM registry.access.redhat.com/ubi9/ubi-minimal@sha256:383329bf9c4f968e87e85d30ba3a5cb988a3bbde28b8e4932dcd3a025fd9c98c AS final # intentionally unused ARG PRODUCT diff --git a/stackable-devel/Dockerfile b/stackable-devel/Dockerfile index 55b2204e4..962c6b312 100644 --- a/stackable-devel/Dockerfile +++ b/stackable-devel/Dockerfile @@ -11,7 +11,7 @@ # Find the latest version at https://catalog.redhat.com/software/containers/ubi9/ubi-minimal/615bd9b4075b022acc111bf5?container-tabs=gti # IMPORTANT: Make sure to use the "Manifest List Digest" that references the images for multiple architectures # rather than just the "Image Digest" that references the image for the selected architecture. 
diff --git a/stackable-devel/Dockerfile b/stackable-devel/Dockerfile
index 55b2204e4..962c6b312 100644
--- a/stackable-devel/Dockerfile
+++ b/stackable-devel/Dockerfile
@@ -11,7 +11,7 @@
 # Find the latest version at https://catalog.redhat.com/software/containers/ubi9/ubi-minimal/615bd9b4075b022acc111bf5?container-tabs=gti
 # IMPORTANT: Make sure to use the "Manifest List Digest" that references the images for multiple architectures
 # rather than just the "Image Digest" that references the image for the selected architecture.
-FROM registry.access.redhat.com/ubi9/ubi-minimal@sha256:f172b3082a3d1bbe789a1057f03883c1113243564f01cd3020e27548b911d3f8
+FROM registry.access.redhat.com/ubi9/ubi-minimal@sha256:383329bf9c4f968e87e85d30ba3a5cb988a3bbde28b8e4932dcd3a025fd9c98c

 # intentionally unused
 ARG PRODUCT
@@ -43,7 +43,7 @@ COPY stackable-base/stackable/curlrc /root/.curlrc
 # This SHOULD be kept in sync with operator-templating and other tools to reduce build times
 # Find the latest version here: https://doc.rust-lang.org/stable/releases.html
 # renovate: datasource=github-releases packageName=rust-lang/rust
-ENV RUST_DEFAULT_TOOLCHAIN_VERSION=1.84.1
+ENV RUST_DEFAULT_TOOLCHAIN_VERSION=1.87.0
 # Find the latest version here: https://crates.io/crates/cargo-cyclonedx
 # renovate: datasource=crate packageName=cargo-cyclonedx
 ENV CARGO_CYCLONEDX_CRATE_VERSION=0.5.7
diff --git a/testing-tools/Dockerfile b/testing-tools/Dockerfile
index 21c0f4dde..4e2b1501f 100644
--- a/testing-tools/Dockerfile
+++ b/testing-tools/Dockerfile
@@ -5,11 +5,14 @@
 # https://hub.docker.com/_/python/tags
 # In Docker Hub, open up the tag and look for Index Digest. Otherwise do:
 # docker pull python:3.12-slim-bullseye and see the digest that appears in the output.
-FROM python:3.12-slim-bullseye@sha256:229b2897e6b5c630d83a721e42dd1c96e3ec996323bb822b076eb865e2c6c0b2
+FROM python:3.12-slim-bullseye@sha256:f6d639b794b394cbeb7a9327d5af9976f0e8d61353bcf41916984775c9bbed1a

 ARG PRODUCT
 ARG RELEASE
 ARG KEYCLOAK_VERSION
+ARG STACKABLE_USER_UID
+ARG STACKABLE_USER_GID
+ARG STACKABLE_USER_NAME

 LABEL name="Stackable Testing Tools" \
       maintainer="info@stackable.tech" \
@@ -25,49 +28,64 @@ SHELL ["/bin/bash", "-euo", "pipefail", "-c"]
 # This is needed so that krb5-user installs without prompting for a realm.
 ENV DEBIAN_FRONTEND=noninteractive
-# krb5-user/libkrb5-dev are needed for Kerberos support.
-RUN apt-get update && \
-    apt-get install -y --no-install-recommends \
-    build-essential \
-    ca-certificates \
-    curl \
-    gzip \
-    jq \
-    krb5-user \
-    kubernetes-client \
-    libkrb5-dev \
-    libssl-dev \
-    libxml2-dev \
-    libxslt1-dev \
-    pkg-config \
-    python3-certifi \
-    python3-idna \
-    python3-semver \
-    python3-thrift \
-    python3-toml \
-    python3-urllib3 \
-    tar \
-    zip \
-    unzip \
-    # Java 11 seems like the best middle-ground for all tools
-    openjdk-11-jdk-headless && \
-    apt-get clean && \
-    rm -rf /var/lib/apt/lists/*
 COPY testing-tools/python /stackable/python
 COPY testing-tools/licenses /licenses
-ENV PATH=/stackable/keycloak/bin:$PATH
-RUN curl --fail -L https://repo.stackable.tech/repository/packages/keycloak/keycloak-${KEYCLOAK_VERSION}.tar.gz | tar -xzC /stackable && \
-    ln -s /stackable/keycloak-${KEYCLOAK_VERSION} /stackable/keycloak
-RUN pip install --no-cache-dir --upgrade pip && \
-    pip install --no-cache-dir -r /stackable/python/requirements.txt && \
-    groupadd -r stackable --gid=1000 && \
-    useradd -r -g stackable --uid=1000 stackable && \
-    chown -R stackable:stackable /stackable
+RUN <