Skip to content

Commit 51e89be

Browse files
committed
Merge branch 'main' into hbase/entrypoint
2 parents 566123e + 094be62 commit 51e89be

File tree

18 files changed

+433
-200
lines changed

18 files changed

+433
-200
lines changed

CHANGELOG.md

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@ All notable changes to this project will be documented in this file.
3131
- Enable [Docker build checks](https://docs.docker.com/build/checks/) ([#872]).
3232
- java: migrate to temurin jdk/jre ([#894]).
3333
- tools: bump kubectl to `1.31.1` and jq to `1.7.1` ([#896]).
34+
- Make username, user id, group id configurable, use numeric ids everywhere, change group of all files to 0 ([#849], [#890]).
3435

3536
### Removed
3637

@@ -66,6 +67,7 @@ All notable changes to this project will be documented in this file.
6667
[#822]: https://github.com/stackabletech/docker-images/pull/822
6768
[#846]: https://github.com/stackabletech/docker-images/pull/846
6869
[#848]: https://github.com/stackabletech/docker-images/pull/848
70+
[#849]: https://github.com/stackabletech/docker-images/pull/849
6971
[#851]: https://github.com/stackabletech/docker-images/pull/851
7072
[#852]: https://github.com/stackabletech/docker-images/pull/852
7173
[#853]: https://github.com/stackabletech/docker-images/pull/853
@@ -80,6 +82,7 @@ All notable changes to this project will be documented in this file.
8082
[#880]: https://github.com/stackabletech/docker-images/pull/880
8183
[#881]: https://github.com/stackabletech/docker-images/pull/881
8284
[#882]: https://github.com/stackabletech/docker-images/pull/882
85+
[#890]: https://github.com/stackabletech/docker-images/pull/890
8386
[#894]: https://github.com/stackabletech/docker-images/pull/894
8487
[#896]: https://github.com/stackabletech/docker-images/pull/896
8588

airflow/Dockerfile

Lines changed: 26 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@ FROM stackable/image/statsd_exporter AS statsd_exporter-builder
1212
FROM stackable/image/vector AS airflow-build-image
1313

1414
ARG PRODUCT
15+
ARG STATSD_EXPORTER
1516
ARG PYTHON
1617
ARG TARGETARCH
1718

@@ -38,20 +39,37 @@ RUN microdnf update && \
3839
python${PYTHON}-pip \
3940
python${PYTHON}-wheel \
4041
# The airflow odbc provider can compile without the development files (headers and libraries) (see https://github.com/stackabletech/docker-images/pull/683)
41-
unixODBC && \
42+
unixODBC \
43+
# Needed to modify the SBOM
44+
jq && \
4245
microdnf clean all && \
4346
rm -rf /var/cache/yum
4447

45-
RUN python${PYTHON} -m venv --system-site-packages /stackable/app && \
46-
source /stackable/app/bin/activate && \
47-
pip install --no-cache-dir --upgrade pip && \
48-
pip install --no-cache-dir apache-airflow[${AIRFLOW_EXTRAS}]==${PRODUCT} --constraint /tmp/constraints.txt && \
49-
# Needed for pandas S3 integration to e.g. write and read csv and parquet files to/from S3
50-
pip install --no-cache-dir s3fs cyclonedx-bom && \
51-
cyclonedx-py environment --schema-version 1.5 --outfile /stackable/airflow-${PRODUCT}.cdx.json
48+
RUN <<EOF
49+
python${PYTHON} -m venv --system-site-packages /stackable/app
50+
51+
source /stackable/app/bin/activate
52+
53+
pip install --no-cache-dir --upgrade pip
54+
pip install --no-cache-dir apache-airflow[${AIRFLOW_EXTRAS}]==${PRODUCT} --constraint /tmp/constraints.txt
55+
# Needed for pandas S3 integration to e.g. write and read csv and parquet files to/from S3
56+
pip install --no-cache-dir s3fs==2024.9.0 cyclonedx-bom==5.0.0
57+
58+
# Create the SBOM for Airflow
59+
# Important: All `pip install` commands must be above this line, otherwise the SBOM will be incomplete
60+
cyclonedx-py environment --schema-version 1.5 --outfile /tmp/sbom.json
61+
62+
# Break circular dependencies by removing the apache-airflow dependency from the providers
63+
jq '.dependencies |= map(if .ref | test("^apache-airflow-providers-") then
64+
.dependsOn |= map(select(. != "apache-airflow=='${PRODUCT}'"))
65+
else
66+
.
67+
end)' /tmp/sbom.json > /stackable/app/airflow-${PRODUCT}.cdx.json
68+
EOF
5269

5370
WORKDIR /stackable
5471
COPY --from=statsd_exporter-builder /statsd_exporter/statsd_exporter /stackable/statsd_exporter
72+
COPY --from=statsd_exporter-builder /statsd_exporter/statsd_exporter-${STATSD_EXPORTER}.cdx.json /stackable/statsd_exporter-${STATSD_EXPORTER}.cdx.json
5573

5674
FROM stackable/image/vector AS airflow-main-image
5775

druid/Dockerfile

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -120,8 +120,8 @@ ln -s /stackable/apache-druid-${PRODUCT} /stackable/druid
120120
# Force to overwrite the existing 'run-druid'
121121
ln -sf /stackable/bin/run-druid /stackable/druid/bin/run-druid
122122

123-
# All files and folders owned by root to support running as arbitrary users
124-
# This is best practice as all container users will belong to the root group (0)
123+
# All files and folders owned by root group to support running as arbitrary users.
124+
# This is best practice as all container users will belong to the root group (0).
125125
chown -R ${STACKABLE_USER_UID}:0 /stackable
126126
chmod -R g=u /stackable
127127
EOF

hadoop/Dockerfile

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -169,8 +169,8 @@ find . -name 'hadoop-*tests.jar' -type f -delete
169169
# It is so non-root users (as we are) can mount a FUSE device and let other users access it
170170
echo "user_allow_other" > /etc/fuse.conf
171171

172-
# All files and folders owned by root to support running as arbitrary users
173-
# This is best practice as all container users will belong to the root group (0)
172+
# All files and folders owned by root group to support running as arbitrary users.
173+
# This is best practice as all container users will belong to the root group (0).
174174
chown -R ${STACKABLE_USER_UID}:0 /stackable
175175
chmod -R g=u /stackable
176176
EOF

hbase/Dockerfile

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -355,8 +355,8 @@ ln --symbolic --logical --verbose "/stackable/hbase-${PRODUCT}" /stackable/hbase
355355
ln --symbolic --logical --verbose "/stackable/hbase-operator-tools-${HBASE_OPERATOR_TOOLS}" /stackable/hbase-operator-tools
356356
ln --symbolic --logical --verbose "/stackable/phoenix/phoenix-server-hbase-${HBASE_PROFILE}.jar" "/stackable/hbase/lib/phoenix-server-hbase-${HBASE_PROFILE}.jar"
357357

358-
# All files and folders owned by root to support running as arbitrary users
359-
# This is best practice as all container users will belong to the root group (0)
358+
# All files and folders owned by root group to support running as arbitrary users.
359+
# This is best practice as all container users will belong to the root group (0).
360360
chown -R ${STACKABLE_USER_UID}:0 /stackable
361361
chmod -R g=u /stackable
362362
EOF

hello-world/Dockerfile

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,8 +22,8 @@ rm -rf /var/cache/yum
2222

2323
curl "https://repo.stackable.tech/repository/packages/hello-world/hello-world-${PRODUCT}.jar" -o /stackable/hello-world.jar
2424

25-
# All files and folders owned by root to support running as arbitrary users
26-
# This is best practice as all container users will belong to the root group (0)
25+
# All files and folders owned by root group to support running as arbitrary users.
26+
# This is best practice as all container users will belong to the root group (0).
2727
chown -R ${STACKABLE_USER_UID}:0 /stackable
2828
chmod -R g=u /stackable
2929
EOF

hive/Dockerfile

Lines changed: 22 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -103,40 +103,47 @@ LABEL io.openshift.tags="ubi9,stackable,hive,sdp"
103103
LABEL io.k8s.description="${DESCRIPTION}"
104104
LABEL io.k8s.display-name="${NAME}"
105105

106-
RUN <<EOF
107-
microdnf update
108-
microdnf clean all
109-
rpm -qa --qf "%{NAME}-%{VERSION}-%{RELEASE}\n" | sort > /stackable/package_manifest.txt
110-
rm -rf /var/cache/yum
111-
EOF
112-
113-
USER ${STACKABLE_USER_UID}
114106
WORKDIR /stackable
115107

116108
COPY --chown=${STACKABLE_USER_UID}:0 --from=hive-builder /stackable/apache-hive-metastore-${PRODUCT}-bin /stackable/apache-hive-metastore-${PRODUCT}-bin
117-
RUN ln -s /stackable/apache-hive-metastore-${PRODUCT}-bin /stackable/hive-metastore
118109

119110
# It is useful to see which version of Hadoop is used at a glance
120111
# Therefore the use of the full name here
121112
# TODO: Do we really need all of Hadoop in here?
122113
COPY --chown=${STACKABLE_USER_UID}:0 --from=hadoop-builder /stackable/hadoop /stackable/hadoop-${HADOOP}
123-
RUN ln -s /stackable/hadoop-${HADOOP} /stackable/hadoop
114+
115+
RUN <<EOF
116+
microdnf update
117+
microdnf clean all
118+
rpm -qa --qf "%{NAME}-%{VERSION}-%{RELEASE}\n" | sort > /stackable/package_manifest.txt
119+
rm -rf /var/cache/yum
120+
121+
ln -s /stackable/apache-hive-metastore-${PRODUCT}-bin /stackable/hive-metastore
122+
ln -s /stackable/hadoop-${HADOOP} /stackable/hadoop
124123

125124
# The next two sections for S3 and Azure use hardcoded version numbers on purpose instead of wildcards
126125
# This way the build will fail should one of the files not be available anymore in a later Hadoop version!
127126

128127
# Add S3 Support for Hive (support for s3a://)
129-
RUN cp /stackable/hadoop/share/hadoop/tools/lib/hadoop-aws-${HADOOP}.jar /stackable/hive-metastore/lib/
130-
RUN cp /stackable/hadoop/share/hadoop/tools/lib/aws-java-sdk-bundle-${AWS_JAVA_SDK_BUNDLE}.jar /stackable/hive-metastore/lib/
128+
cp /stackable/hadoop/share/hadoop/tools/lib/hadoop-aws-${HADOOP}.jar /stackable/hive-metastore/lib/
129+
cp /stackable/hadoop/share/hadoop/tools/lib/aws-java-sdk-bundle-${AWS_JAVA_SDK_BUNDLE}.jar /stackable/hive-metastore/lib/
131130

132131
# Add Azure ABFS support (support for abfs://)
133-
RUN cp /stackable/hadoop/share/hadoop/tools/lib/hadoop-azure-${HADOOP}.jar /stackable/hive-metastore/lib/
134-
RUN cp /stackable/hadoop/share/hadoop/tools/lib/azure-storage-${AZURE_STORAGE}.jar /stackable/hive-metastore/lib/
135-
RUN cp /stackable/hadoop/share/hadoop/tools/lib/azure-keyvault-core-${AZURE_KEYVAULT_CORE}.jar /stackable/hive-metastore/lib/
132+
cp /stackable/hadoop/share/hadoop/tools/lib/hadoop-azure-${HADOOP}.jar /stackable/hive-metastore/lib/
133+
cp /stackable/hadoop/share/hadoop/tools/lib/azure-storage-${AZURE_STORAGE}.jar /stackable/hive-metastore/lib/
134+
cp /stackable/hadoop/share/hadoop/tools/lib/azure-keyvault-core-${AZURE_KEYVAULT_CORE}.jar /stackable/hive-metastore/lib/
135+
136+
# All files and folders owned by root group to support running as arbitrary users.
137+
# This is best practice as all container users will belong to the root group (0).
138+
chown -R ${STACKABLE_USER_UID}:0 /stackable
139+
chmod -R g=u /stackable
140+
EOF
136141

137142
COPY --chown=${STACKABLE_USER_UID}:0 --from=hive-builder /stackable/jmx /stackable/jmx
138143
COPY hive/licenses /licenses
139144

145+
USER ${STACKABLE_USER_UID}
146+
140147
ENV HADOOP_HOME=/stackable/hadoop
141148
ENV HIVE_HOME=/stackable/hive-metastore
142149
ENV PATH="${PATH}":/stackable/hadoop/bin:/stackable/hive-metastore/bin

kafka-testing-tools/Dockerfile

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ FROM stackable/image/stackable-base AS final
88
ARG PRODUCT
99
ARG KCAT
1010
ARG RELEASE
11+
ARG STACKABLE_USER_UID
1112

1213
LABEL name="Kafka Testing Tools" \
1314
maintainer="[email protected]" \
@@ -29,11 +30,10 @@ RUN microdnf install \
2930
&& rm -rf /var/cache/yum
3031

3132
# Store kcat version with binary name and add softlink
32-
COPY --chown=stackable:stackable --from=kcat /stackable/kcat-${KCAT}/kcat /stackable/kcat-${KCAT}
33+
COPY --chown=${STACKABLE_USER_UID}:0 --from=kcat /stackable/kcat-${KCAT}/kcat /stackable/kcat-${KCAT}
3334
RUN ln -s /stackable/kcat-${KCAT} /stackable/kcat
34-
COPY --chown=stackable:stackable --from=kcat /licenses /licenses
35+
COPY --chown=${STACKABLE_USER_UID}:0 --from=kcat /licenses /licenses
3536

36-
37-
COPY --chown=stackable:stackable kafka-testing-tools/licenses /licenses
37+
COPY --chown=${STACKABLE_USER_UID}:0 kafka-testing-tools/licenses /licenses
3838

3939
ENTRYPOINT ["/stackable/kcat"]

kafka/Dockerfile

Lines changed: 52 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -9,53 +9,56 @@ ARG PRODUCT
99
ARG SCALA
1010
ARG OPA_AUTHORIZER
1111
ARG JMX_EXPORTER
12+
ARG STACKABLE_USER_UID
1213

13-
USER stackable
14+
RUN <<EOF
15+
microdnf update
16+
17+
# patch: Required for the apply_patches.sh script
18+
microdnf install \
19+
patch
20+
21+
microdnf clean all
22+
rm -rf /var/cache/yum
23+
EOF
24+
25+
USER ${STACKABLE_USER_UID}
1426
WORKDIR /stackable
1527

28+
COPY --chown=${STACKABLE_USER_UID}:0 kafka/stackable/patches/apply_patches.sh /stackable/kafka-${PRODUCT}-src/patches/apply_patches.sh
29+
COPY --chown=${STACKABLE_USER_UID}:0 kafka/stackable/patches/${PRODUCT} /stackable/kafka-${PRODUCT}-src/patches/${PRODUCT}
30+
1631
RUN curl "https://repo.stackable.tech/repository/packages/kafka/kafka-${PRODUCT}-src.tgz" | tar -xzC . && \
1732
cd kafka-${PRODUCT}-src && \
33+
./patches/apply_patches.sh ${PRODUCT} && \
1834
# TODO: Try to install gradle via package manager (if possible) instead of fetching it from the internet
1935
# We don't specify "-x test" to skip the tests, as we might bump some Kafka internal dependencies in the future and
2036
# it's a good idea to run the tests in this case.
2137
./gradlew clean releaseTarGz && \
38+
./gradlew cyclonedxBom && \
2239
tar -xf core/build/distributions/kafka_${SCALA}-${PRODUCT}.tgz -C /stackable && \
40+
cp build/reports/bom.json /stackable/kafka_${SCALA}-${PRODUCT}.cdx.json && \
2341
rm -rf /stackable/kafka_${SCALA}-${PRODUCT}/site-docs/ && \
2442
rm -rf /stackable/kafka-${PRODUCT}-src
2543

2644
# TODO (@NickLarsenNZ): Compile from source: https://github.com/StyraInc/opa-kafka-plugin
2745
RUN curl https://repo.stackable.tech/repository/packages/kafka-opa-authorizer/opa-authorizer-${OPA_AUTHORIZER}-all.jar \
2846
-o /stackable/kafka_${SCALA}-${PRODUCT}/libs/opa-authorizer-${OPA_AUTHORIZER}-all.jar
2947

30-
COPY --chown=stackable:stackable kafka/stackable/jmx/ /stackable/jmx/
48+
COPY --chown=${STACKABLE_USER_UID}:0 kafka/stackable/jmx/ /stackable/jmx/
3149
RUN curl https://repo.stackable.tech/repository/packages/jmx-exporter/jmx_prometheus_javaagent-${JMX_EXPORTER}.jar \
3250
-o /stackable/jmx/jmx_prometheus_javaagent-${JMX_EXPORTER}.jar && \
3351
chmod +x /stackable/jmx/jmx_prometheus_javaagent-${JMX_EXPORTER}.jar && \
3452
ln -s /stackable/jmx/jmx_prometheus_javaagent-${JMX_EXPORTER}.jar /stackable/jmx/jmx_prometheus_javaagent.jar
3553

36-
# For earlier versions this script removes the .class file that contains the
37-
# vulnerable code.
38-
# TODO: This can be restricted to target only versions which do not honor the environment
39-
# varible that has been set above but this has not currently been implemented
40-
COPY shared/log4shell.sh /bin
41-
RUN /bin/log4shell.sh /stackable/kafka_${SCALA}-${PRODUCT}
42-
43-
# Ensure no vulnerable files are left over
44-
# This will currently report vulnerable files being present, as it also alerts on
45-
# SocketNode.class, which we do not remove with our scripts.
46-
# Further investigation will be needed whether this should also be removed.
47-
COPY shared/log4shell_1.6.1-log4shell_Linux_x86_64 /bin/log4shell_scanner_x86_64
48-
COPY shared/log4shell_1.6.1-log4shell_Linux_aarch64 /bin/log4shell_scanner_aarch64
49-
COPY shared/log4shell_scanner /bin/log4shell_scanner
50-
RUN /bin/log4shell_scanner s /stackable/kafka_${SCALA}-${PRODUCT}
51-
# ===
5254

5355
FROM stackable/image/java-base AS final
5456

5557
ARG RELEASE
5658
ARG PRODUCT
5759
ARG SCALA
5860
ARG KCAT
61+
ARG STACKABLE_USER_UID
5962

6063
LABEL name="Apache Kafka" \
6164
maintainer="[email protected]" \
@@ -67,32 +70,39 @@ LABEL name="Apache Kafka" \
6770

6871
# This is needed for kubectl
6972
COPY kafka/kubernetes.repo /etc/yum.repos.d/kubernetes.repo
70-
RUN microdnf update && \
71-
microdnf install \
72-
# needed by kcat for kerberos
73-
cyrus-sasl-gssapi \
74-
# Can be removed once listener-operator integration is used
75-
kubectl && \
76-
microdnf clean all && \
77-
rpm -qa --qf "%{NAME}-%{VERSION}-%{RELEASE}\n" | sort > /stackable/package_manifest.txt && \
78-
rm -rf /var/cache/yum
79-
80-
USER stackable
81-
WORKDIR /stackable
82-
83-
COPY --chown=stackable:stackable kafka/licenses /licenses
73+
COPY --chown=${STACKABLE_USER_UID}:0 kafka/licenses /licenses
74+
COPY --chown=${STACKABLE_USER_UID}:0 --from=kafka-builder /stackable/kafka_${SCALA}-${PRODUCT} /stackable/kafka_${SCALA}-${PRODUCT}
75+
COPY --chown=${STACKABLE_USER_UID}:0 --from=kafka-builder /stackable/kafka_${SCALA}-${PRODUCT}.cdx.json /stackable/kafka_${SCALA}-${PRODUCT}/kafka_${SCALA}-${PRODUCT}.cdx.json
76+
COPY --chown=${STACKABLE_USER_UID}:0 --from=kafka-builder /stackable/jmx/ /stackable/jmx/
77+
COPY --chown=${STACKABLE_USER_UID}:0 --from=kcat /stackable/kcat-${KCAT}/kcat /stackable/bin/kcat-${KCAT}
78+
COPY --chown=${STACKABLE_USER_UID}:0 --from=kcat /licenses /licenses
8479

85-
# We copy opa-authorizer.jar and jmx-exporter through the builder image to have an absolutely minimal final image
86-
# (e.g. we don't even need curl in it).
87-
COPY --chown=stackable:stackable --from=kafka-builder /stackable/kafka_${SCALA}-${PRODUCT} /stackable/kafka_${SCALA}-${PRODUCT}
88-
COPY --chown=stackable:stackable --from=kafka-builder /stackable/jmx/ /stackable/jmx/
89-
COPY --chown=stackable:stackable --from=kcat /stackable/kcat-${KCAT}/kcat /stackable/bin/kcat-${KCAT}
90-
COPY --chown=stackable:stackable --from=kcat /licenses /licenses
80+
WORKDIR /stackable
9181

92-
RUN ln -s /stackable/bin/kcat-${KCAT} /stackable/bin/kcat && \
93-
# kcat was located in /stackable/kcat - legacy
94-
ln -s /stackable/bin/kcat /stackable/kcat && \
95-
ln -s /stackable/kafka_${SCALA}-${PRODUCT} /stackable/kafka
82+
RUN <<EOF
83+
microdnf update
84+
# cyrus-sasl-gssapi: needed by kcat for kerberos
85+
# kubectl: Can be removed once listener-operator integration is used
86+
microdnf install \
87+
cyrus-sasl-gssapi \
88+
kubectl
89+
90+
microdnf clean all
91+
rpm -qa --qf "%{NAME}-%{VERSION}-%{RELEASE}\n" | sort > /stackable/package_manifest.txt
92+
rm -rf /var/cache/yum
93+
94+
ln -s /stackable/bin/kcat-${KCAT} /stackable/bin/kcat
95+
# kcat was located in /stackable/kcat - legacy
96+
ln -s /stackable/bin/kcat /stackable/kcat
97+
ln -s /stackable/kafka_${SCALA}-${PRODUCT} /stackable/kafka
98+
99+
# All files and folders owned by root group to support running as arbitrary users.
100+
# This is best practice as all container users will belong to the root group (0).
101+
chown -R ${STACKABLE_USER_UID}:0 /stackable
102+
chmod -R g=u /stackable
103+
EOF
104+
105+
USER ${STACKABLE_USER_UID}
96106

97107
ENV PATH="${PATH}:/stackable/bin:/stackable/kafka/bin"
98108

Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
1+
diff --git a/build.gradle b/build.gradle
2+
index 32e6e8f..13a0def 100644
3+
--- a/build.gradle
4+
+++ b/build.gradle
5+
@@ -48,6 +48,47 @@ plugins {
6+
// artifacts - see https://github.com/johnrengelman/shadow/issues/901
7+
id 'com.github.johnrengelman.shadow' version '8.1.0' apply false
8+
id 'com.diffplug.spotless' version '6.14.0' apply false // 6.14.1 and newer require Java 11 at compile time, so we can't upgrade until AK 4.0
9+
+ id 'org.cyclonedx.bom' version '1.10.0'
10+
+}
11+
+
12+
+cyclonedxBom {
13+
+ // Specified the type of project being built. Defaults to 'library'
14+
+ projectType = "application"
15+
+ // Specified the version of the CycloneDX specification to use. Defaults to '1.5'
16+
+ schemaVersion = "1.5"
17+
+ // Boms destination directory. Defaults to 'build/reports'
18+
+ destination = file("build/reports")
19+
+ // The file name for the generated BOMs (before the file format suffix). Defaults to 'bom'
20+
+ outputName = "bom"
21+
+ // The file format generated, can be xml, json or all for generating both. Defaults to 'all'
22+
+ outputFormat = "json"
23+
+ includeConfigs = ["runtimeClasspath"]
24+
+ // Exclude test components. This list needs to be checked and, if it changed, updated for every new Kafka version.
25+
+ // The list can be obtained by running `gradle projects | grep upgrade-system-tests`
26+
+ skipProjects = [
27+
+ 'upgrade-system-tests-0100',
28+
+ 'upgrade-system-tests-0101',
29+
+ 'upgrade-system-tests-0102',
30+
+ 'upgrade-system-tests-0110',
31+
+ 'upgrade-system-tests-10',
32+
+ 'upgrade-system-tests-11',
33+
+ 'upgrade-system-tests-20',
34+
+ 'upgrade-system-tests-21',
35+
+ 'upgrade-system-tests-22',
36+
+ 'upgrade-system-tests-23',
37+
+ 'upgrade-system-tests-24',
38+
+ 'upgrade-system-tests-25',
39+
+ 'upgrade-system-tests-26',
40+
+ 'upgrade-system-tests-27',
41+
+ 'upgrade-system-tests-28',
42+
+ 'upgrade-system-tests-30',
43+
+ 'upgrade-system-tests-31',
44+
+ 'upgrade-system-tests-32',
45+
+ 'upgrade-system-tests-33',
46+
+ 'upgrade-system-tests-34',
47+
+ 'upgrade-system-tests-35',
48+
+ 'upgrade-system-tests-36'
49+
+ ]
50+
}
51+
52+
ext {

0 commit comments

Comments
 (0)