Skip to content

Commit 936ee5f

Browse files
committed
[circle-mlir/infra] Revise u2204/Dockerfile
This will revise u2204/Dockerfile to: download the onnx-mlir patch from GitHub; update the ONNX-MLIR and llvm-project hashes; apply related changes; and remove clang-format, which is no longer used. ONE-DCO-1.0-Signed-off-by: SaeHie Park <saehie.park@gmail.com>
1 parent 5b965ae commit 936ee5f

File tree

1 file changed

+13
-56
lines changed

1 file changed

+13
-56
lines changed

circle-mlir/infra/docker/u2204/Dockerfile

Lines changed: 13 additions & 56 deletions
Original file line number | Diff line number | Diff line change
@@ -11,14 +11,6 @@ RUN apt-get update \
1111
&& apt-get -qqy install build-essential cmake git fakeroot autoconf automake libtool unzip wget \
1212
devscripts debmake debhelper lcov
1313

14-
# Install clang-format
15-
RUN apt-get update \
16-
&& apt-get install -qqy gnupg2 \
17-
&& wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - \
18-
&& add-apt-repository "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-16 main" \
19-
&& apt-get update \
20-
&& apt-get install -qqy clang-format-16
21-
2214
# additonal tools
2315
RUN apt-get update \
2416
&& apt-get -qqy install tree tmux
@@ -27,16 +19,14 @@ RUN apt-get update \
2719
RUN apt-get update \
2820
&& apt-get -qqy install python3 python3-pip python3-venv python3-dev python3-all dh-python
2921
RUN python3 -m pip install --upgrade pip setuptools
30-
RUN python3 -m pip install yapf==0.43.0 numpy==1.24.4 h5py==3.8.0 einops
22+
RUN python3 -m pip install yapf==0.43.0 numpy==2.2.4 h5py==3.8.0 einops
3123

3224
# TODO upgrade
33-
ARG VER_TORCH=2.1.0
34-
ARG VER_ONNX=1.16.0
35-
ARG VER_ONNXRUNTIME=1.18.0
25+
ARG VER_TORCH=2.6.0
26+
ARG VER_ONNX=1.17.0
27+
ARG VER_ONNXRUNTIME=1.21.0
3628

37-
RUN python3 -m pip install torch==${VER_TORCH}
38-
RUN python3 -m pip install onnx==${VER_ONNX}
39-
RUN python3 -m pip install onnxruntime==${VER_ONNXRUNTIME}
29+
RUN python3 -m pip install torch==${VER_TORCH} onnx==${VER_ONNX} onnxruntime==${VER_ONNXRUNTIME}
4030

4131
# Clean archives (to reduce image size)
4232
RUN apt-get clean -y
@@ -48,47 +38,15 @@ ARG NPROC=4
4838
ARG WORK_DIR=/workdir
4939
ARG FLATBUFFERS_HASH=a078130c
5040
ARG ABSEIL_CPP_HASH=21510581
51-
ARG LLVM_PROJECT_HASH=91088978d712cd7b33610c59f69d87d5a39e3113
52-
ARG ONNX_MLIR_HASH=40615b362fe474d6cce4fd9c31a7ec51acd8f88a
41+
ARG LLVM_PROJECT_HASH=b270525f730be6e7196667925f5a9bfa153262e9
42+
ARG ONNX_MLIR_HASH=660bd8efa664bd2d05801a7a793fe29925313b4b
5343

5444
WORKDIR ${WORK_DIR}
5545

56-
# Generate experimental patch file for onnx-mlir
57-
58-
RUN set -x \
59-
&& { \
60-
echo 'diff --git a/src/Dialect/ONNX/ONNXOps.td.inc b/src/Dialect/ONNX/ONNXOps.td.inc'; \
61-
echo 'index d75ee6b6..f63a2910 100644'; \
62-
echo '--- a/src/Dialect/ONNX/ONNXOps.td.inc'; \
63-
echo '+++ b/src/Dialect/ONNX/ONNXOps.td.inc'; \
64-
echo '@@ -1779,9 +1779,9 @@ def ONNXDequantizeLinearOp:ONNX_Op<"DequantizeLinear",'; \
65-
echo ' `zero-point` is usually not used in the case of float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz quantization,'; \
66-
echo " but the dequantization formula remains the same for consistency and 'x_scale' still determines the output type."; \
67-
echo ' }];'; \
68-
echo '- let arguments = (ins AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, TensorOf<[I32]>, TensorOf<[F8E4M3FN]>, TensorOf<[F8E4M3FNUZ]>, TensorOf<[F8E5M2]>, TensorOf<[F8E5M2FNUZ]>]>:$x,'; \
69-
echo '+ let arguments = (ins AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[F8E4M3FN]>, TensorOf<[F8E4M3FNUZ]>, TensorOf<[F8E5M2]>, TensorOf<[F8E5M2FNUZ]>]>:$x,'; \
70-
echo ' AnyTypeOf<[TensorOf<[F32]>, TensorOf<[F16]>, TensorOf<[BF16]>]>:$x_scale,'; \
71-
echo '- AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, TensorOf<[I32]>, TensorOf<[F8E4M3FN]>, TensorOf<[F8E4M3FNUZ]>, TensorOf<[F8E5M2]>, TensorOf<[F8E5M2FNUZ]>, NoneType]>:$x_zero_point,'; \
72-
echo '+ AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[F8E4M3FN]>, TensorOf<[F8E4M3FNUZ]>, TensorOf<[F8E5M2]>, TensorOf<[F8E5M2FNUZ]>, NoneType]>:$x_zero_point,'; \
73-
echo ' DefaultValuedAttr<SI64Attr, "1">:$axis);'; \
74-
echo ' let results = (outs AnyTypeOf<[TensorOf<[F32]>, TensorOf<[F16]>, TensorOf<[BF16]>]>:$y);'; \
75-
echo ' let extraClassDeclaration = [{'; \
76-
echo '@@ -5721,10 +5721,10 @@ def ONNXQuantizeLinearOp:ONNX_Op<"QuantizeLinear",'; \
77-
echo ' }];'; \
78-
echo ' let arguments = (ins AnyTypeOf<[TensorOf<[F32]>, TensorOf<[F16]>, TensorOf<[BF16]>, TensorOf<[I32]>]>:$x,'; \
79-
echo ' AnyTypeOf<[TensorOf<[F32]>, TensorOf<[F16]>, TensorOf<[BF16]>, TensorOf<[I32]>]>:$y_scale,'; \
80-
echo '- AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, TensorOf<[F8E4M3FN]>, TensorOf<[F8E4M3FNUZ]>, TensorOf<[F8E5M2]>, TensorOf<[F8E5M2FNUZ]>, NoneType]>:$y_zero_point,'; \
81-
echo '+ AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, TensorOf<[I16]>, TensorOf<[F8E4M3FN]>, TensorOf<[F8E4M3FNUZ]>, TensorOf<[F8E5M2]>, TensorOf<[F8E5M2FNUZ]>, NoneType]>:$y_zero_point,'; \
82-
echo ' DefaultValuedAttr<SI64Attr, "1">:$axis,'; \
83-
echo ' DefaultValuedAttr<SI64Attr, "1">:$saturate);'; \
84-
echo '- let results = (outs AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, TensorOf<[F8E4M3FN]>, TensorOf<[F8E4M3FNUZ]>, TensorOf<[F8E5M2]>, TensorOf<[F8E5M2FNUZ]>]>:$y);'; \
85-
echo '+ let results = (outs AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, TensorOf<[I16]>, TensorOf<[F8E4M3FN]>, TensorOf<[F8E4M3FNUZ]>, TensorOf<[F8E5M2]>, TensorOf<[F8E5M2FNUZ]>]>:$y);'; \
86-
echo ' let extraClassDeclaration = [{'; \
87-
echo ' static int getNumberOfOperands() {'; \
88-
echo ' return 3;'; \
89-
echo ''; \
90-
} > ${WORK_DIR}/onnx_quantdequant_i16.patch \
91-
&& cat ${WORK_DIR}/onnx_quantdequant_i16.patch
46+
# Download experimental patch file for onnx-mlir
47+
48+
RUN wget https://raw.githubusercontent.com/Samsung/ONE/refs/heads/master/circle-mlir/externals/onnx_mlir_0_5_0_0.diff \
49+
-O ${WORK_DIR}/onnx_mlir_0_5_0_0.diff
9250

9351
# FLATBUFFERS build from source
9452
RUN git clone --recursive https://github.com/google/flatbuffers.git \
@@ -165,9 +123,9 @@ RUN mkdir llvm-project-build && cd llvm-project-build \
165123
RUN cd llvm-project-build && cmake --build . && cmake --install .
166124

167125
# ONNX-MLIR build from source
168-
# Patch to accept INT16 for QuantizeLinear/DequantizeLinear
126+
# Patch to accept INT16 for QuantizeLinear/DequantizeLinear and fix ONNX passes
169127
RUN cd onnx-mlir \
170-
&& patch -p1 < ${WORK_DIR}/onnx_quantdequant_i16.patch
128+
&& patch -p1 < ${WORK_DIR}/onnx_mlir_0_5_0_0.diff
171129

172130
RUN mkdir onnx-mlir-build && cd onnx-mlir-build \
173131
&& cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=${WORK_DIR} \
@@ -176,7 +134,6 @@ RUN mkdir onnx-mlir-build && cd onnx-mlir-build \
176134
-DCMAKE_JOB_POOL_COMPILE:STRING=compile -DCMAKE_JOB_POOL_LINK:STRING=link \
177135
-DCMAKE_JOB_POOLS:STRING='compile=4;link=1' \
178136
-DONNX_MLIR_BUILD_TESTS=OFF -DONNX_MLIR_ENABLE_MHLO=OFF -DLLVM_ENABLE_ASSERTIONS=ON \
179-
-DONNX_MLIR_DECOMP_ONNX_CONVTRANSPOSE=OFF \
180137
../onnx-mlir
181138

182139
RUN cd onnx-mlir-build && cmake --build . && cmake --install .

0 commit comments

Comments
 (0)