Skip to content
This repository was archived by the owner on Oct 19, 2025. It is now read-only.

Commit 2850b0c

Browse files
committed
Fix Intel apt packaging, and update the compute runtime and associated packages to the correct versions. Also fix startup.sh not having the correct install command, and fix documentation.
1 parent e2bb448 commit 2850b0c

File tree

4 files changed

+58
-52
lines changed

4 files changed

+58
-52
lines changed

Dockerfile

Lines changed: 46 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ RUN apt-get update && \
3939
RUN wget --progress=dot:giga -qO - https://repositories.intel.com/graphics/intel-graphics.key | \
4040
gpg --dearmor --output /usr/share/keyrings/intel-graphics.gpg
4141
# hadolint ignore=DL4006
42-
RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy/lts/2350 unified" | \
42+
RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/graphics/ubuntu jammy unified" | \
4343
tee /etc/apt/sources.list.d/intel.gpu.jammy.list
4444

4545
ARG UBUNTU_VERSION=22.04
@@ -53,7 +53,7 @@ COPY --from=oneapi-lib-installer /opt/intel/oneapi/compiler/${CMPLR_COMMON_VER}/
5353
COPY --from=oneapi-lib-installer /usr/share/keyrings/intel-graphics.gpg /usr/share/keyrings/intel-graphics.gpg
5454
COPY --from=oneapi-lib-installer /etc/apt/sources.list.d/intel.gpu.jammy.list /etc/apt/sources.list.d/intel.gpu.jammy.list
5555

56-
# Set apt install to not be interactive for things like tzdata
56+
# Set apt install to not be interactive for some packages that require it.
5757
ENV DEBIAN_FRONTEND=noninteractive
5858

5959
# Set oneAPI library environment variable
@@ -69,7 +69,44 @@ RUN apt-get update && \
6969
gnupg2 \
7070
gpg-agent \
7171
software-properties-common \
72-
wget
72+
wget && \
73+
apt-get clean && \
74+
rm -rf /var/lib/apt/lists/*
75+
76+
# Sets versions of Level-Zero, OpenCL and memory allocator chosen.
77+
ARG ICD_VER=23.17.26241.33-647~22.04
78+
ARG LEVEL_ZERO_GPU_VER=1.3.26241.33-647~22.04
79+
ARG LEVEL_ZERO_VER=1.11.0-647~22.04
80+
ARG LEVEL_ZERO_DEV_VER=1.11.0-647~22.04
81+
ARG ALLOCATOR=tcmalloc
82+
ENV ALLOCATOR=${ALLOCATOR}
83+
ARG ALLOCATOR_PACKAGE=libgoogle-perftools-dev
84+
ARG ALLOCATOR_LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc.so
85+
RUN if [ "${ALLOCATOR}" = "jemalloc" ] ; then \
86+
ALLOCATOR_PACKAGE=libjemalloc-dev; \
87+
ALLOCATOR_LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so; \
88+
fi
89+
90+
# Install Level-Zero and OpenCL backends.
91+
RUN apt-get update && \
92+
apt-get install -y --no-install-recommends --fix-missing \
93+
intel-opencl-icd=${ICD_VER} \
94+
intel-level-zero-gpu=${LEVEL_ZERO_GPU_VER} && \
95+
apt-get clean && \
96+
rm -rf /var/lib/apt/lists/*
97+
98+
# Getting the latest versions of Intel's Compute Runtime and associated packages from GitHub and installing them will update everything we installed before.
99+
RUN mkdir neo
100+
WORKDIR /neo
101+
RUN wget --progress=dot:giga https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.16695.4/intel-igc-core_1.0.16695.4_amd64.deb && \
102+
wget --progress=dot:giga https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.16695.4/intel-igc-opencl_1.0.16695.4_amd64.deb && \
103+
wget --progress=dot:giga https://github.com/intel/compute-runtime/releases/download/24.17.29377.6/intel-level-zero-gpu_1.3.29377.6_amd64.deb && \
104+
wget --progress=dot:giga https://github.com/intel/compute-runtime/releases/download/24.17.29377.6/intel-opencl-icd_24.17.29377.6_amd64.deb && \
105+
wget --progress=dot:giga https://github.com/intel/compute-runtime/releases/download/24.17.29377.6/libigdgmm12_22.3.19_amd64.deb && \
106+
wget --progress=dot:giga https://github.com/oneapi-src/level-zero/releases/download/v1.16.14/level-zero_1.16.14+u20.04_amd64.deb && \
107+
wget --progress=dot:giga https://github.com/oneapi-src/level-zero/releases/download/v1.16.14/level-zero-devel_1.16.14+u20.04_amd64.deb && \
108+
dpkg -i -- *.deb
109+
WORKDIR /
73110

74111
# Install Python and other associated packages from PPA since default is 3.10
75112
ARG PYTHON=python3.11
@@ -80,7 +117,9 @@ RUN add-apt-repository ppa:deadsnakes/ppa && \
80117
${PYTHON} \
81118
lib${PYTHON} \
82119
python3-pip \
83-
${PYTHON}-venv
120+
${PYTHON}-venv && \
121+
apt-get clean && \
122+
rm -rf /var/lib/apt/lists/*
84123

85124
# Update pip
86125
# hadolint ignore=DL3013
@@ -94,39 +133,6 @@ RUN ln -sf "$(which ${PYTHON})" /usr/local/bin/python && \
94133
ln -sf "$(which ${PYTHON})" /usr/bin/python && \
95134
ln -sf "$(which ${PYTHON})" /usr/bin/python3
96135

97-
# Sets versions of Level-Zero, OpenCL and memory allocator chosen.
98-
ARG ICD_VER=23.43.27642.40-803~22.04
99-
ARG LEVEL_ZERO_GPU_VER=1.3.27642.40-803~22.04
100-
ARG LEVEL_ZERO_VER=1.14.0-744~22.04
101-
ARG LEVEL_ZERO_DEV_VER=1.14.0-744~22.04
102-
ARG ALLOCATOR=tcmalloc
103-
ENV ALLOCATOR=${ALLOCATOR}
104-
ARG ALLOCATOR_PACKAGE=libgoogle-perftools-dev
105-
ARG ALLOCATOR_LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc.so
106-
RUN if [ "${ALLOCATOR}" = "jemalloc" ] ; then \
107-
ALLOCATOR_PACKAGE=libjemalloc-dev; \
108-
ALLOCATOR_LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so; \
109-
fi
110-
111-
# Install Level-Zero and OpenCL backends.
112-
RUN apt-get update && \
113-
apt-get install -y --no-install-recommends --fix-missing \
114-
intel-opencl-icd=${ICD_VER} \
115-
intel-level-zero-gpu=${LEVEL_ZERO_GPU_VER} \
116-
level-zero=${LEVEL_ZERO_VER} \
117-
level-zero-dev=${LEVEL_ZERO_DEV_VER}
118-
119-
# Update Compute Runtime to latest version
120-
RUN mkdir neo && \
121-
cd neo && \
122-
wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.16510.2/intel-igc-core_1.0.16510.2_amd64.deb && \
123-
wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.16510.2/intel-igc-opencl_1.0.16510.2_amd64.deb && \
124-
wget https://github.com/intel/compute-runtime/releases/download/24.13.29138.7/intel-level-zero-gpu_1.3.29138.7_amd64.deb && \
125-
wget https://github.com/intel/compute-runtime/releases/download/24.13.29138.7/intel-opencl-icd_24.13.29138.7_amd64.deb && \
126-
wget https://github.com/intel/compute-runtime/releases/download/24.13.29138.7/libigdgmm12_22.3.18_amd64.deb && \
127-
dpkg -i *.deb && \
128-
cd ..
129-
130136
# Install Comfy UI/Pytorch dependencies.
131137
# hadolint ignore=DL3008
132138
RUN apt-get update && \
@@ -135,7 +141,9 @@ RUN apt-get update && \
135141
libgl1 \
136142
libglib2.0-0 \
137143
libgomp1 \
138-
numactl
144+
numactl && \
145+
apt-get clean && \
146+
rm -rf /var/lib/apt/lists/*
139147

140148
# Make sure everything is up to date.
141149
# hadolint ignore=DL3008

LICENSE

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -186,7 +186,7 @@
186186
same "printed page" as the copyright notice for easier
187187
identification within third-party archives.
188188

189-
Copyright [yyyy] [name of copyright owner]
189+
Copyright [2023-2024] [Simon Lui]
190190

191191
Licensed under the Apache License, Version 2.0 (the "License");
192192
you may not use this file except in compliance with the License.

README.md

Lines changed: 8 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
# Stable Diffusion ComfyUI Docker/OCI Image for Intel Arc GPUs
22

3-
This Docker/OCI image is designed to run [ComfyUI](https://github.com/comfyanonymous/ComfyUI) inside a Docker container for Intel Arc GPUs. This work was based in large part on the work done by a Docker image made by nuullll [here](https://github.com/Nuullll/ipex-sd-docker-for-arc-gpu) for a different Stable Diffusion UI.
3+
This Docker/OCI image is designed to run [ComfyUI](https://github.com/comfyanonymous/ComfyUI) inside a Docker/OCI container for Intel Arc GPUs. This work was based in large part on the work done by a Docker image made by nuullll [here](https://github.com/Nuullll/ipex-sd-docker-for-arc-gpu) for a different Stable Diffusion UI and the official Docker images from the Intel® Extension for PyTorch* xpu-main branch Docker images [here](https://github.com/intel/intel-extension-for-pytorch/tree/xpu-main/docker).
44

55
The Docker/OCI image includes
66
- Intel oneAPI DPC++ runtime libs _(Note: compiler executables are not included)_
@@ -22,23 +22,20 @@ There are reports that Intel® Xe GPUs (iGPU and dGPU) in Tiger Lake (11th gener
2222
* Docker (Desktop) or podman
2323
* Linux or Windows, with the latest drivers installed.
2424

25-
Windows should work, but it is highly not recommended to run this unless you have a specific reason to do so i.e. needing a Linux host/userspace to run custom nodes or etc. For most purposes, doing a native install will give better speeds and less headaches. You can find instructions I have written for doing that with ComfyUI via [this link](https://github.com/comfyanonymous/ComfyUI/discussions/476#discussioncomment-7152963).
26-
25+
Windows should work, but it is strongly recommended not to run this unless you have a specific reason to do so, e.g. needing a Linux host/userspace to run custom nodes, etc. For most purposes, doing a native install will give better speeds and fewer headaches. Please follow the install instructions listed in the [ComfyUI README.md](https://github.com/comfyanonymous/ComfyUI/?tab=readme-ov-file#intel-gpus).
2726
* If using Windows, you must have WSL2 set up via [this link](https://learn.microsoft.com/en-us/windows/wsl/install) in addition to Docker to be able to pass through your GPU.
2827

2928
## Build and run the image
3029

3130
Instructions will assume Docker but podman has command compatibility so it should be easy to replace docker in these commands to run also. Run the following command in a terminal to checkout the repository and build the image.
32-
```console
31+
```sh
3332
git clone https://github.com/simonlui/Docker_IPEX_ComfyUI
3433
cd Docker_IPEX_ComfyUI
3534
docker build -t ipex-arc-comfy:latest -f Dockerfile .
3635
```
37-
#### Temporary workaround with Intel Extension for Pytorch
38-
Go to the releases page and download the Python 3.11 packages versions for intel_extension_for_pytorch, intel_extension_for_pytorch_deepspeed, torch, torchaudio, and torchvision and put the package files in your ComfyUI directory. This will replace installation of these packages from Intel's repositories until a new version of IPEX releases. The startup script will pick them up and install them the first time you start the container.
3936

4037
Once the image build is complete, then run the following if using Linux in terminal or Docker Desktop.
41-
```console
38+
```sh
4239
docker run -it `
4340
--device /dev/dri `
4441
-e ComfyArgs="<ComfyUI command line arguments>" `
@@ -52,7 +49,7 @@ docker run -it `
5249
ipex-arc-comfy:latest
5350
```
5451
For Windows, run the following in terminal or Docker Desktop.
55-
```console
52+
```sh
5653
docker run -it `
5754
--device /dev/dxg `
5855
-e ComfyArgs="<ComfyUI command line arguments>" `
@@ -70,7 +67,7 @@ Below is an explanation on what the above commands mean so one will know how to
7067

7168
* docker run creates and runs a new container from an image. No modification needed here.
7269
* On Linux, `--device /dev/dri` passes in your GPU from your host computer to the container as is required to enable container access to your GPU to run ComfyUI. On Windows, `--device /dev/dxg` and `-v /usr/lib/wsl:/usr/lib/wsl` are the equivalent commands to do the same thing through WSL2.
73-
* `-e ComfyArgs="<ComfyUI command line arguments>"` specifies the ComfyUI arguments that you can pass to ComfyUI to use. You can take a look at the options you can pass [here](https://github.com/comfyanonymous/ComfyUI/blob/21a563d385ff520e1f7fdaada722212b35fb8d95/comfy/cli_args.py#L36). As of the time of this writing, you may need to specify `--highvram`. `--highvram` keeps the model in GPU memory which can stop a source of crashing.
70+
* `-e ComfyArgs="<ComfyUI command line arguments>"` specifies the ComfyUI arguments that you can pass to ComfyUI to use. You can take a look at the options you can pass [here](https://github.com/comfyanonymous/ComfyUI/blob/21a563d385ff520e1f7fdaada722212b35fb8d95/comfy/cli_args.py#L36). Things like Pytorch Cross Attention and BF16 are already turned on by default. Options that may help speed but impact accuracy and stability as a result include `--fp8_e4m3fn-text-enc`, `--fp8_e4m3fn-unet` and `--gpu-only`. Be aware that with the last option, offloading everything to VRAM may not be that great given that Intel Arc DG2 series cards and similar have a limitation of any one allocation being maximum 4GB in size due to hardware limitations as discussed in [here](https://github.com/oneapi-src/oneDNN/issues/1638) and one may need to use various VRAM reduction methods to actually work around this for higher resolution image generation.
7471
* `-it` will let you launch the container with an interactive command line. This is highly recommended, but not mandatory, since we may need to monitor ComfyUI's output for any status changes or errors which would be made available easily by including this option.
7572
* `--name comfy-server` assigns a meaningful name (e.g. comfy-server) to the newly created container. This option is useful but not mandatory to reference your container for later uses.
7673
* `--network=host` allows the container access to your host computer's network which is needed to access ComfyUI without specifying the `--listen` argument on Linux hosts only, not Windows.
@@ -89,7 +86,7 @@ docker:
8986
base_path: /
9087
...
9188
```
92-
* `ipexrun` is a launcher script to use Intel's Extension For Pytorch without code changes with optimizations enabled. There may be issues running ComfyUI through the launcher with some of the arguments you can use so it is not enabled by default. To use the XPU path that uses your GPU, add in `-e UseIPEXRUN=true` to the argument string above. Additionally, if one wants to run it in CPU mode, you should additionally add in `-e UseXPU=false` to that list. You should also then set the environment variable for passing arguments to `ipexrun` adding `-e IPEXRUNArgs=<Your arguments here>`. A reference to all the `ipexrun` arguments can be found [here](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/tutorials/performance_tuning/launch_script.html)
89+
* `ipexrun` is a launcher script to use Intel's Extension For Pytorch without code changes with optimizations enabled. GPU is still not supported and running ComfyUI through the launcher with some of the arguments you can use will be unsupported by Intel themselves so it is not enabled by default. To use the XPU path that uses your GPU, add in `-e UseIPEXRUN=true` to the argument string above. If CPU mode is to be used, you should additionally add in `-e UseXPU=false` to that list. You should also then set the environment variable for passing arguments to `ipexrun` adding `-e IPEXRUNArgs=<Your arguments here>`. A reference to all the `ipexrun` arguments can be found [here](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/tutorials/performance_tuning/launch_script.html)
9390
* You can change between `tcmalloc` (default) and `jemalloc` if using CPU `ipexrun`, add in `--build-arg="ALLOCATOR=jemalloc"` when building the image in the first step to switch between the two allocators for `ipexrun`.
9491

95-
Please refer to the [Dockerfile](./Dockerfile) for all available build arguments and environment variables not mentioned here and documented.
92+
Please refer to the [Dockerfile](./Dockerfile) for all available build arguments and environment variables not mentioned here and documented.

startup.sh

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -27,8 +27,9 @@ fi
2727
if [ "$FirstLaunch" = "true" ]
2828
then
2929
echo "Installing ComfyUI Python dependencies."
30-
#python -m pip install torch==2.1.0.post0 torchvision==0.16.0.post0 torchaudio==2.1.0.post0 intel-extension-for-pytorch==2.1.20+xpu --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
31-
pip install *.whl
30+
python -m pip install torch==2.1.0.post2 torchvision==0.16.0.post2 torchaudio==2.1.0.post2 intel-extension-for-pytorch==2.1.30+xpu oneccl_bind_pt==2.1.300+xpu --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
31+
# Comment out the above command and uncomment the following one instead if you are a user from the PRC.
32+
# python -m pip install torch==2.1.0.post2 torchvision==0.16.0.post2 torchaudio==2.1.0.post2 intel-extension-for-pytorch==2.1.30+xpu oneccl_bind_pt==2.1.300+xpu --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
3233
pip install -r requirements.txt
3334
fi
3435

0 commit comments

Comments
 (0)