Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions deploy/paddleocr_vl_docker/accelerators/amd-gpu/.env
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
# Image tag suffixes and VLM backend selection for the AMD GPU (ROCm) deployment.
# Consumed by compose.yaml in this directory.
API_IMAGE_TAG_SUFFIX=latest-amd-gpu-offline
VLM_BACKEND=vllm
VLM_IMAGE_TAG_SUFFIX=latest-amd-gpu-offline
47 changes: 47 additions & 0 deletions deploy/paddleocr_vl_docker/accelerators/amd-gpu/compose.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
# Docker Compose deployment of PaddleOCR-VL on AMD GPUs (ROCm).
# Both containers need the ROCm device nodes (/dev/kfd, /dev/dri) and
# membership in the `video` group; SYS_PTRACE + seccomp=unconfined are the
# conventional ROCm container settings.
services:
  paddleocr-vl-api:
    image: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/paddleocr-vl:${API_IMAGE_TAG_SUFFIX}
    container_name: paddleocr-vl-api
    ports:
      - 8080:8080
    depends_on:
      # Do not start the API until the VLM backend reports healthy.
      paddleocr-vlm-server:
        condition: service_healthy
    devices:
      - /dev/kfd
      - /dev/dri
    group_add:
      - video
    cap_add:
      - SYS_PTRACE
    security_opt:
      - seccomp=unconfined
    shm_size: 64g
    # TODO: Allow using a regular user
    user: root
    restart: unless-stopped
    environment:
      - VLM_BACKEND=${VLM_BACKEND:-vllm}
    # Select the pipeline config file that matches the chosen VLM backend.
    command: /bin/bash -c "paddlex --serve --pipeline /home/paddleocr/pipeline_config_${VLM_BACKEND}.yaml"
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:8080/health || exit 1"]

  paddleocr-vlm-server:
    image: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/paddleocr-genai-${VLM_BACKEND}-server:${VLM_IMAGE_TAG_SUFFIX}
    container_name: paddleocr-vlm-server
    devices:
      - /dev/kfd
      - /dev/dri
    group_add:
      - video
    cap_add:
      - SYS_PTRACE
    security_opt:
      - seccomp=unconfined
    shm_size: 64g
    # TODO: Allow using a regular user
    user: root
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:8080/health || exit 1"]
      # Model loading can take several minutes; ignore early probe failures.
      start_period: 300s
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
# API server image for PaddleOCR-VL on AMD GPUs (ROCm).
# NOTE(review): base image is from a personal Docker Hub namespace with a
# mutable tag — consider pinning by digest for reproducibility.
FROM vivienfanghua/vllm_paddle:base

# Build-time only: keep apt from prompting. Declared as ARG (not ENV) so it
# is not baked into the runtime environment of the final image.
ARG DEBIAN_FRONTEND=noninteractive

ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1

ENV PIP_BREAK_SYSTEM_PACKAGES=1
# -f: do not fail if the base image already provides /usr/bin/python.
RUN ln -sf /usr/bin/python3 /usr/bin/python

# ROCm development libs needed by the Paddle stack, plus fonts for document
# rendering (CJK coverage via noto-cjk / wqy-microhei). apt lists are removed
# in the same layer to keep the image small.
RUN apt-get update \
    && apt-get install -y --no-install-recommends miopen-hip-dev rocrand-dev rocblas-dev \
    && apt-get install -y --no-install-recommends \
        fontconfig \
        fonts-dejavu-core \
        fonts-liberation \
        fonts-noto-cjk \
        fonts-wqy-microhei \
        fonts-freefont-ttf \
    && fc-cache -fv \
    && rm -rf /var/lib/apt/lists/*

# Version ranges for the OCR stack; overridable at build time.
ARG PADDLEOCR_VERSION=">=3.4.0,<3.5"
ARG PADDLEX_VERSION=">=3.4.0,<3.5"
# Cache mount keeps pip's download cache on the build host, out of the layers.
RUN --mount=type=cache,target=/root/.cache/pip \
    python -m pip install "paddleocr[doc-parser]${PADDLEOCR_VERSION}" "paddlex[serving]${PADDLEX_VERSION}"

# Dedicated non-root user with a stable UID/GID for volume ownership.
RUN groupadd -g 1001 paddleocr \
    && useradd -m -s /bin/bash -u 1001 -g 1001 paddleocr
ENV HOME=/home/paddleocr
WORKDIR /home/paddleocr

USER paddleocr

# Offline variant pre-bakes the inference models and the PingFang font so the
# container does not need network access at runtime.
ARG BUILD_FOR_OFFLINE=false
RUN if [ "${BUILD_FOR_OFFLINE}" = 'true' ]; then \
        mkdir -p "${HOME}/.paddlex/official_models" \
        && cd "${HOME}/.paddlex/official_models" \
        && wget https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0.0/UVDoc_infer.tar \
            https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0.0/PP-LCNet_x1_0_doc_ori_infer.tar \
            https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0.0/PP-DocLayoutV3_infer.tar \
            https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0.0/PaddleOCR-VL-1.5_infer.tar \
        && tar -xf UVDoc_infer.tar \
        && mv UVDoc_infer UVDoc \
        && tar -xf PP-LCNet_x1_0_doc_ori_infer.tar \
        && mv PP-LCNet_x1_0_doc_ori_infer PP-LCNet_x1_0_doc_ori \
        && tar -xf PP-DocLayoutV3_infer.tar \
        && mv PP-DocLayoutV3_infer PP-DocLayoutV3 \
        && tar -xf PaddleOCR-VL-1.5_infer.tar \
        && mv PaddleOCR-VL-1.5_infer PaddleOCR-VL-1.5 \
        && rm -f UVDoc_infer.tar PP-LCNet_x1_0_doc_ori_infer.tar PP-DocLayoutV3_infer.tar PaddleOCR-VL-1.5_infer.tar \
        && mkdir -p "${HOME}/.paddlex/fonts" \
        && wget -P "${HOME}/.paddlex/fonts" https://paddle-model-ecology.bj.bcebos.com/paddlex/PaddleX3.0/fonts/PingFang-SC-Regular.ttf; \
    fi

# Pipeline configs for both supported VLM backends; compose selects one via
# ${VLM_BACKEND} in its command.
COPY --chown=paddleocr:paddleocr pipeline_config_vllm.yaml /home/paddleocr
COPY --chown=paddleocr:paddleocr pipeline_config_fastdeploy.yaml /home/paddleocr

EXPOSE 8080

# Default to the vLLM pipeline; compose overrides this command.
CMD ["paddlex", "--serve", "--pipeline", "/home/paddleocr/pipeline_config_vllm.yaml"]
33 changes: 33 additions & 0 deletions deploy/paddleocr_vl_docker/accelerators/amd-gpu/vlm.Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
# VLM inference server image for PaddleOCR-VL on AMD GPUs (ROCm).
# BACKEND selects the base stage; only a vllm base is provided for this
# accelerator.
ARG BACKEND=vllm


FROM rocm/vllm-dev:nightly_main_20260125 AS base-vllm


FROM base-${BACKEND}

# Version ranges for the OCR stack; overridable at build time.
ARG PADDLEOCR_VERSION=">=3.4.0,<3.5"
ARG PADDLEX_VERSION=">=3.4.0,<3.5"
# Cache mount keeps pip's download cache on the build host, out of the layers.
RUN --mount=type=cache,target=/root/.cache/pip \
    python -m pip install "paddleocr${PADDLEOCR_VERSION}" "paddlex${PADDLEX_VERSION}"

# Dedicated non-root user with a stable UID/GID.
RUN groupadd -g 1001 paddleocr \
    && useradd -m -s /bin/bash -u 1001 -g 1001 paddleocr
ENV HOME=/home/paddleocr
WORKDIR /home/paddleocr

USER paddleocr

# Offline variant pre-bakes the VL inference model for air-gapped runtime.
ARG BUILD_FOR_OFFLINE=false
RUN if [ "${BUILD_FOR_OFFLINE}" = 'true' ]; then \
        mkdir -p "${HOME}/.paddlex/official_models" \
        && cd "${HOME}/.paddlex/official_models" \
        && wget https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0.0/PaddleOCR-VL-1.5_infer.tar \
        && tar -xf PaddleOCR-VL-1.5_infer.tar \
        && mv PaddleOCR-VL-1.5_infer PaddleOCR-VL-1.5 \
        && rm -f PaddleOCR-VL-1.5_infer.tar; \
    fi

# Document the server port (the genai server binds --port 8080 below).
EXPOSE 8080

# Re-declare BACKEND in this stage and export it so CMD can expand it.
ARG BACKEND
ENV BACKEND=${BACKEND}
# Shell form (bash -c) is required for ${BACKEND} expansion; `exec` replaces
# the shell so the server is PID 1 and receives SIGTERM from `docker stop`.
CMD ["/bin/bash", "-c", "exec paddleocr genai_server --model_name PaddleOCR-VL-1.5-0.9B --host 0.0.0.0 --port 8080 --backend ${BACKEND}"]
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ services:
- /usr/local/bin/npu-smi:/usr/local/bin/npu-smi
- /usr/local/dcmi:/usr/local/dcmi
privileged: true
shm_size: 64G
shm_size: 64g

paddleocr-vlm-server:
image: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/paddleocr-genai-${VLM_BACKEND}-server:${VLM_IMAGE_TAG_SUFFIX}
Expand All @@ -34,4 +34,4 @@ services:
- /usr/local/bin/npu-smi:/usr/local/bin/npu-smi
- /usr/local/dcmi:/usr/local/dcmi
privileged: true
shm_size: 64G
shm_size: 64g
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ services:
test: ["CMD-SHELL", "curl -f http://localhost:8080/health || exit 1"]
volumes:
- /opt/hyhal/:/opt/hyhal/:ro
shm_size: 64G
shm_size: 64g

paddleocr-vlm-server:
image: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/paddleocr-genai-${VLM_BACKEND}-server:${VLM_IMAGE_TAG_SUFFIX}
Expand All @@ -50,4 +50,4 @@ services:
start_period: 300s
volumes:
- /opt/hyhal/:/opt/hyhal/:ro
shm_size: 64G
shm_size: 64g
3 changes: 3 additions & 0 deletions deploy/paddleocr_vl_docker/accelerators/intel-gpu/.env
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
# Image tag suffixes and VLM backend selection for the Intel GPU deployment.
# Consumed by compose.yaml in this directory.
API_IMAGE_TAG_SUFFIX=latest-intel-gpu-offline
VLM_BACKEND=vllm
VLM_IMAGE_TAG_SUFFIX=latest-intel-gpu-offline
33 changes: 33 additions & 0 deletions deploy/paddleocr_vl_docker/accelerators/intel-gpu/compose.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
# Docker Compose deployment of PaddleOCR-VL on Intel GPUs.
services:
  paddleocr-vl-api:
    image: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/paddleocr-vl:${API_IMAGE_TAG_SUFFIX}
    container_name: paddleocr-vl-api
    ports:
      - 8080:8080
    depends_on:
      # Do not start the API until the VLM backend reports healthy.
      paddleocr-vlm-server:
        condition: service_healthy
    devices:
      # NOTE(review): exposes every host device node to the container; a
      # narrower mapping (e.g. /dev/dri) may suffice — confirm with the
      # Intel GPU runtime requirements.
      - /dev:/dev
    shm_size: 64g
    # TODO: Allow using a regular user
    user: root
    restart: unless-stopped
    environment:
      - VLM_BACKEND=${VLM_BACKEND:-vllm}
    # Select the pipeline config file that matches the chosen VLM backend.
    command: /bin/bash -c "paddlex --serve --pipeline /home/paddleocr/pipeline_config_${VLM_BACKEND}.yaml"
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:8080/health || exit 1"]

  paddleocr-vlm-server:
    image: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/paddleocr-genai-${VLM_BACKEND}-server:${VLM_IMAGE_TAG_SUFFIX}
    container_name: paddleocr-vlm-server
    devices:
      - /dev:/dev
    shm_size: 64g
    # TODO: Allow using a regular user
    user: root
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:8080/health || exit 1"]
      # Model loading can take several minutes; ignore early probe failures.
      start_period: 300s
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
# API server image for PaddleOCR-VL on Intel GPUs.
FROM intel/llm-scaler-vllm:0.11.1-b7

# Build-time only: keep apt from prompting. Declared as ARG (not ENV) so it
# is not baked into the runtime environment of the final image.
ARG DEBIAN_FRONTEND=noninteractive

ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1

# Fonts for document rendering (CJK coverage via noto-cjk / wqy-microhei).
# apt lists are removed in the same layer to keep the image small.
RUN apt-get update \
    && apt-get install -y --no-install-recommends \
        fontconfig \
        fonts-dejavu-core \
        fonts-liberation \
        fonts-noto-cjk \
        fonts-wqy-microhei \
        fonts-freefont-ttf \
    && fc-cache -fv \
    && rm -rf /var/lib/apt/lists/*

# CPU build of paddlepaddle, pinned for reproducibility; inference on the
# Intel GPU is handled by the separate VLM server image.
RUN --mount=type=cache,target=/root/.cache/pip \
    python -m pip install paddlepaddle==3.2.1 -i https://www.paddlepaddle.org.cn/packages/stable/cpu/

# Version ranges for the OCR stack; overridable at build time.
ARG PADDLEOCR_VERSION=">=3.4.0,<3.5"
ARG PADDLEX_VERSION=">=3.4.0,<3.5"
# Cache mount keeps pip's download cache on the build host, out of the layers.
RUN --mount=type=cache,target=/root/.cache/pip \
    python -m pip install "paddleocr[doc-parser]${PADDLEOCR_VERSION}" "paddlex[serving]${PADDLEX_VERSION}"

# Dedicated non-root user with a stable UID/GID for volume ownership.
RUN groupadd -g 1001 paddleocr \
    && useradd -m -s /bin/bash -u 1001 -g 1001 paddleocr
ENV HOME=/home/paddleocr
WORKDIR /home/paddleocr

USER paddleocr

# Offline variant pre-bakes the inference models and the PingFang font so the
# container does not need network access at runtime.
ARG BUILD_FOR_OFFLINE=false
RUN if [ "${BUILD_FOR_OFFLINE}" = 'true' ]; then \
        mkdir -p "${HOME}/.paddlex/official_models" \
        && cd "${HOME}/.paddlex/official_models" \
        && wget https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0.0/UVDoc_infer.tar \
            https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0.0/PP-LCNet_x1_0_doc_ori_infer.tar \
            https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0.0/PP-DocLayoutV3_infer.tar \
            https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0.0/PaddleOCR-VL-1.5_infer.tar \
        && tar -xf UVDoc_infer.tar \
        && mv UVDoc_infer UVDoc \
        && tar -xf PP-LCNet_x1_0_doc_ori_infer.tar \
        && mv PP-LCNet_x1_0_doc_ori_infer PP-LCNet_x1_0_doc_ori \
        && tar -xf PP-DocLayoutV3_infer.tar \
        && mv PP-DocLayoutV3_infer PP-DocLayoutV3 \
        && tar -xf PaddleOCR-VL-1.5_infer.tar \
        && mv PaddleOCR-VL-1.5_infer PaddleOCR-VL-1.5 \
        && rm -f UVDoc_infer.tar PP-LCNet_x1_0_doc_ori_infer.tar PP-DocLayoutV3_infer.tar PaddleOCR-VL-1.5_infer.tar \
        && mkdir -p "${HOME}/.paddlex/fonts" \
        && wget -P "${HOME}/.paddlex/fonts" https://paddle-model-ecology.bj.bcebos.com/paddlex/PaddleX3.0/fonts/PingFang-SC-Regular.ttf; \
    fi

# Pipeline configs for both supported VLM backends; compose selects one via
# ${VLM_BACKEND} in its command.
COPY --chown=paddleocr:paddleocr pipeline_config_vllm.yaml /home/paddleocr
COPY --chown=paddleocr:paddleocr pipeline_config_fastdeploy.yaml /home/paddleocr

EXPOSE 8080

# Default to the vLLM pipeline; compose overrides this command.
CMD ["paddlex", "--serve", "--pipeline", "/home/paddleocr/pipeline_config_vllm.yaml"]
33 changes: 33 additions & 0 deletions deploy/paddleocr_vl_docker/accelerators/intel-gpu/vlm.Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
# VLM inference server image for PaddleOCR-VL on Intel GPUs.
# BACKEND selects the base stage; only a vllm base is provided for this
# accelerator.
ARG BACKEND=vllm


FROM intel/llm-scaler-vllm:0.11.1-b7 AS base-vllm


FROM base-${BACKEND}

# Version ranges for the OCR stack; overridable at build time.
ARG PADDLEOCR_VERSION=">=3.4.0,<3.5"
ARG PADDLEX_VERSION=">=3.4.0,<3.5"
# Cache mount keeps pip's download cache on the build host, out of the layers.
RUN --mount=type=cache,target=/root/.cache/pip \
    python -m pip install "paddleocr${PADDLEOCR_VERSION}" "paddlex${PADDLEX_VERSION}"

# Dedicated non-root user with a stable UID/GID.
RUN groupadd -g 1001 paddleocr \
    && useradd -m -s /bin/bash -u 1001 -g 1001 paddleocr
ENV HOME=/home/paddleocr
WORKDIR /home/paddleocr

USER paddleocr

# Offline variant pre-bakes the VL inference model for air-gapped runtime.
ARG BUILD_FOR_OFFLINE=false
RUN if [ "${BUILD_FOR_OFFLINE}" = 'true' ]; then \
        mkdir -p "${HOME}/.paddlex/official_models" \
        && cd "${HOME}/.paddlex/official_models" \
        && wget https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0.0/PaddleOCR-VL-1.5_infer.tar \
        && tar -xf PaddleOCR-VL-1.5_infer.tar \
        && mv PaddleOCR-VL-1.5_infer PaddleOCR-VL-1.5 \
        && rm -f PaddleOCR-VL-1.5_infer.tar; \
    fi

# Document the server port (the genai server binds --port 8080 below).
EXPOSE 8080

# Re-declare BACKEND in this stage and export it so CMD can expand it.
ARG BACKEND
ENV BACKEND=${BACKEND}
# Shell form (bash -c) is required for ${BACKEND} expansion; `exec` replaces
# the shell so the server is PID 1 and receives SIGTERM from `docker stop`.
CMD ["/bin/bash", "-c", "exec paddleocr genai_server --model_name PaddleOCR-VL-1.5-0.9B --host 0.0.0.0 --port 8080 --backend ${BACKEND}"]
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ services:
healthcheck:
test: ["CMD-SHELL", "curl -f http://localhost:8080/health || exit 1"]
privileged: true
shm_size: 64G
shm_size: 64g

paddleocr-vlm-server:
image: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/paddleocr-genai-${VLM_BACKEND}-server:${VLM_IMAGE_TAG_SUFFIX}
Expand All @@ -26,4 +26,4 @@ services:
test: ["CMD-SHELL", "curl -f http://localhost:8080/health || exit 1"]
start_period: 300s
privileged: true
shm_size: 64G
shm_size: 64g
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ services:
- driver: nvidia
device_ids: ["0"]
capabilities: [gpu]
shm_size: 64g
# TODO: Allow using a regular user
user: root
restart: unless-stopped
Expand All @@ -33,6 +34,7 @@ services:
- driver: nvidia
device_ids: ["0"]
capabilities: [gpu]
shm_size: 64g
# TODO: Allow using a regular user
user: root
restart: unless-stopped
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ services:
- driver: nvidia
device_ids: ["0"]
capabilities: [gpu]
shm_size: 64g
# TODO: Allow using a regular user
user: root
restart: unless-stopped
Expand All @@ -33,6 +34,7 @@ services:
- driver: nvidia
device_ids: ["0"]
capabilities: [gpu]
shm_size: 64g
# TODO: Allow using a regular user
user: root
restart: unless-stopped
Expand Down
13 changes: 6 additions & 7 deletions deploy/paddleocr_vl_docker/build_pipeline.sh
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ show_usage() {
Usage: $(basename "$0") [OPTIONS]

Options:
--device-type <type> Device type (nvidia-gpu|nvidia-gpu-sm120|hygon-dcu|kunlunxin-xpu|metax-gpu|iluvatar-gpu|huawei-npu) [default: ${device_type}]
--device-type <type> Device type (nvidia-gpu|nvidia-gpu-sm120|hygon-dcu|kunlunxin-xpu|metax-gpu|iluvatar-gpu|huawei-npu|intel-gpu|amd-gpu) [default: ${device_type}]
--offline Build offline version
--ppocr-version <ver> PaddleOCR version [default: ${paddleocr_version}]
--pdx-version <ver> PaddleX version [default: ${paddlex_version}]
Expand Down Expand Up @@ -48,7 +48,7 @@ while [[ $# -gt 0 ]]; do
shift
shift
case "${device_type}" in
nvidia-gpu|nvidia-gpu-sm120|hygon-dcu|kunlunxin-xpu|metax-gpu|iluvatar-gpu|huawei-npu)
nvidia-gpu|nvidia-gpu-sm120|hygon-dcu|kunlunxin-xpu|metax-gpu|iluvatar-gpu|huawei-npu|intel-gpu|amd-gpu)
;;
*)
echo "Error: Unknown device type: ${device_type}" >&2
Expand Down Expand Up @@ -154,8 +154,8 @@ if [ ! -f "${dockerfile}" ]; then
exit 1
fi

dockerfile_hash="$(sha256sum "${dockerfile}" | cut -c1-12)"
image_version="${paddleocr_version}-${paddlex_version}-${dockerfile_hash}"
revision="$(git rev-parse --short HEAD)"
image_version="${revision}-ppocr${paddleocr_version}-pdx${paddlex_version}"

# Image name
base_image_name='paddleocr-vl'
Expand All @@ -178,9 +178,8 @@ build_args=(
'--build-arg' "http_proxy=${http_proxy:-}"
'--build-arg' "https_proxy=${https_proxy:-}"
'--build-arg' "no_proxy=${no_proxy:-}"
'--label' "org.opencontainers.image.version.paddleocr=${paddleocr_version}"
'--label' "org.opencontainers.image.version.paddlex=${paddlex_version}"
'--label' "org.opencontainers.image.version.dockerfile.sha=${dockerfile_hash}"
'--label' "org.opencontainers.image.version=${image_version}"
'--label' "org.opencontainers.image.revision=${revision}"
'.'
)

Expand Down
Loading
Loading