Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
38 commits
Select commit Hold shift + click to select a range
5d19fca
feat(storage): 添加阿里云OSS存储支持
zyfjrx Jan 29, 2026
f0ccfd4
Use cache mount for genai docker (#4954)
Bobholamovic Jan 29, 2026
f34bfc2
Fix HPS order bug (#4955)
Bobholamovic Jan 29, 2026
f53eaf7
Fix HPS and remove scipy from required deps (#4957)
Bobholamovic Jan 29, 2026
839241e
Fix transformers version (#4956)
Bobholamovic Jan 29, 2026
a102e8d
bugfix: unexpected change of the constant IMAGE_LABELS (#4960)
changdazhou Jan 30, 2026
bb4b1c1
[METAX] add ppdoclayoutv3 to METAX_GPU_WHITELIST (#4962)
changdazhou Jan 30, 2026
f7f83b7
vllm 0.10.2 needs transformers 4.x (#4963)
zhang-prog Jan 30, 2026
56ca189
Support setting PDF rendering scale factor (#4967)
Bobholamovic Feb 2, 2026
2e70318
fix: check if cropped image size is zero in table recognition v2 (#4937)
lyn-zzz Feb 3, 2026
06223e3
Fix/doc vlm async cancellation (#4969)
scyyh11 Feb 4, 2026
a20fddc
Fix: Update langchain import to use langchain_core.documents (#4944)
Ihebdhouibi Feb 4, 2026
edeb50e
Fix typo (#4982)
Bobholamovic Feb 6, 2026
fb23d61
Update Docker image for CI workflow (#4975)
plusNew001 Feb 9, 2026
ac930a9
add llama.cpp support (#4983)
zhang-prog Feb 9, 2026
e2b463e
fixing langchain text splitter import (#4981)
norbbrog Feb 10, 2026
b0be02f
修复PNG格式空白图像出现超出索引范围的问题 (#4945)
yang-521 Feb 10, 2026
9cdf48e
Remove PaddleOCR-VL server page limit (#4991)
Bobholamovic Feb 11, 2026
fe7c149
Add Intel GPU config (#4992)
Bobholamovic Feb 11, 2026
d59b2c4
PaddleX Add ROCm 7.0 compatibility patches (#4990)
M4jupitercannon Feb 11, 2026
9a3f4dd
[Feat] Support setting expiration for BOS URLs (#4993)
Bobholamovic Feb 12, 2026
3b04645
add \n for seal rec && bugfix for text in table && delete_pass by mod…
changdazhou Feb 13, 2026
bfda368
Fix auto batch size for PaddleOCR-VL-1.5-0.9B (#5003)
Bobholamovic Feb 13, 2026
e63a51a
Update HPS frozen deps (#5004)
Bobholamovic Feb 13, 2026
5bf095a
update vlm batch_size (#5005)
zhang-prog Feb 13, 2026
50f5932
add P800 document (#4995)
onecatcn Feb 14, 2026
062a782
Update mkdocs.yml to reflect kunlunxin docs changes (#5006)
onecatcn Feb 14, 2026
2054f94
support iluvatar_gpu for ppdet (#5002)
leo-q8 Feb 14, 2026
4511e2f
fix: add langchain compatibility shim for newer versions (0.1.x+) (#4…
Ansarimajid Feb 24, 2026
914f5fb
fix codes (#4984)
liu-jiaxuan Feb 24, 2026
cfba8bc
fix: guard chart_recognition_model init with use_chart_recognition fl…
scyyh11 Feb 24, 2026
3d5e3a0
Use git hash as image version (#5016)
Bobholamovic Feb 26, 2026
2044265
fix typo in error message (#5015)
F-Palmer Feb 26, 2026
1ffc4a6
Feature/hps paddleocr vl 1.5 (#5017)
scyyh11 Feb 28, 2026
09e1ff1
Add independent version.txt for PaddleOCR-VL-1.5 HPS SDK (#5026)
scyyh11 Mar 2, 2026
6b397a6
Fix: Integer overflow in `calculate_overlap_ratio` (`utils.py:248`) (…
albcunha Mar 2, 2026
2909209
[Fix] refine config of RT-DETR-L (#5036)
liu-jiaxuan Mar 5, 2026
01c1d80
Merge branch 'develop' into support-aliyun-oss
Bobholamovic Mar 9, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .github/workflows/deploy_docs.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ name: Develop Docs
on:
push:
branches: #设置更新哪个分支会更新站点
- release/3.3
- release/3.4
permissions:
contents: write
jobs:
Expand All @@ -27,5 +27,5 @@ jobs:
- run: pip install mike mkdocs-material jieba mkdocs-git-revision-date-localized-plugin mkdocs-git-committers-plugin-2 mkdocs-git-authors-plugin mkdocs-static-i18n mkdocs-minify-plugin
- run: |
git fetch origin gh-pages --depth=1
mike deploy --push --update-aliases 3.3 latest
mike deploy --push --update-aliases 3.4 latest
mike set-default --push latest
5 changes: 3 additions & 2 deletions .github/workflows/xpu_ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ jobs:

- name: Code Checkout
env:
docker_image: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/fastdeploy-xpu:2.3.0
docker_image: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/paddleqa:xpu-ubuntu2204-x86_64-gcc123-py310
run: |
REPO="https://github.com/${{ github.repository }}.git"
FULL_REPO="${{ github.repository }}"
Expand Down Expand Up @@ -58,7 +58,7 @@ jobs:

- name: Run CI unittest
env:
docker_image: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/fastdeploy-xpu:2.3.0
docker_image: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/paddleqa:xpu-ubuntu2204-x86_64-gcc123-py310
run: |
runner_name="${{ runner.name }}"
PARENT_DIR=$(dirname "$WORKSPACE")
Expand All @@ -71,5 +71,6 @@ jobs:
${docker_image} /bin/bash -c "
git config --global --add safe.directory /workspace/PaddleX
cd PaddleX
export PADDLE_PDX_DISABLE_MODEL_SOURCE_CHECK=true
bash tests/run_xpu_ci.sh
"
1 change: 1 addition & 0 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -72,3 +72,4 @@ repos:
files: ^paddlex/.*\.py$
additional_dependencies:
- stdlib-list==0.10.0
- setuptools
1 change: 1 addition & 0 deletions .precommit/check_imports.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,7 @@
"langchain_community": "langchain-community",
"langchain_core": "langchain-core",
"langchain_openai": "langchain-openai",
"langchain_text_splitters": "langchain-text-splitters",
"lxml": "lxml",
"matplotlib": "matplotlib",
"modelscope": "modelscope",
Expand Down
11 changes: 7 additions & 4 deletions deploy/genai_vllm_server_docker/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -4,21 +4,24 @@ RUN apt-get update \
&& apt-get install -y libgl1 \
&& rm -rf /var/lib/apt/lists/*

ENV PIP_NO_CACHE_DIR=0
ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1

RUN python -m pip install torch==2.8.0
RUN --mount=type=cache,target=/root/.cache/pip \
python -m pip install torch==2.8.0

ARG PADDLEX_VERSION=">=3.3.6,<3.4"
RUN python -m pip install "paddlex${PADDLEX_VERSION}"
RUN --mount=type=cache,target=/root/.cache/pip \
python -m pip install "paddlex${PADDLEX_VERSION}"

ARG BUILD_FOR_SM120=false
RUN if [ "${BUILD_FOR_SM120}" = 'true' ]; then \
RUN --mount=type=cache,target=/root/.cache/pip \
if [ "${BUILD_FOR_SM120}" = 'true' ]; then \
python -m pip install https://github.com/mjun0812/flash-attention-prebuild-wheels/releases/download/v0.4.11/flash_attn-2.8.3%2Bcu128torch2.8-cp310-cp310-linux_x86_64.whl; \
else \
python -m pip install https://github.com/mjun0812/flash-attention-prebuild-wheels/releases/download/v0.3.14/flash_attn-2.8.2+cu128torch2.8-cp310-cp310-linux_x86_64.whl; \
fi \
&& python -m pip install transformers==4.57.6 \
&& paddlex --install genai-vllm-server

EXPOSE 8080
Expand Down
18 changes: 13 additions & 5 deletions deploy/hps/sdk/pipelines/OCR/server/model_repo/ocr/1/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@ def initialize(self, args):
self.context = {}
self.context["file_storage"] = None
self.context["return_img_urls"] = False
self.context["url_expires_in"] = -1
self.context["max_num_input_imgs"] = _DEFAULT_MAX_NUM_INPUT_IMGS
self.context["max_output_img_size"] = _DEFAULT_MAX_OUTPUT_IMG_SIZE
if self.app_config.extra:
Expand All @@ -59,6 +60,8 @@ def initialize(self, args):
self.context["return_img_urls"] = self.app_config.extra[
"return_img_urls"
]
if "url_expires_in" in self.app_config.extra:
self.context["url_expires_in"] = self.app_config.extra["url_expires_in"]
if "max_num_input_imgs" in self.app_config.extra:
self.context["max_num_input_imgs"] = self.app_config.extra[
"max_num_input_imgs"
Expand Down Expand Up @@ -105,12 +108,16 @@ def run_batch(self, inputs, log_ids, batch_id):

ret = executor.map(self._preprocess, inputs_g, log_ids_g)
ind_img_lsts, ind_data_info_lst, ind_visualize_enabled_lst = [], [], []
ind_input_id_lst, ind_log_id_lst, ind_input_lst = [], [], []
for i, item in enumerate(ret):
if isinstance(item, tuple):
assert len(item) == 3, len(item)
ind_img_lsts.append(item[0])
ind_data_info_lst.append(item[1])
ind_visualize_enabled_lst.append(item[2])
ind_input_id_lst.append(input_ids_g[i])
ind_log_id_lst.append(log_ids_g[i])
ind_input_lst.append(inputs_g[i])
else:
input_id = input_ids_g[i]
result_or_output_dic[input_id] = item
Expand Down Expand Up @@ -146,19 +153,19 @@ def run_batch(self, inputs, log_ids, batch_id):
ind_preds.append(preds[start_idx : start_idx + len(item)])
start_idx += len(item)

for i, result in zip(
input_ids_g,
for input_id, result in zip(
ind_input_id_lst,
executor.map(
self._postprocess,
ind_img_lsts,
ind_data_info_lst,
ind_visualize_enabled_lst,
ind_preds,
log_ids_g,
inputs_g,
ind_log_id_lst,
ind_input_lst,
),
):
result_or_output_dic[i] = result
result_or_output_dic[input_id] = result

assert len(result_or_output_dic) == len(
inputs
Expand Down Expand Up @@ -270,6 +277,7 @@ def _postprocess(self, images, data_info, visualize_enabled, preds, log_id, inpu
filename_template=f"{{key}}_{i}.jpg",
file_storage=self.context["file_storage"],
return_urls=self.context["return_img_urls"],
url_expires_in=self.context["url_expires_in"],
max_img_size=self.context["max_output_img_size"],
)
else:
Expand Down
2 changes: 1 addition & 1 deletion deploy/hps/sdk/pipelines/OCR/version.txt
Original file line number Diff line number Diff line change
@@ -1 +1 @@
0.2.5
0.2.6
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ def initialize(self, args):
self.context = {}
self.context["file_storage"] = None
self.context["return_img_urls"] = False
self.context["url_expires_in"] = -1
self.context["max_num_input_imgs"] = _DEFAULT_MAX_NUM_INPUT_IMGS
self.context["max_output_img_size"] = _DEFAULT_MAX_OUTPUT_IMG_SIZE
if self.app_config.extra:
Expand All @@ -44,6 +45,8 @@ def initialize(self, args):
self.context["return_img_urls"] = self.app_config.extra[
"return_img_urls"
]
if "url_expires_in" in self.app_config.extra:
self.context["url_expires_in"] = self.app_config.extra["url_expires_in"]
if "max_num_input_imgs" in self.app_config.extra:
self.context["max_num_input_imgs"] = self.app_config.extra[
"max_num_input_imgs"
Expand Down Expand Up @@ -142,6 +145,7 @@ def run(self, input, log_id):
filename_template=f"{{key}}_{i}.jpg",
file_storage=self.context["file_storage"],
return_urls=self.context["return_img_urls"],
url_expires_in=self.context["url_expires_in"],
max_img_size=self.context["max_output_img_size"],
)
else:
Expand Down
2 changes: 1 addition & 1 deletion deploy/hps/sdk/pipelines/PP-ChatOCRv3-doc/version.txt
Original file line number Diff line number Diff line change
@@ -1 +1 @@
0.3.2
0.3.3
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ def initialize(self, args):
self.context = {}
self.context["file_storage"] = None
self.context["return_img_urls"] = False
self.context["url_expires_in"] = -1
self.context["max_num_input_imgs"] = _DEFAULT_MAX_NUM_INPUT_IMGS
self.context["max_output_img_size"] = _DEFAULT_MAX_OUTPUT_IMG_SIZE
if self.app_config.extra:
Expand All @@ -44,6 +45,8 @@ def initialize(self, args):
self.context["return_img_urls"] = self.app_config.extra[
"return_img_urls"
]
if "url_expires_in" in self.app_config.extra:
self.context["url_expires_in"] = self.app_config.extra["url_expires_in"]
if "max_num_input_imgs" in self.app_config.extra:
self.context["max_num_input_imgs"] = self.app_config.extra[
"max_num_input_imgs"
Expand Down Expand Up @@ -143,6 +146,7 @@ def run(self, input, log_id):
filename_template=f"{{key}}_{i}.jpg",
file_storage=self.context["file_storage"],
return_urls=self.context["return_img_urls"],
url_expires_in=self.context["url_expires_in"],
max_img_size=self.context["max_output_img_size"],
)
else:
Expand Down
2 changes: 1 addition & 1 deletion deploy/hps/sdk/pipelines/PP-ChatOCRv4-doc/version.txt
Original file line number Diff line number Diff line change
@@ -1 +1 @@
0.4.2
0.4.3
Original file line number Diff line number Diff line change
Expand Up @@ -30,12 +30,10 @@
class TritonPythonModel(BaseTritonPythonModel):
def initialize(self, args):
super().initialize(args)

self.pipeline.inintial_visual_predictor(self.pipeline.config)

self.context = {}
self.context["file_storage"] = None
self.context["return_img_urls"] = False
self.context["url_expires_in"] = -1
self.context["max_num_input_imgs"] = _DEFAULT_MAX_NUM_INPUT_IMGS
self.context["max_output_img_size"] = _DEFAULT_MAX_OUTPUT_IMG_SIZE
if self.app_config.extra:
Expand All @@ -47,6 +45,8 @@ def initialize(self, args):
self.context["return_img_urls"] = self.app_config.extra[
"return_img_urls"
]
if "url_expires_in" in self.app_config.extra:
self.context["url_expires_in"] = self.app_config.extra["url_expires_in"]
if "max_num_input_imgs" in self.app_config.extra:
self.context["max_num_input_imgs"] = self.app_config.extra[
"max_num_input_imgs"
Expand Down Expand Up @@ -151,6 +151,7 @@ def run(self, input, log_id):
filename_template=f"markdown_{i}/{{key}}",
file_storage=self.context["file_storage"],
return_urls=self.context["return_img_urls"],
url_expires_in=self.context["url_expires_in"],
max_img_size=self.context["max_output_img_size"],
)
md_flags = md_data["page_continuation_flags"]
Expand All @@ -165,6 +166,7 @@ def run(self, input, log_id):
filename_template=f"{{key}}_{i}.jpg",
file_storage=self.context["file_storage"],
return_urls=self.context["return_img_urls"],
url_expires_in=self.context["url_expires_in"],
max_img_size=self.context["max_output_img_size"],
)
else:
Expand Down
2 changes: 1 addition & 1 deletion deploy/hps/sdk/pipelines/PP-DocTranslation/version.txt
Original file line number Diff line number Diff line change
@@ -1 +1 @@
0.1.2
0.1.3
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@ def initialize(self, args):
self.context = {}
self.context["file_storage"] = None
self.context["return_img_urls"] = False
self.context["url_expires_in"] = -1
self.context["max_num_input_imgs"] = _DEFAULT_MAX_NUM_INPUT_IMGS
self.context["max_output_img_size"] = _DEFAULT_MAX_OUTPUT_IMG_SIZE
if self.app_config.extra:
Expand All @@ -58,6 +59,8 @@ def initialize(self, args):
self.context["return_img_urls"] = self.app_config.extra[
"return_img_urls"
]
if "url_expires_in" in self.app_config.extra:
self.context["url_expires_in"] = self.app_config.extra["url_expires_in"]
if "max_num_input_imgs" in self.app_config.extra:
self.context["max_num_input_imgs"] = self.app_config.extra[
"max_num_input_imgs"
Expand Down Expand Up @@ -104,12 +107,16 @@ def run_batch(self, inputs, log_ids, batch_id):

ret = executor.map(self._preprocess, inputs_g, log_ids_g)
ind_img_lsts, ind_data_info_lst, ind_visualize_enabled_lst = [], [], []
ind_input_ids_lst, ind_log_ids_lst, ind_inputs_lst = [], [], []
for i, item in enumerate(ret):
if isinstance(item, tuple):
assert len(item) == 3, len(item)
ind_img_lsts.append(item[0])
ind_data_info_lst.append(item[1])
ind_visualize_enabled_lst.append(item[2])
ind_input_ids_lst.append(input_ids_g[i])
ind_log_ids_lst.append(log_ids_g[i])
ind_inputs_lst.append(inputs_g[i])
else:
input_id = input_ids_g[i]
result_or_output_dic[input_id] = item
Expand Down Expand Up @@ -179,19 +186,19 @@ def run_batch(self, inputs, log_ids, batch_id):
ind_preds.append(preds[start_idx : start_idx + len(item)])
start_idx += len(item)

for i, result in zip(
input_ids_g,
for input_id, result in zip(
ind_input_ids_lst,
executor.map(
self._postprocess,
ind_img_lsts,
ind_data_info_lst,
ind_visualize_enabled_lst,
ind_preds,
log_ids_g,
inputs_g,
ind_log_ids_lst,
ind_inputs_lst,
),
):
result_or_output_dic[i] = result
result_or_output_dic[input_id] = result

assert len(result_or_output_dic) == len(
inputs
Expand Down Expand Up @@ -323,6 +330,7 @@ def _postprocess(self, images, data_info, visualize_enabled, preds, log_id, inpu
filename_template=f"markdown_{i}/{{key}}",
file_storage=self.context["file_storage"],
return_urls=self.context["return_img_urls"],
url_expires_in=self.context["url_expires_in"],
max_img_size=self.context["max_output_img_size"],
)
md_flags = md_data["page_continuation_flags"]
Expand All @@ -337,6 +345,7 @@ def _postprocess(self, images, data_info, visualize_enabled, preds, log_id, inpu
filename_template=f"{{key}}_{i}.jpg",
file_storage=self.context["file_storage"],
return_urls=self.context["return_img_urls"],
url_expires_in=self.context["url_expires_in"],
max_img_size=self.context["max_output_img_size"],
)
else:
Expand Down
2 changes: 1 addition & 1 deletion deploy/hps/sdk/pipelines/PP-StructureV3/version.txt
Original file line number Diff line number Diff line change
@@ -1 +1 @@
0.3.5
0.3.6
Loading