
Commit 4a1f717

rename mkldnn to onednn (#1183)
1 parent ade45c8 commit 4a1f717

File tree

5 files changed: +29 -21 lines

- README.md
- deploy/python_infer/base.py
- docs/zh/user_guide.md
- ppsci/solver/solver.py
- ppsci/utils/config.py

README.md

Lines changed: 1 addition & 1 deletion

```diff
@@ -166,7 +166,7 @@ PaddleScience is a scientific computing suite developed on the deep learning framework PaddlePaddle
 ### Install PaddlePaddle
 
 <!-- --8<-- [start:paddle_install] -->
-Based on your runtime environment, visit the [PaddlePaddle](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/develop/install/pip/linux-pip.html) website and install the <font color="red"><b>3.0 develop</b></font> version of PaddlePaddle.
+Based on your runtime environment, visit the [PaddlePaddle](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/develop/install/pip/linux-pip.html) website; installing a PaddlePaddle <font color="red"><b>3.0 (or later) stable release, or the latest develop build</b></font> is recommended.
 
 After installation, run the following command to verify that Paddle was installed successfully.
 
```
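For context, a minimal verification sketch (not part of this diff; `paddle.utils.run_check()` is PaddlePaddle's built-in installation self-check, and `paddle.version.full_version`/`paddle.version.commit` are the same attributes the `ppsci/solver/solver.py` change below logs):

```python
# Minimal sketch: verify a PaddlePaddle install (any 3.0+ stable or develop
# build, as the README now recommends).
import paddle

paddle.utils.run_check()            # runs a small end-to-end self-check
print(paddle.version.full_version)  # e.g. "3.0.0" on a stable build
print(paddle.version.commit[:7])    # short commit hash of the build
```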

deploy/python_infer/base.py

Lines changed: 11 additions & 6 deletions

```diff
@@ -39,7 +39,7 @@ class Predictor:
         pdmodel_path (Optional[str]): Path to the PaddlePaddle model file. Defaults to None.
         pdiparams_path (Optional[str]): Path to the PaddlePaddle model parameters file. Defaults to None.
         device (Literal["cpu", "gpu", "npu", "xpu", "sdaa"], optional): Device to use for inference. Defaults to "cpu".
-        engine (Literal["native", "tensorrt", "onnx", "mkldnn"], optional): Inference engine to use. Defaults to "native".
+        engine (Literal["native", "tensorrt", "onnx", "onednn"], optional): Inference engine to use. Defaults to "native".
         precision (Literal["fp32", "fp16", "int8"], optional): Precision to use for inference. Defaults to "fp32".
         onnx_path (Optional[str], optional): Path to the ONNX model file. Defaults to None.
         ir_optim (bool, optional): Whether to use IR optimization. Defaults to True.
@@ -55,7 +55,7 @@ def __init__(
         pdiparams_path: Optional[str] = None,
         *,
         device: Literal["cpu", "gpu", "npu", "xpu", "sdaa"] = "cpu",
-        engine: Literal["native", "tensorrt", "onnx", "mkldnn"] = "native",
+        engine: Literal["native", "tensorrt", "onnx", "onednn"] = "native",
         precision: Literal["fp32", "fp16", "int8"] = "fp32",
         onnx_path: Optional[str] = None,
         ir_optim: bool = True,
@@ -157,11 +157,11 @@ def _create_paddle_predictor(
             config.enable_xpu(10 * 1024 * 1024)
         else:
             config.disable_gpu()
-            if self.engine == "mkldnn":
+            if self.engine == "onednn":
                 # 'set_mkldnn_cache_capacity' is not available on macOS
                 if platform.system() != "Darwin":
                     ...
-                    # cache 10 different shapes for mkldnn to avoid memory leak
+                    # cache 10 different shapes for onednn to avoid memory leak
                     # config.set_mkldnn_cache_capacity(10)
                 config.enable_mkldnn()
 
@@ -170,6 +170,11 @@ def _create_paddle_predictor(
 
                 config.set_cpu_math_library_num_threads(self.num_cpu_threads)
 
+            elif self.engine == "mkldnn":
+                raise ValueError(
+                    "The 'mkldnn' engine is deprecated. Please use 'onednn' instead."
+                )
+
         # enable memory optim
         config.enable_memory_optim()
         # config.disable_glog_info()
@@ -221,9 +226,9 @@ def _check_device(self, device: str):
             )
 
     def _check_engine(self, engine: str):
-        if engine not in ["native", "tensorrt", "onnx", "mkldnn"]:
+        if engine not in ["native", "tensorrt", "onnx", "onednn"]:
             raise ValueError(
-                "Inference only supports 'native', 'tensorrt', 'onnx' and 'mkldnn' "
+                "Inference only supports 'native', 'tensorrt', 'onnx' and 'onednn' "
                 f"engines, but got {engine}."
             )
 
```
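A hedged usage sketch of the renamed engine (not from this commit; the keyword arguments are those documented in the docstring above, the model paths are placeholders, and in practice a concrete subclass of `Predictor` would be used):

```python
# Hypothetical usage sketch; "model.pdmodel"/"model.pdiparams" are placeholder
# paths, and Predictor is the base class changed in this commit.
from deploy.python_infer.base import Predictor

predictor = Predictor(
    pdmodel_path="model.pdmodel",
    pdiparams_path="model.pdiparams",
    device="cpu",     # the oneDNN branch is only taken on the CPU path
    engine="onednn",  # formerly "mkldnn"
)

# The removed name now fails fast in _check_engine:
#   ValueError: Inference only supports 'native', 'tensorrt', 'onnx' and
#   'onednn' engines, but got mkldnn.
```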

docs/zh/user_guide.md

Lines changed: 6 additions & 6 deletions

````diff
@@ -464,7 +464,7 @@ ppsci MESSAGE: Visualization result is saved to: ./aneurysm_pred.vtu
 
 PaddleScience provides several inference configurations that can be combined on the command line; the currently supported combinations are:
 
-| | Native | ONNX | TensorRT | macaRT | MKLDNN |
+| | Native | ONNX | TensorRT | macaRT | oneDNN |
 | :--- | :--- | :--- | :--- | :--- | :--- |
 | Intel(CPU) | ✅ | ✅ | / | / | ✅ |
 | NVIDIA | ✅ | ✅ | ✅ | / | / |
@@ -576,31 +576,31 @@ PaddleScience provides several inference configurations that can be combined on the command line
         INFER.engine=onnx
     ```
 
-=== "Inference with MKLDNN"
+=== "Inference with oneDNN"
 
-    MKLDNN is a high-performance inference engine released by Intel for accelerating inference on CPUs; PaddleScience supports inference with MKLDNN.
+    oneDNN is a high-performance inference engine released by Intel for accelerating inference on CPUs; PaddleScience supports inference with oneDNN.
 
     Run the following command to perform inference:
 
     ``` sh
     python aneurysm.py mode=infer \
         INFER.device=cpu \
-        INFER.engine=mkldnn
+        INFER.engine=onednn
     ```
 
 !!! info "Full inference configuration parameters"
 
     | Parameter | Default | Description |
     | :--- | :--- | :--- |
     | `INFER.device` | `cpu` | Inference device; currently supports `cpu` and `gpu` |
-    | `INFER.engine` | `native` | Inference engine; currently supports `native`, `tensorrt`, `onnx` and `mkldnn` |
+    | `INFER.engine` | `native` | Inference engine; currently supports `native`, `tensorrt`, `onnx` and `onednn` |
     | `INFER.precision` | `fp32` | Inference precision; currently supports `fp32` and `fp16` |
    | `INFER.ir_optim` | `True` | Whether to enable IR optimization |
     | `INFER.min_subgraph_size` | `30` | Minimum subgraph size in TensorRT; TensorRT is only attempted for a subgraph when its size exceeds this value |
     | `INFER.gpu_mem` | `2000` | Initial GPU memory size |
     | `INFER.gpu_id` | `0` | Logical GPU device ID |
     | `INFER.max_batch_size` | `1024` | Maximum batch_size during inference |
-    | `INFER.num_cpu_threads` | `10` | Number of CPU threads for MKLDNN and ONNX inference |
+    | `INFER.num_cpu_threads` | `10` | Number of CPU threads for oneDNN and ONNX inference |
     | `INFER.batch_size` | `256` | batch_size during inference |
 
 ### 1.4 Resume training from a checkpoint
````

ppsci/solver/solver.py

Lines changed: 4 additions & 5 deletions

```diff
@@ -477,16 +477,15 @@ def dist_wrapper(model: nn.Layer) -> paddle.DataParallel:
 
         # log paddlepaddle's version
         if version.Version(paddle.__version__) != version.Version("0.0.0"):
-            paddle_version = paddle.__version__
             if version.Version(paddle.__version__) < version.Version("2.6.0"):
                 logger.warning(
-                    f"Detected paddlepaddle version is '{paddle_version}', "
+                    f"Detected paddlepaddle version is '{paddle.__version__}', "
                     "currently it is recommended to use paddlepaddle >= 2.6 or develop version."
                 )
-        else:
-            paddle_version = f"develop({paddle.version.commit[:7]})"
 
-        logger.info(f"Using paddlepaddle {paddle_version} on device {self.device}")
+        logger.info(
+            f"Using paddlepaddle {paddle.version.full_version}({paddle.version.commit[:7]}) on device {self.device}"
+        )
 
         self.forward_helper = expression.ExpressionSolver()
 
```
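The net effect: instead of special-casing develop builds (version "0.0.0") with a hand-built `paddle_version` string, the log line now always combines `paddle.version.full_version` with the short commit hash. A small sketch of the resulting message (the device value is illustrative):

```python
# Reproduces the string the new logger.info call formats; the attributes are
# the same ones used in the diff above.
import paddle

device = "cpu"  # illustrative stand-in for self.device
print(
    f"Using paddlepaddle {paddle.version.full_version}"
    f"({paddle.version.commit[:7]}) on device {device}"
)
```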

ppsci/utils/config.py

Lines changed: 7 additions & 3 deletions

```diff
@@ -213,7 +213,7 @@ class InferConfig(BaseModel):
     pdiparams_path: Optional[str] = None
     onnx_path: Optional[str] = None
     device: Literal["cpu", "gpu", "npu", "xpu", "sdaa"] = "cpu"
-    engine: Literal["native", "tensorrt", "onnx", "mkldnn"] = "native"
+    engine: Literal["native", "tensorrt", "onnx", "onednn"] = "native"
     precision: Literal["fp32", "fp16", "int8"] = "fp32"
     ir_optim: bool = True
     min_subgraph_size: int = 30
@@ -226,14 +226,18 @@ class InferConfig(BaseModel):
     # Fine-grained validator(s) below
     @field_validator("engine")
     def engine_check(cls, v, info: ValidationInfo):
+        if v == "mkldnn":
+            raise ValueError(
+                "The 'mkldnn' engine is deprecated. Please use 'onednn' instead."
+            )
         if v == "tensorrt" and info.data["device"] != "gpu":
             raise ValueError(
                 "'INFER.device' should be 'gpu' when 'INFER.engine' is 'tensorrt', "
                 f"but got '{info.data['device']}'"
             )
-        if v == "mkldnn" and info.data["device"] != "cpu":
+        if v == "onednn" and info.data["device"] != "cpu":
             raise ValueError(
-                "'INFER.device' should be 'cpu' when 'INFER.engine' is 'mkldnn', "
+                "'INFER.device' should be 'cpu' when 'INFER.engine' is 'onednn', "
                 f"but got '{info.data['device']}'"
             )
 
```
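A minimal sketch of the validator's effect (assuming the remaining `InferConfig` fields all have defaults, as the ones shown above do):

```python
# Sketch under the assumption that InferConfig can be built from defaults.
from ppsci.utils.config import InferConfig

cfg = InferConfig(device="cpu", engine="onednn")  # valid: oneDNN on CPU

try:
    InferConfig(device="gpu", engine="onednn")    # rejected by engine_check
except ValueError as err:  # pydantic's ValidationError subclasses ValueError
    print(err)  # message includes: 'INFER.device' should be 'cpu' when
                # 'INFER.engine' is 'onednn', but got 'gpu'
```

Because `engine` is a `Literal` that no longer includes `"mkldnn"`, pydantic's type check also rejects the old value; the explicit branch in `engine_check` spells out the migration path.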
