Skip to content

Commit eb74830

Browse files
authored
Support mlperf inference v6.0 (#811)
1 parent d5c00a4 commit eb74830

File tree

6 files changed

+60
-1
lines changed

6 files changed

+60
-1
lines changed

script/app-mlperf-inference/meta.yaml

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -467,6 +467,35 @@ variations:
467467
docker:
468468
base_image: nvcr.io/nvidia/mlperf/mlperf-inference:mlpinf-v5.0-cuda12.8-pytorch25.01-ubuntu24.04-aarch64-Grace-release
469469

470+
nvidia-original,r6.0-dev_default:
471+
env:
472+
MLC_NVIDIA_MITTEN_FROM_SRC: 'yes'
473+
docker:
474+
os_version: "24.04"
475+
user: 'ubuntu'
476+
build_deps:
477+
- tags: detect,os
478+
image_name: mlperf-inference-nvidia-v5.0-common
479+
build_env:
480+
ENV: release
481+
deps:
482+
- names:
483+
- numpy
484+
tags: get,generic-python-lib,_package.numpy
485+
version_max: "1.26.999"
486+
version_max_usable: "1.26.4"
487+
update_meta_if_env:
488+
- enable_if_env:
489+
MLC_HOST_PLATFORM_FLAVOR:
490+
- x86_64
491+
docker:
492+
base_image: nvcr.io/nvidia/mlperf/mlperf-inference:mlpinf-v5.0-cuda12.8-pytorch25.01-ubuntu24.04-x86_64-release
493+
- skip_if_env:
494+
MLC_HOST_PLATFORM_FLAVOR:
495+
- x86_64
496+
docker:
497+
base_image: nvcr.io/nvidia/mlperf/mlperf-inference:mlpinf-v5.0-cuda12.8-pytorch25.01-ubuntu24.04-aarch64-Grace-release
498+
470499
nvidia-original,r5.1_default:
471500
env:
472501
MLC_NVIDIA_MITTEN_FROM_SRC: 'yes'
@@ -2377,6 +2406,13 @@ variations:
23772406
group:
23782407
reproducibility
23792408
add_deps_recursive:
2409+
nvidia-inference-common-code:
2410+
tags: _mlcommons,_v6.0-dev
2411+
nvidia-inference-server:
2412+
version: r5.0
2413+
tags: _mlcommons
2414+
nvidia-harness:
2415+
tags: _v5.0
23802416
pycuda:
23812417
version_min: "2024.1"
23822418
default_env:

script/get-cuda-devices/print_cuda_devices.cu

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,11 @@ int main(int argc, char *argv[])
5454
printf("CUDA runtime version: %d.%d\n", rtver/1000, (rtver%100)/10);
5555

5656
printf("Global memory: %llu\n", (unsigned long long) features.totalGlobalMem);
57-
printf("Max clock rate: %f MHz\n", features.clockRate * 0.001);
57+
58+
int clock;
59+
cudaDeviceGetAttribute(&clock, cudaDevAttrClockRate, id);
60+
61+
printf("Max clock rate: %f MHz\n", 0.001 * clock);
5862

5963
printf("Total amount of shared memory per block: %lu\n", features.sharedMemPerBlock);
6064
printf("Total number of registers available per block: %d\n", features.regsPerBlock);

script/get-mlperf-inference-nvidia-common-code/meta.yaml

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -57,6 +57,10 @@ variations:
5757
add_deps_recursive:
5858
mlperf-inference-results:
5959
tags: _code-only,_v5.1
60+
v6.0-dev:
61+
add_deps_recursive:
62+
mlperf-inference-results:
63+
tags: _code-only-for-v6.0,_v6.0-dev
6064
versions:
6165
r2.1:
6266
add_deps_recursive:

script/get-mlperf-inference-results/meta.yaml

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,11 @@ variations:
4747
inference-results-repo:
4848
tags: _branch.mlc-code-only-for-v5.1
4949
group: repo-branch
50+
code-only-for-v6.0:
51+
adr:
52+
inference-results-repo:
53+
tags: _branch.mlc-code-only-for-v6.0
54+
group: repo-branch
5055
ctuning:
5156
env:
5257
GITHUB_REPO_OWNER: ctuning
@@ -93,6 +98,12 @@ variations:
9398
MLC_GIT_URL: https://github.com/<<<GITHUB_REPO_OWNER>>>/inference_results_v5.1.git
9499
MLC_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v5.1
95100
MLC_VERSION: "v5.1"
101+
v6.0-dev:
102+
group: version
103+
env:
104+
MLC_GIT_URL: https://github.com/<<<GITHUB_REPO_OWNER>>>/inference_results_v5.0.git
105+
MLC_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v6.0-dev
106+
MLC_VERSION: "v5.0"
96107
versions:
97108
v2.1:
98109
env:

script/install-cuda-prebuilt/meta.yaml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -97,3 +97,6 @@ versions:
9797
12.9.1:
9898
env:
9999
MLC_CUDA_LINUX_FILENAME: cuda_12.9.1_575.57.08_linux.run
100+
13.1.1:
101+
env:
102+
MLC_CUDA_LINUX_FILENAME: cuda_13.1.1_590.48.01_linux.run

script/run-mlperf-inference-app/meta.yaml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -103,6 +103,7 @@ input_mapping:
103103
skip_truncation: MLC_SKIP_TRUNCATE_ACCURACY
104104
submission_dir: MLC_MLPERF_INFERENCE_SUBMISSION_DIR
105105
submitter: MLC_MLPERF_SUBMITTER
106+
submitter_id: MLC_MLPERF_SUBMITTER_ID
106107
sut_servers: MLC_NETWORK_LOADGEN_SUT_SERVERS
107108
sw_notes_extra: MLC_MLPERF_SUT_SW_NOTES_EXTRA
108109
system_type: MLC_MLPERF_SUBMISSION_SYSTEM_TYPE

0 commit comments

Comments (0)