# vector_reduce_min/vector_reduce_min_jit.py -*- Python -*-
#
# This file is licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# (c) Copyright 2025 Advanced Micro Devices, Inc. or its affiliates
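"""JIT-compiled IRON example: vectorized reduce-min of an int32 tensor on the NPU.

The host streams the input tensor through an ObjectFifo to a single worker
core, which runs a vectorized reduce_min kernel and streams the scalar
minimum back for verification against NumPy.
"""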
import numpy as np
import sys
import os
import argparse
import time

import aie.iron as iron
from aie.iron import ExternalFunction, ObjectFifo, Program, Runtime, Worker
from aie.iron.placers import SequentialPlacer


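# is_placed=False defers placement: cores and FIFOs are assigned tiles only
# when resolve_program() runs the SequentialPlacer below.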
@iron.jit(is_placed=False)
def my_reduce_min(input_tensor, output_tensor):

    num_elements = input_tensor.numel()
    assert output_tensor.numel() == 1, "Output tensor must be a scalar"

    # Define tensor types
    in_ty = np.ndarray[(num_elements,), np.dtype[input_tensor.dtype]]
    out_ty = np.ndarray[(1,), np.dtype[output_tensor.dtype]]

    # AIE-array data movement with object fifos
    of_in = ObjectFifo(in_ty, name="in")
    of_out = ObjectFifo(out_ty, name="out")
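    # Each ObjectFifo is a synchronized producer/consumer buffer pool; no depth
    # is given here, so the IRON default (assumed: depth 2, i.e. double
    # buffering) applies.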

    # AIE Core Function declarations
    root_dir = os.path.abspath(os.path.join(__file__, "../../../.."))
    kernel_dir = os.path.join(root_dir, "aie_kernels/aie2")
    source_file = os.path.join(kernel_dir, "reduce_min.cc")
    reduce_min_vector = ExternalFunction(
        "reduce_min_vector",
        source_file=source_file,
        arg_types=[in_ty, out_ty, np.int32],
        include_dirs=[kernel_dir],
    )

    # Task for the core: acquire one input and one output buffer, reduce,
    # then release both so the DMAs can proceed
    def core_body(of_in, of_out, reduce_min_vector):
        elem_out = of_out.acquire(1)
        elem_in = of_in.acquire(1)
        reduce_min_vector(elem_in, elem_out, num_elements)
        of_in.release(1)
        of_out.release(1)

    # Define a worker to run the task on a core
    worker = Worker(
        core_body, fn_args=[of_in.cons(), of_out.prod(), reduce_min_vector]
    )

    # Runtime operations to move data to/from the AIE-array
    rt = Runtime()
    with rt.sequence(in_ty, out_ty) as (a_in, c_out):
        rt.start(worker)
        rt.fill(of_in.prod(), a_in)
        rt.drain(of_out.cons(), c_out, wait=True)

    # Place program components (assign them resources on the device) and generate an MLIR module
    return Program(iron.get_current_device(), rt).resolve_program(SequentialPlacer())


def main():

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-n",
        "--num-elements",
        type=int,
        default=2048,
        help="Number of elements (default: 2048)",
    )
    parser.add_argument(
        "-w",
        "--warmup",
        type=int,
        default=10,
        help="Number of warmup iterations (default: 10)",
    )
    parser.add_argument(
        "-i",
        "--iters",
        type=int,
        default=20,
        help="Number of measurement iterations (default: 20)",
    )

    args = parser.parse_args()
    num_elements = args.num_elements
    n_warmup_iterations = args.warmup
    n_iterations = args.iters
    data_type = np.int32

    # Construct input and output tensors that are accessible to the NPU
    input_tensor = iron.randint(
        10, 100, (num_elements,), dtype=data_type, device="npu"
    )
    output_tensor = iron.tensor((1,), dtype=data_type, device="npu")

    # Initialize timing variables
    npu_time_total = 0.0
    npu_time_min = float("inf")
    npu_time_max = 0.0

    # Main run loop with warmup and measurement iterations
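    # Note: the first call to my_reduce_min() also pays the one-time JIT
    # compilation cost; the warmup iterations keep it out of the statistics.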
    total_iterations = n_warmup_iterations + n_iterations
    for iter_num in range(total_iterations):
        # Launch the kernel and measure execution time
        start_time = time.perf_counter()
        my_reduce_min(input_tensor, output_tensor)
        end_time = time.perf_counter()

        # Calculate execution time in microseconds
        execution_time_us = (end_time - start_time) * 1_000_000

        # Skip warmup iterations for timing statistics
        if iter_num >= n_warmup_iterations:
            npu_time_total += execution_time_us
            npu_time_min = min(npu_time_min, execution_time_us)
            npu_time_max = max(npu_time_max, execution_time_us)

    # Check the correctness of the result
    computed = output_tensor.numpy()[0]
    expected = input_tensor.numpy().min()

    if expected == computed:
        # Print timing results
        if n_iterations > 1:
            avg_time = npu_time_total / n_iterations
            print(f"\nAvg NPU time: {avg_time:.1f}us.")
            print(f"Min NPU time: {npu_time_min:.1f}us.")
            print(f"Max NPU time: {npu_time_max:.1f}us.")
        else:
            print(f"\nNPU time: {npu_time_total:.1f}us.")
        print("PASS!")
        sys.exit(0)
    else:
        print(f"FAIL!: Expected {expected} but got {computed}")
        sys.exit(1)


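# Example invocation:
#   python vector_reduce_min_jit.py --num-elements 4096 --warmup 5 --iters 50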
if __name__ == "__main__":
    main()