From af0ef28721662db53c5c07d9dfc607be1f4608b3 Mon Sep 17 00:00:00 2001 From: gongchensu Date: Thu, 8 Jan 2026 06:21:12 +0000 Subject: [PATCH] Issue/888 - Add averagepool,batch_norm,cross_entropy_loss,exp,hardswish,gather,index_copy_inplace,interpolate_nearest,maxpool,scatter operators from competition. --- include/infiniop/ops/averagepool.h | 29 ++ include/infiniop/ops/batch_norm.h | 37 ++ include/infiniop/ops/cross_entropy_loss.h | 27 ++ include/infiniop/ops/exp.h | 24 ++ include/infiniop/ops/gather.h | 31 ++ include/infiniop/ops/hardswish.h | 24 ++ include/infiniop/ops/index_copy_inplace.h | 30 ++ include/infiniop/ops/interpolate_nearest.h | 25 ++ include/infiniop/ops/maxpool.h | 29 ++ include/infiniop/ops/scatter.h | 30 ++ src/infiniop/ops/averagepool/averagepool.h | 52 +++ .../ops/averagepool/cpu/averagepool_cpu.cc | 348 ++++++++++++++++++ .../ops/averagepool/cpu/averagepool_cpu.h | 8 + .../averagepool/cuda/averagepool_kernel.cuh | 185 ++++++++++ src/infiniop/ops/averagepool/info.h | 136 +++++++ .../ops/averagepool/nvidia/averagepool.cu | 220 +++++++++++ .../averagepool/nvidia/averagepool_nvidia.cuh | 8 + src/infiniop/ops/averagepool/operator.cc | 147 ++++++++ src/infiniop/ops/batch_norm/batch_norm.h | 54 +++ .../ops/batch_norm/cpu/batch_norm_cpu.cc | 118 ++++++ .../ops/batch_norm/cpu/batch_norm_cpu.h | 8 + src/infiniop/ops/batch_norm/cuda/kernel.cuh | 57 +++ src/infiniop/ops/batch_norm/info.h | 69 ++++ .../batch_norm/nvidia/batch_norm_nvidia.cu | 176 +++++++++ .../batch_norm/nvidia/batch_norm_nvidia.cuh | 10 + src/infiniop/ops/batch_norm/operator.cc | 156 ++++++++ .../cpu/cross_entropy_loss_cpu.cc | 321 ++++++++++++++++ .../cpu/cross_entropy_loss_cpu.h | 8 + .../cross_entropy_loss/cross_entropy_loss.h | 48 +++ src/infiniop/ops/cross_entropy_loss/info.h | 36 ++ .../nvidia/cross_entropy_loss_nvidia.cu | 217 +++++++++++ .../nvidia/cross_entropy_loss_nvidia.cuh | 8 + .../ops/cross_entropy_loss/operator.cc | 142 +++++++ src/infiniop/ops/exp/cpu/exp_cpu.cc | 52 +++ src/infiniop/ops/exp/cpu/exp_cpu.h | 21 ++ src/infiniop/ops/exp/cuda/kernel.cuh | 39 ++ src/infiniop/ops/exp/nvidia/exp_nvidia.cu | 59 +++ src/infiniop/ops/exp/nvidia/exp_nvidia.cuh | 8 + src/infiniop/ops/exp/operator.cc | 139 +++++++ src/infiniop/ops/gather/cpu/gather_cpu.cc | 96 +++++ src/infiniop/ops/gather/cpu/gather_cpu.h | 8 + src/infiniop/ops/gather/cuda/kernel.cuh | 37 ++ src/infiniop/ops/gather/gather.h | 47 +++ src/infiniop/ops/gather/info.h | 58 +++ .../ops/gather/nvidia/gather_nvidia.cu | 179 +++++++++ .../ops/gather/nvidia/gather_nvidia.cuh | 7 + src/infiniop/ops/gather/operator.cc | 144 ++++++++ .../ops/hardswish/cpu/hardswish_cpu.cc | 52 +++ .../ops/hardswish/cpu/hardswish_cpu.h | 30 ++ src/infiniop/ops/hardswish/cuda/kernel.cuh | 55 +++ .../ops/hardswish/nvidia/hardswish_nvidia.cu | 59 +++ .../ops/hardswish/nvidia/hardswish_nvidia.cuh | 8 + src/infiniop/ops/hardswish/operator.cc | 139 +++++++ .../cpu/index_copy_inplace_cpu.cc | 93 +++++ .../cpu/index_copy_inplace_cpu.h | 8 + .../index_copy_inplace/index_copy_inplace.h | 53 +++ src/infiniop/ops/index_copy_inplace/info.h | 74 ++++ .../nvidia/index_copy_inplace_nvidia.cu | 127 +++++++ .../nvidia/index_copy_inplace_nvidia.cuh | 7 + .../ops/index_copy_inplace/operator.cc | 144 ++++++++ .../cpu/interpolate_nearest_cpu.cc | 284 ++++++++++++++ .../cpu/interpolate_nearest_cpu.h | 8 + .../ops/interpolate_nearest/cuda/kernel.cuh | 168 +++++++++ src/infiniop/ops/interpolate_nearest/info.h | 118 ++++++ .../interpolate_nearest/interpolate_nearest.h | 51 +++ 
.../nvidia/interpolate_nearest_nvidia.cu | 93 +++++ .../nvidia/interpolate_nearest_nvidia.cuh | 9 + .../ops/interpolate_nearest/operator.cc | 139 +++++++ src/infiniop/ops/maxpool/cpu/maxpool_cpu.cc | 322 ++++++++++++++++ src/infiniop/ops/maxpool/cpu/maxpool_cpu.h | 8 + src/infiniop/ops/maxpool/info.h | 113 ++++++ src/infiniop/ops/maxpool/maxpool.h | 53 +++ .../ops/maxpool/nvidia/maxpool_nvidia.cu | 240 ++++++++++++ .../ops/maxpool/nvidia/maxpool_nvidia.cuh | 8 + src/infiniop/ops/maxpool/operator.cc | 147 ++++++++ src/infiniop/ops/scatter/cpu/scatter_cpu.cc | 100 +++++ src/infiniop/ops/scatter/cpu/scatter_cpu.h | 8 + src/infiniop/ops/scatter/cuda/kernel.cuh | 37 ++ src/infiniop/ops/scatter/info.h | 64 ++++ .../ops/scatter/metax/scatter_metax.h | 8 + .../ops/scatter/metax/scatter_metax.maca | 190 ++++++++++ .../ops/scatter/nvidia/scatter_nvidia.cu | 180 +++++++++ .../ops/scatter/nvidia/scatter_nvidia.cuh | 7 + src/infiniop/ops/scatter/operator.cc | 160 ++++++++ src/infiniop/ops/scatter/scatter.h | 47 +++ test/infiniop/averagepool.py | 239 ++++++++++++ test/infiniop/batch_norm.py | 244 ++++++++++++ test/infiniop/cross_entropy_loss.py | 213 +++++++++++ test/infiniop/exp.py | 165 +++++++++ test/infiniop/gather.py | 160 ++++++++ test/infiniop/hardswish.py | 167 +++++++++ test/infiniop/index_copy_inplace.py | 180 +++++++++ test/infiniop/interpolate_nearest.py | 265 +++++++++++++ test/infiniop/libinfiniop/op_register.py | 321 +++++++++++++++- test/infiniop/maxpool.py | 242 ++++++++++++ test/infiniop/scatter.py | 196 ++++++++++ 96 files changed, 9514 insertions(+), 1 deletion(-) create mode 100644 include/infiniop/ops/averagepool.h create mode 100644 include/infiniop/ops/batch_norm.h create mode 100644 include/infiniop/ops/cross_entropy_loss.h create mode 100644 include/infiniop/ops/exp.h create mode 100644 include/infiniop/ops/gather.h create mode 100644 include/infiniop/ops/hardswish.h create mode 100644 include/infiniop/ops/index_copy_inplace.h create mode 100644 include/infiniop/ops/interpolate_nearest.h create mode 100644 include/infiniop/ops/maxpool.h create mode 100644 include/infiniop/ops/scatter.h create mode 100644 src/infiniop/ops/averagepool/averagepool.h create mode 100644 src/infiniop/ops/averagepool/cpu/averagepool_cpu.cc create mode 100644 src/infiniop/ops/averagepool/cpu/averagepool_cpu.h create mode 100644 src/infiniop/ops/averagepool/cuda/averagepool_kernel.cuh create mode 100644 src/infiniop/ops/averagepool/info.h create mode 100644 src/infiniop/ops/averagepool/nvidia/averagepool.cu create mode 100644 src/infiniop/ops/averagepool/nvidia/averagepool_nvidia.cuh create mode 100644 src/infiniop/ops/averagepool/operator.cc create mode 100644 src/infiniop/ops/batch_norm/batch_norm.h create mode 100644 src/infiniop/ops/batch_norm/cpu/batch_norm_cpu.cc create mode 100644 src/infiniop/ops/batch_norm/cpu/batch_norm_cpu.h create mode 100644 src/infiniop/ops/batch_norm/cuda/kernel.cuh create mode 100644 src/infiniop/ops/batch_norm/info.h create mode 100644 src/infiniop/ops/batch_norm/nvidia/batch_norm_nvidia.cu create mode 100644 src/infiniop/ops/batch_norm/nvidia/batch_norm_nvidia.cuh create mode 100644 src/infiniop/ops/batch_norm/operator.cc create mode 100644 src/infiniop/ops/cross_entropy_loss/cpu/cross_entropy_loss_cpu.cc create mode 100644 src/infiniop/ops/cross_entropy_loss/cpu/cross_entropy_loss_cpu.h create mode 100644 src/infiniop/ops/cross_entropy_loss/cross_entropy_loss.h create mode 100644 src/infiniop/ops/cross_entropy_loss/info.h create mode 100644 
src/infiniop/ops/cross_entropy_loss/nvidia/cross_entropy_loss_nvidia.cu create mode 100644 src/infiniop/ops/cross_entropy_loss/nvidia/cross_entropy_loss_nvidia.cuh create mode 100644 src/infiniop/ops/cross_entropy_loss/operator.cc create mode 100644 src/infiniop/ops/exp/cpu/exp_cpu.cc create mode 100644 src/infiniop/ops/exp/cpu/exp_cpu.h create mode 100644 src/infiniop/ops/exp/cuda/kernel.cuh create mode 100644 src/infiniop/ops/exp/nvidia/exp_nvidia.cu create mode 100644 src/infiniop/ops/exp/nvidia/exp_nvidia.cuh create mode 100644 src/infiniop/ops/exp/operator.cc create mode 100644 src/infiniop/ops/gather/cpu/gather_cpu.cc create mode 100644 src/infiniop/ops/gather/cpu/gather_cpu.h create mode 100644 src/infiniop/ops/gather/cuda/kernel.cuh create mode 100644 src/infiniop/ops/gather/gather.h create mode 100644 src/infiniop/ops/gather/info.h create mode 100644 src/infiniop/ops/gather/nvidia/gather_nvidia.cu create mode 100644 src/infiniop/ops/gather/nvidia/gather_nvidia.cuh create mode 100644 src/infiniop/ops/gather/operator.cc create mode 100644 src/infiniop/ops/hardswish/cpu/hardswish_cpu.cc create mode 100644 src/infiniop/ops/hardswish/cpu/hardswish_cpu.h create mode 100644 src/infiniop/ops/hardswish/cuda/kernel.cuh create mode 100644 src/infiniop/ops/hardswish/nvidia/hardswish_nvidia.cu create mode 100644 src/infiniop/ops/hardswish/nvidia/hardswish_nvidia.cuh create mode 100644 src/infiniop/ops/hardswish/operator.cc create mode 100644 src/infiniop/ops/index_copy_inplace/cpu/index_copy_inplace_cpu.cc create mode 100644 src/infiniop/ops/index_copy_inplace/cpu/index_copy_inplace_cpu.h create mode 100644 src/infiniop/ops/index_copy_inplace/index_copy_inplace.h create mode 100644 src/infiniop/ops/index_copy_inplace/info.h create mode 100644 src/infiniop/ops/index_copy_inplace/nvidia/index_copy_inplace_nvidia.cu create mode 100644 src/infiniop/ops/index_copy_inplace/nvidia/index_copy_inplace_nvidia.cuh create mode 100644 src/infiniop/ops/index_copy_inplace/operator.cc create mode 100644 src/infiniop/ops/interpolate_nearest/cpu/interpolate_nearest_cpu.cc create mode 100644 src/infiniop/ops/interpolate_nearest/cpu/interpolate_nearest_cpu.h create mode 100644 src/infiniop/ops/interpolate_nearest/cuda/kernel.cuh create mode 100644 src/infiniop/ops/interpolate_nearest/info.h create mode 100644 src/infiniop/ops/interpolate_nearest/interpolate_nearest.h create mode 100644 src/infiniop/ops/interpolate_nearest/nvidia/interpolate_nearest_nvidia.cu create mode 100644 src/infiniop/ops/interpolate_nearest/nvidia/interpolate_nearest_nvidia.cuh create mode 100644 src/infiniop/ops/interpolate_nearest/operator.cc create mode 100644 src/infiniop/ops/maxpool/cpu/maxpool_cpu.cc create mode 100644 src/infiniop/ops/maxpool/cpu/maxpool_cpu.h create mode 100644 src/infiniop/ops/maxpool/info.h create mode 100644 src/infiniop/ops/maxpool/maxpool.h create mode 100644 src/infiniop/ops/maxpool/nvidia/maxpool_nvidia.cu create mode 100644 src/infiniop/ops/maxpool/nvidia/maxpool_nvidia.cuh create mode 100644 src/infiniop/ops/maxpool/operator.cc create mode 100644 src/infiniop/ops/scatter/cpu/scatter_cpu.cc create mode 100644 src/infiniop/ops/scatter/cpu/scatter_cpu.h create mode 100644 src/infiniop/ops/scatter/cuda/kernel.cuh create mode 100644 src/infiniop/ops/scatter/info.h create mode 100644 src/infiniop/ops/scatter/metax/scatter_metax.h create mode 100644 src/infiniop/ops/scatter/metax/scatter_metax.maca create mode 100644 src/infiniop/ops/scatter/nvidia/scatter_nvidia.cu create mode 100644 
src/infiniop/ops/scatter/nvidia/scatter_nvidia.cuh create mode 100644 src/infiniop/ops/scatter/operator.cc create mode 100644 src/infiniop/ops/scatter/scatter.h create mode 100644 test/infiniop/averagepool.py create mode 100644 test/infiniop/batch_norm.py create mode 100644 test/infiniop/cross_entropy_loss.py create mode 100644 test/infiniop/exp.py create mode 100644 test/infiniop/gather.py create mode 100644 test/infiniop/hardswish.py create mode 100644 test/infiniop/index_copy_inplace.py create mode 100644 test/infiniop/interpolate_nearest.py create mode 100644 test/infiniop/maxpool.py create mode 100644 test/infiniop/scatter.py diff --git a/include/infiniop/ops/averagepool.h b/include/infiniop/ops/averagepool.h new file mode 100644 index 000000000..87e857175 --- /dev/null +++ b/include/infiniop/ops/averagepool.h @@ -0,0 +1,29 @@ +#ifndef __INFINIOP_AVERAGEPOOL_H__ +#define __INFINIOP_AVERAGEPOOL_H__ + +#include "../operator_descriptor.h" + +__C typedef struct InfiniopDescriptor *infiniopAvgPoolDescriptor_t; + +__C infiniStatus_t infiniopCreateAvgPoolDescriptor(infiniopHandle_t handle, + infiniopAvgPoolDescriptor_t *desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + void *kernel_size, + void *strides, + void *pads, + bool ceil_mode); + +__C infiniStatus_t infiniopGetAvgPoolWorkspaceSize(infiniopAvgPoolDescriptor_t desc, + size_t *size); + +__C infiniStatus_t infiniopAvgPool(infiniopAvgPoolDescriptor_t desc, + void *workspace, + size_t workspace_size, + void *output, + const void *input, + void *stream); + +__C infiniStatus_t infiniopDestroyAvgPoolDescriptor(infiniopAvgPoolDescriptor_t desc); + +#endif // __INFINIOP_AVERAGEPOOL_H__ diff --git a/include/infiniop/ops/batch_norm.h b/include/infiniop/ops/batch_norm.h new file mode 100644 index 000000000..5487a1f69 --- /dev/null +++ b/include/infiniop/ops/batch_norm.h @@ -0,0 +1,37 @@ +#ifndef __INFINIOP_BATCH_NORM_API_H__ +#define __INFINIOP_BATCH_NORM_API_H__ + +#include "../operator_descriptor.h" + +typedef struct InfiniopDescriptor *infiniopBatchNormDescriptor_t; + +__C __export infiniStatus_t infiniopCreateBatchNormDescriptor( + infiniopHandle_t handle, + infiniopBatchNormDescriptor_t *desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t running_mean_desc, + infiniopTensorDescriptor_t running_var_desc, + infiniopTensorDescriptor_t input_desc, + infiniopTensorDescriptor_t weight_desc, + infiniopTensorDescriptor_t bias_desc, + float momentum, + float eps +); + +__C __export infiniStatus_t infiniopGetBatchNormWorkspaceSize(infiniopBatchNormDescriptor_t desc, size_t *size); + +__C __export infiniStatus_t infiniopBatchNorm(infiniopBatchNormDescriptor_t desc, + void *workspace, + size_t workspace_size, + void * output, + void * running_mean, + void * running_var, + const void * input, + const void * weight, + const void * bias, + void *stream +); + +__C __export infiniStatus_t infiniopDestroyBatchNormDescriptor(infiniopBatchNormDescriptor_t desc); + +#endif diff --git a/include/infiniop/ops/cross_entropy_loss.h b/include/infiniop/ops/cross_entropy_loss.h new file mode 100644 index 000000000..8b59843c9 --- /dev/null +++ b/include/infiniop/ops/cross_entropy_loss.h @@ -0,0 +1,27 @@ +#ifndef __INFINIOP_CROSS_ENTROPY_LOSS_API_H__ +#define __INFINIOP_CROSS_ENTROPY_LOSS_API_H__ + +#include "../operator_descriptor.h" + +typedef struct InfiniopDescriptor *infiniopCrossEntropyLossDescriptor_t; + +__C infiniStatus_t infiniopCreateCrossEntropyLossDescriptor(infiniopHandle_t handle, + 
infiniopCrossEntropyLossDescriptor_t *desc_ptr, + infiniopTensorDescriptor_t loss_desc, + infiniopTensorDescriptor_t logits_desc, + infiniopTensorDescriptor_t target_desc); + +__C infiniStatus_t infiniopGetCrossEntropyLossWorkspaceSize(infiniopCrossEntropyLossDescriptor_t desc, + size_t *size); + +__C infiniStatus_t infiniopCrossEntropyLoss(infiniopCrossEntropyLossDescriptor_t desc, + void *workspace, + size_t workspace_size, + void *loss, + const void *logits, + const void *target, + void *stream); + +__C infiniStatus_t infiniopDestroyCrossEntropyLossDescriptor(infiniopCrossEntropyLossDescriptor_t desc); + +#endif // __INFINIOP_CROSS_ENTROPY_LOSS_API_H__ diff --git a/include/infiniop/ops/exp.h b/include/infiniop/ops/exp.h new file mode 100644 index 000000000..624bc5363 --- /dev/null +++ b/include/infiniop/ops/exp.h @@ -0,0 +1,24 @@ +#ifndef __INFINIOP_EXP_API_H__ +#define __INFINIOP_EXP_API_H__ + +#include "../operator_descriptor.h" + +typedef struct InfiniopDescriptor *infiniopExpDescriptor_t; + +__C __export infiniStatus_t infiniopCreateExpDescriptor(infiniopHandle_t handle, + infiniopExpDescriptor_t *desc_ptr, + infiniopTensorDescriptor_t output, + infiniopTensorDescriptor_t input); + +__C __export infiniStatus_t infiniopGetExpWorkspaceSize(infiniopExpDescriptor_t desc, size_t *size); + +__C __export infiniStatus_t infiniopExp(infiniopExpDescriptor_t desc, + void *workspace, + size_t workspace_size, + void *output, + const void *input, + void *stream); + +__C __export infiniStatus_t infiniopDestroyExpDescriptor(infiniopExpDescriptor_t desc); + +#endif diff --git a/include/infiniop/ops/gather.h b/include/infiniop/ops/gather.h new file mode 100644 index 000000000..9ffe310c9 --- /dev/null +++ b/include/infiniop/ops/gather.h @@ -0,0 +1,31 @@ +#ifndef __INFINIOP_GATHER_API_H__ +#define __INFINIOP_GATHER_API_H__ + +#include "../operator_descriptor.h" + +typedef struct InfiniopDescriptor *infiniopGatherDescriptor_t; + +__C __export infiniStatus_t infiniopCreateGatherDescriptor( + infiniopHandle_t handle, + infiniopGatherDescriptor_t *desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + infiniopTensorDescriptor_t index_desc, + size_t dim +); + +__C __export infiniStatus_t infiniopGetGatherWorkspaceSize(infiniopGatherDescriptor_t desc, size_t *size); + +__C __export infiniStatus_t infiniopGather( + infiniopGatherDescriptor_t desc, + void *workspace, + size_t workspace_size, + void * output, + const void * input, + const void * index, + void *stream +); + +__C __export infiniStatus_t infiniopDestroyGatherDescriptor(infiniopGatherDescriptor_t desc); + +#endif diff --git a/include/infiniop/ops/hardswish.h b/include/infiniop/ops/hardswish.h new file mode 100644 index 000000000..8d655fe82 --- /dev/null +++ b/include/infiniop/ops/hardswish.h @@ -0,0 +1,24 @@ +#ifndef __INFINIOP_HARDSWISH_API_H__ +#define __INFINIOP_HARDSWISH_API_H__ + +#include "../operator_descriptor.h" + +typedef struct InfiniopDescriptor *infiniopHardswishDescriptor_t; + +__C __export infiniStatus_t infiniopCreateHardswishDescriptor(infiniopHandle_t handle, + infiniopHardswishDescriptor_t *desc_ptr, + infiniopTensorDescriptor_t output, + infiniopTensorDescriptor_t input); + +__C __export infiniStatus_t infiniopGetHardswishWorkspaceSize(infiniopHardswishDescriptor_t desc, size_t *size); + +__C __export infiniStatus_t infiniopHardswish(infiniopHardswishDescriptor_t desc, + void *workspace, + size_t workspace_size, + void *output, + const void *input, + void *stream); + +__C __export 
infiniStatus_t infiniopDestroyHardswishDescriptor(infiniopHardswishDescriptor_t desc); + +#endif diff --git a/include/infiniop/ops/index_copy_inplace.h b/include/infiniop/ops/index_copy_inplace.h new file mode 100644 index 000000000..e2266299a --- /dev/null +++ b/include/infiniop/ops/index_copy_inplace.h @@ -0,0 +1,30 @@ +#ifndef __INFINIOP_INDEX_COPY_INPLACE_API_H__ +#define __INFINIOP_INDEX_COPY_INPLACE_API_H__ + +#include "../operator_descriptor.h" + +typedef struct InfiniopDescriptor *infiniopIndexCopyInplaceDescriptor_t; + +__C __export infiniStatus_t infiniopCreateIndexCopyInplaceDescriptor( + infiniopHandle_t handle, + infiniopIndexCopyInplaceDescriptor_t *desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + infiniopTensorDescriptor_t index_desc, + size_t dim +); + +__C __export infiniStatus_t infiniopGetIndexCopyInplaceWorkspaceSize(infiniopIndexCopyInplaceDescriptor_t desc, size_t *size); + +__C __export infiniStatus_t infiniopIndexCopyInplace(infiniopIndexCopyInplaceDescriptor_t desc, + void *workspace, + size_t workspace_size, + void * output, + const void * input, + const void * index, + void *stream +); + +__C __export infiniStatus_t infiniopDestroyIndexCopyInplaceDescriptor(infiniopIndexCopyInplaceDescriptor_t desc); + +#endif diff --git a/include/infiniop/ops/interpolate_nearest.h b/include/infiniop/ops/interpolate_nearest.h new file mode 100644 index 000000000..7f970dc38 --- /dev/null +++ b/include/infiniop/ops/interpolate_nearest.h @@ -0,0 +1,25 @@ +#ifndef __INFINIOP_INTERPOLATE_NEAREST_H__ +#define __INFINIOP_INTERPOLATE_NEAREST_H__ + +#include "../operator_descriptor.h" + +__C typedef struct InfiniopDescriptor *infiniopInterpolateNearestDescriptor_t; + +__C infiniStatus_t infiniopCreateInterpolateNearestDescriptor(infiniopHandle_t handle, + infiniopInterpolateNearestDescriptor_t *desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc); + +__C infiniStatus_t infiniopGetInterpolateNearestWorkspaceSize(infiniopInterpolateNearestDescriptor_t desc, + size_t *size); + +__C infiniStatus_t infiniopInterpolateNearest(infiniopInterpolateNearestDescriptor_t desc, + void *workspace, + size_t workspace_size, + void *output, + const void *input, + void *stream); + +__C infiniStatus_t infiniopDestroyInterpolateNearestDescriptor(infiniopInterpolateNearestDescriptor_t desc); + +#endif // __INFINIOP_INTERPOLATE_NEAREST_H__ diff --git a/include/infiniop/ops/maxpool.h b/include/infiniop/ops/maxpool.h new file mode 100644 index 000000000..e47a43aed --- /dev/null +++ b/include/infiniop/ops/maxpool.h @@ -0,0 +1,29 @@ +#ifndef __INFINIOP_MAX_POOL_H__ +#define __INFINIOP_MAX_POOL_H__ + +#include "../operator_descriptor.h" + +__C typedef struct InfiniopDescriptor *infiniopMaxPoolDescriptor_t; + +__C infiniStatus_t infiniopCreateMaxPoolDescriptor(infiniopHandle_t handle, + infiniopMaxPoolDescriptor_t *desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + void *kernel_size, + void *strides, + void *pads, + bool ceil_mode); + +__C infiniStatus_t infiniopGetMaxPoolWorkspaceSize(infiniopMaxPoolDescriptor_t desc, + size_t *size); + +__C infiniStatus_t infiniopMaxPool(infiniopMaxPoolDescriptor_t desc, + void *workspace, + size_t workspace_size, + void *output, + const void *input, + void *stream); + +__C infiniStatus_t infiniopDestroyMaxPoolDescriptor(infiniopMaxPoolDescriptor_t desc); + +#endif // __INFINIOP_MAX_POOL_H__ diff --git a/include/infiniop/ops/scatter.h 
b/include/infiniop/ops/scatter.h new file mode 100644 index 000000000..22e0eff83 --- /dev/null +++ b/include/infiniop/ops/scatter.h @@ -0,0 +1,30 @@ +#ifndef __INFINIOP_SCATTER_API_H__ +#define __INFINIOP_SCATTER_API_H__ + +#include "../operator_descriptor.h" + +typedef struct InfiniopDescriptor *infiniopScatterDescriptor_t; + +__C __export infiniStatus_t infiniopCreateScatterDescriptor( + infiniopHandle_t handle, + infiniopScatterDescriptor_t *desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + infiniopTensorDescriptor_t index_desc, + size_t dim +); + +__C __export infiniStatus_t infiniopGetScatterWorkspaceSize(infiniopScatterDescriptor_t desc, size_t *size); + +__C __export infiniStatus_t infiniopScatter(infiniopScatterDescriptor_t desc, + void *workspace, + size_t workspace_size, + void * output, + const void * input, + const void * index, + void *stream +); + +__C __export infiniStatus_t infiniopDestroyScatterDescriptor(infiniopScatterDescriptor_t desc); + +#endif diff --git a/src/infiniop/ops/averagepool/averagepool.h b/src/infiniop/ops/averagepool/averagepool.h new file mode 100644 index 000000000..7762826ab --- /dev/null +++ b/src/infiniop/ops/averagepool/averagepool.h @@ -0,0 +1,52 @@ +#ifndef __AVERAGEPOOL_H__ +#define __AVERAGEPOOL_H__ + +#include "../../operator.h" +#include "info.h" + +#define DESCRIPTOR(NAMESPACE) \ + namespace op::averagepool::NAMESPACE { \ + class Descriptor final : public InfiniopDescriptor { \ + struct Opaque; \ + Opaque *_opaque; \ + infiniDtype_t _dtype; \ + AvgPoolInfo _info; \ + size_t _workspace_size; \ + \ + Descriptor( \ + infiniDtype_t dtype, \ + AvgPoolInfo info, \ + size_t workspace_size_, \ + Opaque *opaque, \ + infiniDevice_t device_type, \ + int device_id) \ + : InfiniopDescriptor{device_type, device_id}, \ + _opaque(opaque), \ + _dtype(dtype), \ + _info(info), \ + _workspace_size(workspace_size_) {} \ + \ + public: \ + ~Descriptor(); \ + \ + size_t workspaceSize() const { return _workspace_size; } \ + \ + static infiniStatus_t create( \ + infiniopHandle_t handle, \ + Descriptor **desc_ptr, \ + infiniopTensorDescriptor_t output_desc, \ + infiniopTensorDescriptor_t input_desc, \ + void *kernel_size, \ + void *strides, \ + void *pads, \ + bool ceil_mode); \ + \ + infiniStatus_t calculate( \ + void *workspace, size_t workspace_size, \ + void *output, \ + const void *input, \ + void *stream) const; \ + }; \ + } + +#endif // __AVERAGEPOOL_H__ diff --git a/src/infiniop/ops/averagepool/cpu/averagepool_cpu.cc b/src/infiniop/ops/averagepool/cpu/averagepool_cpu.cc new file mode 100644 index 000000000..95a347ddc --- /dev/null +++ b/src/infiniop/ops/averagepool/cpu/averagepool_cpu.cc @@ -0,0 +1,348 @@ +#include "averagepool_cpu.h" +#include "../../../devices/cpu/common_cpu.h" +#include "../../../devices/cpu/cpu_handle.h" +#include "../info.h" +#include +#include +#include +#include +#include + +namespace op::averagepool::cpu { + +struct Descriptor::Opaque { + device::cpu::Handle *handle; + AvgPoolInfo info; + size_t workspace_size = 0; + +private: + Opaque(device::cpu::Handle *handle_ptr, const AvgPoolInfo &avgpool_info) + : handle(handle_ptr), info(avgpool_info) { + workspace_size = 0; + } + + template + void _avgpool_1d(Ydata *output, const T *input) const { + size_t batch_size = info.batch; + size_t channels = info.channels; + size_t input_width = info.input_dims[0]; + size_t output_width = info.output_dims[0]; + size_t kernel_width = info.kernel_sizes[0]; + size_t stride_width = info.strides[0]; + size_t 
pad_width = info.pads[0]; + + const size_t input_nc_stride = input_width; + const size_t output_nc_stride = output_width; + +#pragma omp parallel for schedule(static) + for (int b = 0; b < static_cast(batch_size); ++b) { + for (int c = 0; c < static_cast(channels); ++c) { + const size_t input_offset = (static_cast(b) * channels + static_cast(c)) * input_nc_stride; + const size_t output_offset = (static_cast(b) * channels + static_cast(c)) * output_nc_stride; + + for (size_t ow = 0; ow < output_width; ++ow) { + float sum = 0.0f; + int valid_count = 0; + + const int window_start = static_cast(ow * stride_width) - static_cast(pad_width); + const int window_end = window_start + static_cast(kernel_width); + + for (int iw = window_start; iw < window_end; ++iw) { + if (iw >= 0 && iw < static_cast(input_width)) { + sum += utils::cast(input[input_offset + iw]); + valid_count++; + } else if (iw >= -static_cast(pad_width) && iw < static_cast(input_width + pad_width)) { + valid_count++; + } + } + + float result = 0.0f; + if (valid_count > 0) { + result = sum / static_cast(valid_count); + } + output[output_offset + ow] = utils::cast(result); + } + } + } + } + + template + void _avgpool_2d(Ydata *output, const T *input) const { + size_t batch_size = info.batch; + size_t channels = info.channels; + size_t input_height = info.input_dims[0]; + size_t input_width = info.input_dims[1]; + size_t output_height = info.output_dims[0]; + size_t output_width = info.output_dims[1]; + size_t kernel_height = info.kernel_sizes[0]; + size_t kernel_width = info.kernel_sizes[1]; + size_t stride_height = info.strides[0]; + size_t stride_width = info.strides[1]; + size_t pad_height = info.pads[0]; + size_t pad_width = info.pads[1]; + + const size_t input_nc_stride = input_height * input_width; + const size_t output_nc_stride = output_height * output_width; + +#pragma omp parallel for schedule(static) + for (int b = 0; b < static_cast(batch_size); ++b) { + for (int c = 0; c < static_cast(channels); ++c) { + const size_t input_offset = (static_cast(b) * channels + static_cast(c)) * input_nc_stride; + const size_t output_offset = (static_cast(b) * channels + static_cast(c)) * output_nc_stride; + + for (size_t oh = 0; oh < output_height; ++oh) { + for (size_t ow = 0; ow < output_width; ++ow) { + float sum = 0.0f; + int valid_count = 0; + + const int start_h = static_cast(oh * stride_height) - static_cast(pad_height); + const int start_w = static_cast(ow * stride_width) - static_cast(pad_width); + + for (int kh = 0; kh < static_cast(kernel_height); ++kh) { + for (int kw = 0; kw < static_cast(kernel_width); ++kw) { + const int ih = start_h + kh; + const int iw = start_w + kw; + + if (ih >= 0 && ih < static_cast(input_height) && iw >= 0 && iw < static_cast(input_width)) { + sum += utils::cast(input[input_offset + ih * input_width + iw]); + valid_count++; + } else if (ih >= -static_cast(pad_height) && ih < static_cast(input_height + pad_height) && iw >= -static_cast(pad_width) && iw < static_cast(input_width + pad_width)) { + valid_count++; + } + } + } + + float result = 0.0f; + if (valid_count > 0) { + result = sum / static_cast(valid_count); + } + output[output_offset + oh * output_width + ow] = utils::cast(result); + } + } + } + } + } + + template + void _avgpool_3d(Ydata *output, const T *input) const { + size_t batch_size = info.batch; + size_t channels = info.channels; + size_t input_depth = info.input_dims[0]; + size_t input_height = info.input_dims[1]; + size_t input_width = info.input_dims[2]; + size_t output_depth = 
info.output_dims[0]; + size_t output_height = info.output_dims[1]; + size_t output_width = info.output_dims[2]; + size_t kernel_depth = info.kernel_sizes[0]; + size_t kernel_height = info.kernel_sizes[1]; + size_t kernel_width = info.kernel_sizes[2]; + size_t stride_depth = info.strides[0]; + size_t stride_height = info.strides[1]; + size_t stride_width = info.strides[2]; + size_t pad_depth = info.pads[0]; + size_t pad_height = info.pads[1]; + size_t pad_width = info.pads[2]; + + const size_t input_nc_stride = input_depth * input_height * input_width; + const size_t output_nc_stride = output_depth * output_height * output_width; + +#pragma omp parallel for schedule(static) + for (int b = 0; b < static_cast(batch_size); ++b) { + for (int c = 0; c < static_cast(channels); ++c) { + const size_t input_offset = (static_cast(b) * channels + static_cast(c)) * input_nc_stride; + const size_t output_offset = (static_cast(b) * channels + static_cast(c)) * output_nc_stride; + + for (size_t od = 0; od < output_depth; ++od) { + for (size_t oh = 0; oh < output_height; ++oh) { + for (size_t ow = 0; ow < output_width; ++ow) { + float sum = 0.0f; + int valid_count = 0; + + const int start_d = static_cast(od * stride_depth) - static_cast(pad_depth); + const int start_h = static_cast(oh * stride_height) - static_cast(pad_height); + const int start_w = static_cast(ow * stride_width) - static_cast(pad_width); + + for (int kd = 0; kd < static_cast(kernel_depth); ++kd) { + const int id = start_d + kd; + for (int kh = 0; kh < static_cast(kernel_height); ++kh) { + const int ih = start_h + kh; + for (int kw = 0; kw < static_cast(kernel_width); ++kw) { + const int iw = start_w + kw; + + if (id >= 0 && id < static_cast(input_depth) && ih >= 0 && ih < static_cast(input_height) && iw >= 0 && iw < static_cast(input_width)) { + const size_t idx = id * (input_height * input_width) + ih * input_width + iw; + sum += utils::cast(input[input_offset + idx]); + valid_count++; + } else if (id >= -static_cast(pad_depth) && id < static_cast(input_depth + pad_depth) && ih >= -static_cast(pad_height) && ih < static_cast(input_height + pad_height) && iw >= -static_cast(pad_width) && iw < static_cast(input_width + pad_width)) { + valid_count++; + } + } + } + } + + float result = 0.0f; + if (valid_count > 0) { + result = sum / static_cast(valid_count); + } + + const size_t out_idx = od * (output_height * output_width) + oh * output_width + ow; + output[output_offset + out_idx] = utils::cast(result); + } + } + } + } + } + } + + template + void _avgpool_cpu(Ydata *output, const T *input) const { + switch (info.ndim) { + case 1: + _avgpool_1d(output, input); + break; + case 2: + _avgpool_2d(output, input); + break; + case 3: + _avgpool_3d(output, input); + break; + default: + break; + } + } + +public: + Opaque(Opaque &&other) noexcept + : handle(other.handle), + info(std::move(other.info)), + workspace_size(other.workspace_size) { + other.handle = nullptr; + other.workspace_size = 0; + } + + ~Opaque() = default; + + static inline utils::Result + create(device::cpu::Handle *handle_ptr, + AvgPoolInfo &info) { + + Opaque opaque(handle_ptr, info); + return utils::Result(std::move(opaque)); + } + + infiniStatus_t calculate(void *workspace, size_t workspace_size, + void *output, const void *input, infiniDtype_t dtype) const { + if (!output || !input) { + return INFINI_STATUS_BAD_PARAM; + } + + size_t output_size = info.batch * info.channels; + for (size_t i = 0; i < info.ndim; ++i) { + output_size *= info.output_dims[i]; + } + + switch (dtype) 
{ + case INFINI_DTYPE_F32: { + float *typed_output = static_cast(output); + const float *typed_input = static_cast(input); + _avgpool_cpu(typed_output, typed_input); + break; + } + case INFINI_DTYPE_F16: { + float *typed_output_f32 = static_cast(workspace); + const fp16_t *typed_input = static_cast(input); + + _avgpool_cpu(typed_output_f32, typed_input); + + fp16_t *typed_output = static_cast(output); +#pragma omp parallel for + for (int i = 0; i < static_cast(output_size); ++i) { + typed_output[i] = utils::cast(typed_output_f32[i]); + } + break; + } + case INFINI_DTYPE_BF16: { + float *typed_output_f32 = static_cast(workspace); + const bf16_t *typed_input = static_cast(input); + + _avgpool_cpu(typed_output_f32, typed_input); + + bf16_t *typed_output = static_cast(output); +#pragma omp parallel for + for (int i = 0; i < static_cast(output_size); ++i) { + typed_output[i] = utils::cast(typed_output_f32[i]); + } + break; + } + default: + return INFINI_STATUS_BAD_TENSOR_DTYPE; + } + + return INFINI_STATUS_SUCCESS; + } +}; + +Descriptor::~Descriptor() { + if (_opaque) { + delete _opaque; + } +} + +inline size_t calculateOutputSize(const AvgPoolInfo &info) { + size_t size = info.batch * info.channels; + for (size_t i = 0; i < info.ndim; ++i) { + size *= info.output_dims[i]; + } + return size; +} + +infiniStatus_t Descriptor::create( + infiniopHandle_t handle_, + Descriptor **desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + void *kernel_size, + void *strides, + void *pads, + bool ceil_mode) { + + auto handle = reinterpret_cast(handle_); + auto dtype = input_desc->dtype(); + + CHECK_DTYPE(dtype, INFINI_DTYPE_F32, INFINI_DTYPE_F16, INFINI_DTYPE_BF16); + + auto result = AvgPoolInfo::create(output_desc, input_desc, kernel_size, + strides, pads, ceil_mode); + CHECK_RESULT(result); + auto info = result.take(); + + auto opaque_result = Opaque::create(handle, info); + CHECK_RESULT(opaque_result); + auto opaque = new Opaque(opaque_result.take()); + + size_t workspace_size = 0; + if (dtype == INFINI_DTYPE_F16 || dtype == INFINI_DTYPE_BF16) { + workspace_size = calculateOutputSize(info) * sizeof(float); + } + + *desc_ptr = new Descriptor(dtype, std::move(info), workspace_size, + opaque, handle->device, handle->device_id); + + return INFINI_STATUS_SUCCESS; +} + +infiniStatus_t Descriptor::calculate( + void *workspace, + size_t workspace_size, + void *output, + const void *input, + void *stream) const { + + if (workspace_size < _workspace_size) { + return INFINI_STATUS_INSUFFICIENT_WORKSPACE; + } + + return _opaque->calculate(workspace, workspace_size, output, input, _dtype); +} + +} // namespace op::averagepool::cpu diff --git a/src/infiniop/ops/averagepool/cpu/averagepool_cpu.h b/src/infiniop/ops/averagepool/cpu/averagepool_cpu.h new file mode 100644 index 000000000..8388f80ff --- /dev/null +++ b/src/infiniop/ops/averagepool/cpu/averagepool_cpu.h @@ -0,0 +1,8 @@ +#ifndef __AVERAGEPOOL_CPU_H__ +#define __AVERAGEPOOL_CPU_H__ + +#include "../averagepool.h" + +DESCRIPTOR(cpu) + +#endif // __AVERAGEPOOL_CPU_H__ diff --git a/src/infiniop/ops/averagepool/cuda/averagepool_kernel.cuh b/src/infiniop/ops/averagepool/cuda/averagepool_kernel.cuh new file mode 100644 index 000000000..7c9d0f438 --- /dev/null +++ b/src/infiniop/ops/averagepool/cuda/averagepool_kernel.cuh @@ -0,0 +1,185 @@ +#ifndef __AVERAGEPOOL_KERNEL_H__ +#define __AVERAGEPOOL_KERNEL_H__ + +#include + +// 1D average pooling kernel, compatible with PyTorch's implicit padding logic +template +__global__ void avgpool1d_pytorch_compatible_kernel( + const T
*input, T *output, int batch_size, int channels, int input_length, + int output_length, int kernel_size, int stride, int padding) { + + int batch_idx = blockIdx.x; + int channel_idx = blockIdx.y; + int output_idx = blockIdx.z * blockDim.x + threadIdx.x; + + if (batch_idx >= batch_size || channel_idx >= channels || output_idx >= output_length) { + return; + } + + // Compute input and output offsets + const T *input_ptr = input + batch_idx * channels * input_length + channel_idx * input_length; + T *output_ptr = output + batch_idx * channels * output_length + channel_idx * output_length; + + // Compute the starting position of the pooling window + int window_start = output_idx * stride - padding; + + // Use single precision for intermediate computation + float sum = 0.0f; + int valid_count = 0; + + // Iterate over the pooling window + for (int k = 0; k < kernel_size; ++k) { + int input_pos = window_start + k; + + if (input_pos >= 0 && input_pos < input_length) { + // Valid input position: convert to single precision and accumulate + sum += static_cast(input_ptr[input_pos]); + valid_count++; + } else if (input_pos >= -padding && input_pos < input_length + padding) { + // Explicit padding region: the value is 0, so only increment the count + valid_count++; + } + // Other positions are implicit padding and are not counted in the denominator + } + + // Compute the average and convert back to the original data type + if (valid_count > 0) { + float result = sum / static_cast(valid_count); + output_ptr[output_idx] = static_cast(result); + } else { + output_ptr[output_idx] = T(0); + } +} + +// 2D average pooling kernel, compatible with PyTorch's implicit padding logic +template +__global__ void avgpool2d_pytorch_compatible_kernel( + const T *input, T *output, int batch_size, int channels, int input_height, + int input_width, int output_height, int output_width, int kernel_h, + int kernel_w, int stride_h, int stride_w, int pad_h, int pad_w) { + + int batch_idx = blockIdx.x; + int channel_idx = blockIdx.y; + int output_idx = blockIdx.z * blockDim.x + threadIdx.x; + + int total_output_elements = output_height * output_width; + if (batch_idx >= batch_size || channel_idx >= channels || output_idx >= total_output_elements) { + return; + } + + // Convert the linear index to 2D coordinates + int out_h = output_idx / output_width; + int out_w = output_idx % output_width; + + // Compute input and output offsets + const T *input_ptr = input + batch_idx * channels * input_height * input_width + channel_idx * input_height * input_width; + T *output_ptr = output + batch_idx * channels * output_height * output_width + channel_idx * output_height * output_width; + + // Compute the starting position of the pooling window + int window_start_h = out_h * stride_h - pad_h; + int window_start_w = out_w * stride_w - pad_w; + + // Use single precision for intermediate computation + float sum = 0.0f; + int valid_count = 0; + + // Iterate over the pooling window + for (int kh = 0; kh < kernel_h; ++kh) { + for (int kw = 0; kw < kernel_w; ++kw) { + int input_h = window_start_h + kh; + int input_w = window_start_w + kw; + + if (input_h >= 0 && input_h < input_height && input_w >= 0 && input_w < input_width) { + // Valid input position: convert to single precision and accumulate + int input_idx = input_h * input_width + input_w; + sum += static_cast(input_ptr[input_idx]); + valid_count++; + } else if (input_h >= -pad_h && input_h < input_height + pad_h && input_w >= -pad_w && input_w < input_width + pad_w) { + // Explicit padding region: the value is 0, so only increment the count + valid_count++; + } + // Other positions are implicit padding and are not counted in the denominator + } + } + + // Compute the average and convert back to the original data type + if (valid_count > 0) { + float result = sum / static_cast(valid_count); + output_ptr[output_idx] = static_cast(result); + } else { + output_ptr[output_idx] = T(0); + } +} + +// 3D average pooling kernel, compatible with PyTorch's implicit padding logic +template +__global__ void avgpool3d_pytorch_compatible_kernel( + const T *input, T *output, int batch_size, int channels, int input_depth, + int input_height, int input_width, int output_depth, int output_height, + int output_width, int kernel_d, int kernel_h, int kernel_w, int stride_d, +
int stride_h, int stride_w, int pad_d, int pad_h, int pad_w) { + + int batch_idx = blockIdx.x; + int channel_idx = blockIdx.y; + int output_idx = blockIdx.z * blockDim.x + threadIdx.x; + + int total_output_elements = output_depth * output_height * output_width; + if (batch_idx >= batch_size || channel_idx >= channels || output_idx >= total_output_elements) { + return; + } + + // Convert the linear index to 3D coordinates + int out_d = output_idx / (output_height * output_width); + int remaining = output_idx % (output_height * output_width); + int out_h = remaining / output_width; + int out_w = remaining % output_width; + + // Compute input and output offsets + int input_spatial_size = input_depth * input_height * input_width; + int output_spatial_size = output_depth * output_height * output_width; + + const T *input_ptr = input + batch_idx * channels * input_spatial_size + channel_idx * input_spatial_size; + T *output_ptr = output + batch_idx * channels * output_spatial_size + channel_idx * output_spatial_size; + + // Compute the starting position of the pooling window + int window_start_d = out_d * stride_d - pad_d; + int window_start_h = out_h * stride_h - pad_h; + int window_start_w = out_w * stride_w - pad_w; + + // Use single precision for intermediate computation + float sum = 0.0f; + int valid_count = 0; + + // Iterate over the pooling window + for (int kd = 0; kd < kernel_d; ++kd) { + for (int kh = 0; kh < kernel_h; ++kh) { + for (int kw = 0; kw < kernel_w; ++kw) { + int input_d = window_start_d + kd; + int input_h = window_start_h + kh; + int input_w = window_start_w + kw; + + if (input_d >= 0 && input_d < input_depth && input_h >= 0 && input_h < input_height && input_w >= 0 && input_w < input_width) { + // Valid input position: convert to single precision and accumulate + int input_idx = (input_d * input_height + input_h) * input_width + input_w; + sum += static_cast(input_ptr[input_idx]); + valid_count++; + } else if (input_d >= -pad_d && input_d < input_depth + pad_d && input_h >= -pad_h && input_h < input_height + pad_h && input_w >= -pad_w && input_w < input_width + pad_w) { + // Explicit padding region: the value is 0, so only increment the count + valid_count++; + } + // Other positions are implicit padding and are not counted in the denominator + } + } + } + + // Compute the average and convert back to the original data type + if (valid_count > 0) { + float result = sum / static_cast(valid_count); + output_ptr[output_idx] = static_cast(result); + } else { + output_ptr[output_idx] = T(0); + } +} + +#endif // __AVERAGEPOOL_KERNEL_H__ diff --git a/src/infiniop/ops/averagepool/info.h b/src/infiniop/ops/averagepool/info.h new file mode 100644 index 000000000..871e827a7 --- /dev/null +++ b/src/infiniop/ops/averagepool/info.h @@ -0,0 +1,136 @@ +#ifndef __AVERAGEPOOL_INFO_H__ +#define __AVERAGEPOOL_INFO_H__ + +#include "../../../utils.h" +#include "../../operator.h" +#include "../../tensor.h" +#include +#include + +namespace op::averagepool { + +inline utils::Result calculatePoolOutputSize( + size_t input_size, + size_t kernel_size, + size_t stride, + size_t padding = 0, + bool ceil_mode = false) { + + if (stride == 0) { + return utils::Result(INFINI_STATUS_BAD_PARAM); + } + if (kernel_size == 0) { + return utils::Result(INFINI_STATUS_BAD_PARAM); + } + + size_t padded_input_size = input_size + 2 * padding; + + if (padded_input_size < kernel_size) { + return utils::Result(INFINI_STATUS_BAD_TENSOR_SHAPE); + } + + size_t output_size; + if (ceil_mode) { + // Equivalent to integer ceiling division + output_size = (padded_input_size - kernel_size + stride - 1) / stride + 1; + } else { + // Equivalent to integer floor division + output_size = (padded_input_size - kernel_size) / stride + 1; + } + + return utils::Result(output_size); +} + +// Check whether implicit padding exists +inline bool hasImplicitPadding( + size_t input_size, + size_t kernel_size, + size_t stride, + size_t padding, + bool ceil_mode) { + + if
(!ceil_mode) { + return false; + } + return ((input_size + 2 * padding) - kernel_size) % stride != 0; +} + +class AvgPoolInfo { + AvgPoolInfo() = default; + +public: + std::vector input_dims; + std::vector output_dims; + std::vector kernel_sizes; + std::vector strides; + std::vector pads; + bool ceil_mode; + size_t ndim; + size_t batch; + size_t channels; + bool has_implicit_padding = false; + + static utils::Result create( + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + void *kernel_size, + void *strides, + void *pads, + bool ceil_mode) { + + AvgPoolInfo info; + + if (input_desc->ndim() < 3 || input_desc->ndim() > 5) { + return INFINI_STATUS_BAD_TENSOR_SHAPE; + } + + if (input_desc->ndim() != output_desc->ndim()) { + return INFINI_STATUS_BAD_TENSOR_SHAPE; + } + + if (input_desc->dim(0) != output_desc->dim(0) || input_desc->dim(1) != output_desc->dim(1)) { + return INFINI_STATUS_BAD_TENSOR_SHAPE; + } + + info.ndim = input_desc->ndim() - 2; // spatial dimensions + info.batch = input_desc->dim(0); + info.channels = input_desc->dim(1); + info.ceil_mode = ceil_mode; + + auto kernel_ptr = reinterpret_cast(kernel_size); + auto stride_ptr = reinterpret_cast(strides); + auto pad_ptr = reinterpret_cast(pads); + + // Initialize the implicit padding flag + info.has_implicit_padding = false; + + // Get and validate the spatial dimensions + for (size_t i = 0; i < info.ndim; ++i) { + info.input_dims.push_back(input_desc->dim(i + 2)); + info.kernel_sizes.push_back(kernel_ptr[i]); + info.strides.push_back(stride_ptr[i]); + info.pads.push_back(pad_ptr[i]); + + auto output_size_result = calculatePoolOutputSize( + info.input_dims[i], info.kernel_sizes[i], info.strides[i], info.pads[i], info.ceil_mode); + CHECK_RESULT(output_size_result); + + size_t expected_size = output_size_result.take(); + if (expected_size != output_desc->dim(i + 2)) { + return INFINI_STATUS_BAD_TENSOR_SHAPE; + } + + info.output_dims.push_back(output_desc->dim(i + 2)); + + // Check whether the current dimension has implicit padding + if (hasImplicitPadding(info.input_dims[i], info.kernel_sizes[i], + info.strides[i], info.pads[i], info.ceil_mode)) { + info.has_implicit_padding = true; + } + } + return utils::Result(std::move(info)); + } +}; +} // namespace op::averagepool + +#endif // __AVERAGEPOOL_INFO_H__ diff --git a/src/infiniop/ops/averagepool/nvidia/averagepool.cu b/src/infiniop/ops/averagepool/nvidia/averagepool.cu new file mode 100644 index 000000000..6f276aac8 --- /dev/null +++ b/src/infiniop/ops/averagepool/nvidia/averagepool.cu @@ -0,0 +1,220 @@ +#include "../../../devices/nvidia/nvidia_common.cuh" +#include "../../../devices/nvidia/nvidia_handle.cuh" +#include "averagepool_nvidia.cuh" + +#define DESTROY_CUDNN_DESCRIPTOR(desc_ptr, destroy_func) \ + do { \ + if (desc_ptr) { \ + destroy_func(desc_ptr); \ + desc_ptr = nullptr; \ + } \ + } while (0) + +#define CLEANUP_CUDNN_DESCRIPTORS() \ + do { \ + DESTROY_CUDNN_DESCRIPTOR(input_desc, cudnnDestroyTensorDescriptor); \ + DESTROY_CUDNN_DESCRIPTOR(output_desc, cudnnDestroyTensorDescriptor); \ + DESTROY_CUDNN_DESCRIPTOR(pooling_desc, cudnnDestroyPoolingDescriptor); \ + } while (0) + +namespace op::averagepool::nvidia { + +struct Descriptor::Opaque { + std::shared_ptr internal; + size_t workspace_size = 0; + +#ifdef ENABLE_CUDNN_API + cudnnTensorDescriptor_t input_desc = nullptr; + cudnnTensorDescriptor_t output_desc = nullptr; + cudnnPoolingDescriptor_t pooling_desc = nullptr; +#endif + +private: + Opaque(std::shared_ptr internal_ptr) + : internal(internal_ptr) {} + +#ifdef ENABLE_CUDNN_API + infiniStatus_t getCudnnDataType(infiniDtype_t data_type, +
cudnnDataType_t &cudnn_data_type) const { + if (data_type == INFINI_DTYPE_F16) { + cudnn_data_type = device::nvidia::getCudnnDtype(data_type); + } else if (data_type == INFINI_DTYPE_F32) { + cudnn_data_type = device::nvidia::getCudnnDtype(data_type); + } else if (data_type == INFINI_DTYPE_BF16) { + cudnn_data_type = device::nvidia::getCudnnDtype(data_type); + } else { + return INFINI_STATUS_BAD_TENSOR_DTYPE; + } + return INFINI_STATUS_SUCCESS; + } + + infiniStatus_t createPoolingDescriptors(const AvgPoolInfo &info, + cudnnDataType_t cudnn_data_type) { + CHECK_CUDNN(cudnnCreateTensorDescriptor(&input_desc)); + CHECK_CUDNN(cudnnCreateTensorDescriptor(&output_desc)); + CHECK_CUDNN(cudnnCreatePoolingDescriptor(&pooling_desc)); + + std::vector input_dims_vec = {static_cast(info.batch), + static_cast(info.channels)}; + std::vector output_dims_vec = {static_cast(info.batch), + static_cast(info.channels)}; + + for (size_t i = 0; i < info.ndim; ++i) { + input_dims_vec.push_back(static_cast(info.input_dims[i])); + output_dims_vec.push_back(static_cast(info.output_dims[i])); + } + + if (info.ndim == 1) { + input_dims_vec.push_back(1); + output_dims_vec.push_back(1); + } + + CHECK_CUDNN(cudnnSetTensorNdDescriptorEx( + input_desc, CUDNN_TENSOR_NCHW, cudnn_data_type, input_dims_vec.size(), + input_dims_vec.data())); + + CHECK_CUDNN(cudnnSetTensorNdDescriptorEx( + output_desc, CUDNN_TENSOR_NCHW, cudnn_data_type, output_dims_vec.size(), + output_dims_vec.data())); + + return INFINI_STATUS_SUCCESS; + } + + infiniStatus_t setupPoolingDescriptor(const AvgPoolInfo &info) { + std::vector kernel_vec, stride_vec, pad_vec; + for (size_t i = 0; i < info.ndim; ++i) { + kernel_vec.push_back(static_cast(info.kernel_sizes[i])); + stride_vec.push_back(static_cast(info.strides[i])); + pad_vec.push_back(static_cast(info.pads[i])); + } + + if (info.ndim == 1) { + kernel_vec.push_back(1); + stride_vec.push_back(1); + pad_vec.push_back(0); + } + + CHECK_CUDNN(cudnnSetPoolingNdDescriptor( + pooling_desc, CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING, + CUDNN_NOT_PROPAGATE_NAN, kernel_vec.size(), kernel_vec.data(), + pad_vec.data(), stride_vec.data())); + + return INFINI_STATUS_SUCCESS; + } + + infiniStatus_t initializeCudnnContext(AvgPoolInfo &info, + infiniDtype_t data_type) { + cudnnDataType_t cudnn_data_type; + CHECK_STATUS(getCudnnDataType(data_type, cudnn_data_type)); + + CHECK_STATUS(createPoolingDescriptors(info, cudnn_data_type)); + CHECK_STATUS(setupPoolingDescriptor(info)); + + // Average pooling typically doesn't need a workspace + workspace_size = 0; + + return INFINI_STATUS_SUCCESS; + } +#endif + +public: + Opaque(Opaque &&other) noexcept + : internal(std::move(other.internal)), + workspace_size(other.workspace_size) + // clang-format off +#ifdef ENABLE_CUDNN_API + , input_desc(other.input_desc) + , output_desc(other.output_desc) + , pooling_desc(other.pooling_desc) +#endif + // clang-format on + { +#ifdef ENABLE_CUDNN_API + other.input_desc = nullptr; + other.output_desc = nullptr; + other.pooling_desc = nullptr; +#endif + other.workspace_size = 0; + } + + ~Opaque() { +#ifdef ENABLE_CUDNN_API + CLEANUP_CUDNN_DESCRIPTORS(); +#endif + } + + static inline utils::Result + create(std::shared_ptr internal_ptr, + AvgPoolInfo &info, infiniDtype_t data_type) { +#ifdef ENABLE_CUDNN_API + Opaque opaque(internal_ptr); + auto status = opaque.initializeCudnnContext(info, data_type); + if (status != INFINI_STATUS_SUCCESS) { + return status; + } + return utils::Result(std::move(opaque)); +#else + return 
INFINI_STATUS_NOT_IMPLEMENTED; +#endif + } +}; + +Descriptor::~Descriptor() { + if (_opaque) { + delete _opaque; + } +} + +infiniStatus_t Descriptor::create(infiniopHandle_t handle_, + Descriptor **desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + void *kernel_size, void *strides, void *pads, + bool ceil_mode) { + +#ifdef ENABLE_CUDNN_API + auto handle = reinterpret_cast(handle_); + auto dtype = input_desc->dtype(); + + CHECK_DTYPE(dtype, INFINI_DTYPE_F16, INFINI_DTYPE_F32, INFINI_DTYPE_BF16); + + auto result = AvgPoolInfo::create(output_desc, input_desc, kernel_size, + strides, pads, ceil_mode); + CHECK_RESULT(result); + auto info = result.take(); + + auto opaque_result = Opaque::create(handle->internal(), info, dtype); + CHECK_RESULT(opaque_result); + auto opaque = new Opaque(opaque_result.take()); + + *desc_ptr = new Descriptor(dtype, std::move(info), opaque->workspace_size, + opaque, handle->device, handle->device_id); + + return INFINI_STATUS_SUCCESS; +#else + return INFINI_STATUS_NOT_IMPLEMENTED; +#endif +} + +infiniStatus_t Descriptor::calculate(void *workspace, size_t workspace_size, + void *output, const void *input, + void *stream) const { + +#ifdef ENABLE_CUDNN_API + const float alpha = 1.0f, beta = 0.0f; + + CHECK_STATUS(_opaque->internal->useCudnn( + (cudaStream_t)stream, [&](cudnnHandle_t handle) { + CHECK_CUDNN(cudnnPoolingForward(handle, _opaque->pooling_desc, &alpha, + _opaque->input_desc, input, &beta, + _opaque->output_desc, output)); + return INFINI_STATUS_SUCCESS; + })); + + return INFINI_STATUS_SUCCESS; +#else + return INFINI_STATUS_NOT_IMPLEMENTED; +#endif +} + +} // namespace op::averagepool::nvidia diff --git a/src/infiniop/ops/averagepool/nvidia/averagepool_nvidia.cuh b/src/infiniop/ops/averagepool/nvidia/averagepool_nvidia.cuh new file mode 100644 index 000000000..ef19aa1dc --- /dev/null +++ b/src/infiniop/ops/averagepool/nvidia/averagepool_nvidia.cuh @@ -0,0 +1,8 @@ +#ifndef __AVERAGEPOOL_CUDA_CUH__ +#define __AVERAGEPOOL_CUDA_CUH__ + +#include "../averagepool.h" + +DESCRIPTOR(nvidia) + +#endif // __AVERAGEPOOL_CUDA_CUH__ diff --git a/src/infiniop/ops/averagepool/operator.cc b/src/infiniop/ops/averagepool/operator.cc new file mode 100644 index 000000000..233ec4736 --- /dev/null +++ b/src/infiniop/ops/averagepool/operator.cc @@ -0,0 +1,147 @@ +#include "../../operator.h" +#include "../../handle.h" +#include "infiniop/ops/averagepool.h" + +#ifdef ENABLE_CPU_API +#include "cpu/averagepool_cpu.h" +#endif +#if defined(ENABLE_NVIDIA_API) || defined(ENABLE_ILUVATAR_API) || defined(ENABLE_QY_API) +#include "nvidia/averagepool_nvidia.cuh" +#endif + +__C infiniStatus_t infiniopCreateAvgPoolDescriptor( + infiniopHandle_t handle, + infiniopAvgPoolDescriptor_t *desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + void *kernel_size, + void *strides, + void *pads, + bool ceil_mode) { + +#define CREATE(CASE, NAMESPACE) \ + case CASE: \ + return op::averagepool::NAMESPACE::Descriptor::create( \ + handle, \ + reinterpret_cast(desc_ptr), \ + output_desc, \ + input_desc, \ + kernel_size, \ + strides, \ + pads, \ + ceil_mode) + + switch (handle->device) { + +#ifdef ENABLE_CPU_API + CREATE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + CREATE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + CREATE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + CREATE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; 
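// Illustrative usage sketch of the AvgPool C API declared in include/infiniop/ops/averagepool.h
// (a minimal sketch, not the test suite's actual calling code). It assumes `handle`, the tensor
// descriptors `y_desc`/`x_desc`, device buffers `y`/`x`, a `workspace` allocation, and `stream`
// are set up elsewhere, and it assumes the kernel_size/strides/pads arrays hold size_t values
// (the API receives them as void *), which this patch does not spell out.
//
//     size_t kernel[2] = {3, 3}, strides[2] = {2, 2}, pads[2] = {1, 1};
//     infiniopAvgPoolDescriptor_t avgpool_desc;
//     infiniStatus_t status = infiniopCreateAvgPoolDescriptor(
//         handle, &avgpool_desc, y_desc, x_desc, kernel, strides, pads, /*ceil_mode=*/false);
//     size_t workspace_size = 0;
//     status = infiniopGetAvgPoolWorkspaceSize(avgpool_desc, &workspace_size);
//     // ... allocate `workspace` of at least `workspace_size` bytes on the target device ...
//     status = infiniopAvgPool(avgpool_desc, workspace, workspace_size, y, x, stream);
//     status = infiniopDestroyAvgPoolDescriptor(avgpool_desc);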
+ } + +#undef CREATE +} + +__C infiniStatus_t infiniopGetAvgPoolWorkspaceSize(infiniopAvgPoolDescriptor_t desc, size_t *size) { + +#define GET(CASE, NAMESPACE) \ + case CASE: \ + *size = reinterpret_cast(desc)->workspaceSize(); \ + return INFINI_STATUS_SUCCESS; + + switch (desc->device_type) { +#ifdef ENABLE_CPU_API + GET(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + GET(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + GET(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + GET(INFINI_DEVICE_QY, nvidia); +#endif + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } +#undef GET + + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; +} + +__C infiniStatus_t infiniopAvgPool( + infiniopAvgPoolDescriptor_t desc, + void *workspace, + size_t workspace_size, + void *output, + const void *input, + void *stream) { + +#define CALCULATE(CASE, NAMESPACE) \ + case CASE: \ + return reinterpret_cast(desc) \ + ->calculate(workspace, workspace_size, output, input, stream) + + switch (desc->device_type) { + +#ifdef ENABLE_CPU_API + CALCULATE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + CALCULATE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + CALCULATE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + CALCULATE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef CALCULATE +} + +__C infiniStatus_t +infiniopDestroyAvgPoolDescriptor(infiniopAvgPoolDescriptor_t desc) { + +#define DELETE(CASE, NAMESPACE) \ + case CASE: \ + delete reinterpret_cast(desc); \ + return INFINI_STATUS_SUCCESS; + + switch (desc->device_type) { + +#ifdef ENABLE_CPU_API + DELETE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + DELETE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + DELETE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + DELETE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef DELETE +} diff --git a/src/infiniop/ops/batch_norm/batch_norm.h b/src/infiniop/ops/batch_norm/batch_norm.h new file mode 100644 index 000000000..3bee6b5bb --- /dev/null +++ b/src/infiniop/ops/batch_norm/batch_norm.h @@ -0,0 +1,54 @@ +#ifndef __BATCH_NORM_H__ +#define __BATCH_NORM_H__ + +#include "../../../utils.h" +#include "../../operator.h" +#include "../../tensor.h" +#include "info.h" + +#define DESCRIPTOR(NAMESPACE) \ + namespace op::batch_norm::NAMESPACE { \ + class Descriptor final : public InfiniopDescriptor { \ + struct Opaque; \ + Opaque *_opaque; \ + BatchNormInfo _info; \ + size_t _workspace_size; \ + Descriptor( \ + infiniDtype_t dtype, \ + BatchNormInfo info, \ + size_t workspace_size_, \ + Opaque *opaque, \ + infiniDevice_t device_type, \ + int device_id) : InfiniopDescriptor{device_type, device_id}, \ + _opaque(opaque), \ + _info(info), \ + _workspace_size(workspace_size_) {} \ + \ + public: \ + ~Descriptor(); \ + size_t workspaceSize() const { return _workspace_size; } \ + static infiniStatus_t create( \ + infiniopHandle_t handle, \ + Descriptor **desc_ptr, \ + infiniopTensorDescriptor_t output_desc, \ + infiniopTensorDescriptor_t running_mean_desc, \ + infiniopTensorDescriptor_t running_var_desc, \ + infiniopTensorDescriptor_t input_desc, \ + infiniopTensorDescriptor_t weight_desc, \ + infiniopTensorDescriptor_t bias_desc, \ + float momentum, \ + float eps); \ + infiniStatus_t calculate( \ + void *workspace, \ + size_t workspace_size, \ + void *output, \ + void 
*running_mean, \ + void *running_var, \ + const void *input, \ + const void *weight, \ + const void *bias, \ + void *stream) const; \ + }; \ + } + +#endif \ No newline at end of file diff --git a/src/infiniop/ops/batch_norm/cpu/batch_norm_cpu.cc b/src/infiniop/ops/batch_norm/cpu/batch_norm_cpu.cc new file mode 100644 index 000000000..876b82904 --- /dev/null +++ b/src/infiniop/ops/batch_norm/cpu/batch_norm_cpu.cc @@ -0,0 +1,118 @@ +#include "batch_norm_cpu.h" +#include "../../../devices/cpu/common_cpu.h" +#include "../../../reduce/cpu/reduce.h" + +namespace op::batch_norm::cpu { + +Descriptor::~Descriptor() = default; + +infiniStatus_t Descriptor::create( + infiniopHandle_t handle_, + Descriptor **desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t running_mean_desc, + infiniopTensorDescriptor_t running_var_desc, + infiniopTensorDescriptor_t input_desc, + infiniopTensorDescriptor_t weight_desc, + infiniopTensorDescriptor_t bias_desc, + float momentum, + float eps) { + auto handle = reinterpret_cast(handle_); + auto dtype = input_desc->dtype(); + CHECK_DTYPE(dtype, INFINI_DTYPE_F16, INFINI_DTYPE_F32, INFINI_DTYPE_BF16); + auto result = BatchNormInfo::createBatchNormInfo( + output_desc, + running_mean_desc, + running_var_desc, + input_desc, + weight_desc, + bias_desc, + momentum, + eps); + CHECK_RESULT(result); + const BatchNormInfo &info = result.take(); + size_t WorkSpaceSize = 0; + *desc_ptr = new Descriptor( + dtype, std::move(info), WorkSpaceSize, + nullptr, + handle->device, handle->device_id); + + return INFINI_STATUS_SUCCESS; +} + +template +infiniStatus_t calculate_batch_norm( + const BatchNormInfo &info, + Tdata *output, + Tdata *running_mean, + Tdata *running_var, + const Tdata *input, + const Tdata *weight, + const Tdata *bias) { + +#pragma omp parallel for + for (int c = 0; c < static_cast(info.channel_size); c++) { + float sum_sq = 0., sum = 0.; + for (size_t b = 0; b < info.batch_size; b++) { + sum += op::common_cpu::reduce_op::sum( + input + (b * info.channel_size + static_cast(c)) * info.dim_size, + info.dim_size, + 1); + sum_sq += op::common_cpu::reduce_op::sumSquared( + input + (b * info.channel_size + static_cast(c)) * info.dim_size, + info.dim_size, + 1); + } + float batch_and_dim_size = static_cast(info.batch_size * info.dim_size); + float E = sum / batch_and_dim_size; + float var_biased = sum_sq / batch_and_dim_size - E * E; + float var_unbiased = var_biased * batch_and_dim_size / (batch_and_dim_size - 1.0f); + + auto running_mean_ptr = running_mean + static_cast(c) * info.running_mean_stride; + auto running_var_ptr = running_var + static_cast(c) * info.running_var_stride; + *running_mean_ptr = utils::cast((1 - info.momentum) * utils::cast(*running_mean_ptr) + info.momentum * E); + *running_var_ptr = utils::cast((1 - info.momentum) * utils::cast(*running_var_ptr) + info.momentum * var_unbiased); + + for (size_t b = 0; b < info.batch_size; b++) { + for (size_t d = 0; d < info.dim_size; d++) { + auto input_ptr = input + ((b * info.channel_size + static_cast(c)) * info.dim_size) + d; + auto output_ptr = output + ((b * info.channel_size + static_cast(c)) * info.dim_size) + d; + ; + auto weight_ptr = weight + static_cast(c) * info.weight_stride; + auto bias_ptr = bias + static_cast(c) * info.bias_stride; + *output_ptr = utils::cast( + (utils::cast(*input_ptr) - E) / std::sqrt(var_biased + info.eps) * utils::cast(*weight_ptr) + utils::cast(*bias_ptr)); + } + } + } + return INFINI_STATUS_SUCCESS; +} + +#define CALCULATE_BATCH_NORM(TDATA) \ + 
CHECK_STATUS(calculate_batch_norm(_info, \ + (TDATA *)output, (TDATA *)running_mean, (TDATA *)running_var, (const TDATA *)input, (const TDATA *)weight, (const TDATA *)bias)) + +infiniStatus_t Descriptor::calculate( + void *workspace, + size_t workspace_size, + void *output, + void *running_mean, + void *running_var, + const void *input, + const void *weight, + const void *bias, + void *stream) const { + + if (_info.dtype == INFINI_DTYPE_F16) { + CALCULATE_BATCH_NORM(fp16_t); + } else if (_info.dtype == INFINI_DTYPE_BF16) { + CALCULATE_BATCH_NORM(bf16_t); + } else if (_info.dtype == INFINI_DTYPE_F32) { + CALCULATE_BATCH_NORM(float); + } else { + return INFINI_STATUS_BAD_TENSOR_DTYPE; + } + + return INFINI_STATUS_SUCCESS; +} +} // namespace op::batch_norm::cpu diff --git a/src/infiniop/ops/batch_norm/cpu/batch_norm_cpu.h b/src/infiniop/ops/batch_norm/cpu/batch_norm_cpu.h new file mode 100644 index 000000000..722ebc6ec --- /dev/null +++ b/src/infiniop/ops/batch_norm/cpu/batch_norm_cpu.h @@ -0,0 +1,8 @@ +#ifndef __BATCH_NORM_CPU_H__ +#define __BATCH_NORM_CPU_H__ + +#include "../batch_norm.h" + +DESCRIPTOR(cpu) + +#endif // __BATCH_NORM_CPU_H__ diff --git a/src/infiniop/ops/batch_norm/cuda/kernel.cuh b/src/infiniop/ops/batch_norm/cuda/kernel.cuh new file mode 100644 index 000000000..6132b1afe --- /dev/null +++ b/src/infiniop/ops/batch_norm/cuda/kernel.cuh @@ -0,0 +1,57 @@ +#ifndef __BATCH_NORM_KERNEL_CUH__ +#define __BATCH_NORM_KERNEL_CUH__ + +#include "../../../reduce/cuda/reduce.cuh" +#include + +template +__device__ void batchNormKernel( + Tdata *output, + Tdata *running_mean, + Tdata *running_var, + const Tdata *input, + const Tdata *weight, + const Tdata *bias, + + size_t batch_size, + size_t channel_size, + size_t dim_size, + ptrdiff_t running_mean_stride, + ptrdiff_t running_var_stride, + ptrdiff_t weight_stride, + ptrdiff_t bias_stride, + float momentum, + float eps) { + auto output_ptr = output + dim_size * blockIdx.x; + auto input_ptr = input + dim_size * blockIdx.x; + + auto running_mean_ptr = running_mean + running_mean_stride * blockIdx.x; + auto running_var_ptr = running_var + running_var_stride * blockIdx.x; + auto weight_ptr = weight + weight_stride * blockIdx.x; + auto bias_ptr = bias + bias_stride * blockIdx.x; + + Tcompute sum_squared = 0., sum = 0.; + for (size_t b = 0; b < batch_size; b++) { + sum += op::common_cuda::reduce_op::sum( + input_ptr + b * (channel_size * dim_size), dim_size); + sum_squared += op::common_cuda::reduce_op::sumSquared( + input_ptr + b * (channel_size * dim_size), dim_size); + } + + __shared__ Tcompute E, var_biased; + if (threadIdx.x == 0) { + E = sum / Tcompute(batch_size * dim_size); + var_biased = sum_squared / Tcompute(batch_size * dim_size) - E * E; + Tcompute var_unbiased = var_biased * Tcompute(batch_size * dim_size) / Tcompute(batch_size * dim_size - 1); + *running_mean_ptr = Tcompute(1 - momentum) * Tcompute(*running_mean_ptr) + Tcompute(momentum) * E; + *running_var_ptr = Tcompute(1 - momentum) * Tcompute(*running_var_ptr) + Tcompute(momentum) * var_unbiased; + } + __syncthreads(); + + for (size_t n = threadIdx.x; n < batch_size * dim_size; n += BLOCK_SIZE) { + size_t b = n / dim_size, d = n % dim_size; + *(output_ptr + b * channel_size * dim_size + d) = (Tcompute(*(input_ptr + b * channel_size * dim_size + d)) - E) / sqrtf(float(var_biased + Tcompute(eps))) * Tcompute(*weight_ptr) + Tcompute(*bias_ptr); + } +} + +#endif // __BATCH_NORM_KERNEL_CUH__ \ No newline at end of file diff --git a/src/infiniop/ops/batch_norm/info.h 
b/src/infiniop/ops/batch_norm/info.h new file mode 100644 index 000000000..c27479865 --- /dev/null +++ b/src/infiniop/ops/batch_norm/info.h @@ -0,0 +1,69 @@ +#ifndef __BATCH_NORM_INFO_H__ +#define __BATCH_NORM_INFO_H__ + +#include "../../../utils.h" +#include "../../operator.h" +#include "../../tensor.h" + +namespace op::batch_norm { + +class BatchNormInfo { +private: + BatchNormInfo() = default; + +public: + // ---------------------------- start: define member variables of Info ---------------------------- + infiniDtype_t dtype; + size_t batch_size, channel_size, dim_size; + + ptrdiff_t running_mean_stride; + ptrdiff_t running_var_stride; + ptrdiff_t weight_stride; + ptrdiff_t bias_stride; + float momentum; + float eps; + + // ----------------------------- end: define member variables of Info ----------------------------- + + static utils::Result createBatchNormInfo( + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t running_mean_desc, + infiniopTensorDescriptor_t running_var_desc, + infiniopTensorDescriptor_t input_desc, + infiniopTensorDescriptor_t weight_desc, + infiniopTensorDescriptor_t bias_desc, + float momentum, + float eps) { + // ------------------------- start: check tensor shape and input validity ------------------------- + CHECK_OR_RETURN( + input_desc->ndim() == 3, + INFINI_STATUS_BAD_TENSOR_SHAPE); + CHECK_SAME_SHAPE(output_desc->shape(), input_desc->shape()); + size_t batch_size = output_desc->dim(0), + channel_size = output_desc->dim(1), + dim_size = output_desc->dim(2); + CHECK_SAME_SHAPE( + running_mean_desc->shape(), running_var_desc->shape(), + weight_desc->shape(), bias_desc->shape()); + CHECK_OR_RETURN( + running_mean_desc->ndim() == 1 && running_mean_desc->dim(0) == channel_size, + INFINI_STATUS_BAD_TENSOR_SHAPE); + + // -------------------------- end: check tensor shape and input validity -------------------------- + return utils::Result(BatchNormInfo{ + // ------------------------------ start: create an instance of Info ------------------------------- + output_desc->dtype(), + batch_size, channel_size, dim_size, + running_mean_desc->stride(0), + running_var_desc->stride(0), + weight_desc->stride(0), + bias_desc->stride(0), + momentum, + eps + // ------------------------------- end: create an instance of Info -------------------------------- + }); + } +}; +} // namespace op::batch_norm + +#endif // __BATCH_NORM_INFO_H__ diff --git a/src/infiniop/ops/batch_norm/nvidia/batch_norm_nvidia.cu b/src/infiniop/ops/batch_norm/nvidia/batch_norm_nvidia.cu new file mode 100644 index 000000000..e5e132c89 --- /dev/null +++ b/src/infiniop/ops/batch_norm/nvidia/batch_norm_nvidia.cu @@ -0,0 +1,176 @@ +#include "../../../devices/nvidia/nvidia_common.cuh" +#include "../../../devices/nvidia/nvidia_handle.cuh" +#include "../../../devices/nvidia/nvidia_kernel_common.cuh" + +#include "batch_norm_nvidia.cuh" + +#include "../../../reduce/cuda/reduce.cuh" +#include "../cuda/kernel.cuh" +#include + +#include "../info.h" + +namespace op::batch_norm::nvidia { + +// ---------------------- start: launchKernel: call kernel function of CUDA ----------------------- +template +INFINIOP_CUDA_KERNEL launchKernel( + Tdata *output, + Tdata *running_mean, + Tdata *running_var, + const Tdata *input, + const Tdata *weight, + const Tdata *bias, + + size_t batch_size, + size_t channel_size, + size_t dim_size, + ptrdiff_t running_mean_stride, + ptrdiff_t running_var_stride, + ptrdiff_t weight_stride, + ptrdiff_t bias_stride, + + float momentum, + float eps) { + + batchNormKernel( + 
output, + running_mean, + running_var, + input, + weight, + bias, + + batch_size, + channel_size, + dim_size, + + running_mean_stride, + running_var_stride, + weight_stride, + bias_stride, + + momentum, + eps); +} +// ----------------------- end: launchKernel: call kernel function of CUDA ------------------------ + +// ----------------------------------- start: call launchKernel ----------------------------------- +template +infiniStatus_t calculate_batch_norm( + const BatchNormInfo &info, + Tdata *output, + Tdata *running_mean, + Tdata *running_var, + const Tdata *input, + const Tdata *weight, + const Tdata *bias, + + cudaStream_t stream) { + launchKernel<<>>( + output, + running_mean, + running_var, + input, + weight, + bias, + + info.batch_size, + info.channel_size, + info.dim_size, + + info.running_mean_stride, + info.running_var_stride, + info.weight_stride, + info.bias_stride, + info.momentum, + info.eps); + return INFINI_STATUS_SUCCESS; +} +// ------------------------------------ end: call launchKernel ------------------------------------ + +struct Descriptor::Opaque { + std::shared_ptr internal; +}; + +Descriptor::~Descriptor() { + delete _opaque; +} + +infiniStatus_t Descriptor::create( + infiniopHandle_t handle_, + Descriptor **desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t running_mean_desc, + infiniopTensorDescriptor_t running_var_desc, + infiniopTensorDescriptor_t input_desc, + infiniopTensorDescriptor_t weight_desc, + infiniopTensorDescriptor_t bias_desc, + float momentum, + float eps) { + auto handle = reinterpret_cast(handle_); + // --------------------- start: check data type and calculate workspace size ---------------------- + auto dtype = output_desc->dtype(); + CHECK_DTYPE(dtype, INFINI_DTYPE_F16, INFINI_DTYPE_F32, INFINI_DTYPE_BF16); + size_t WorkSpaceSize = 0; + // ---------------------- end: check data type and calculate workspace size ----------------------- + auto result = BatchNormInfo::createBatchNormInfo( + output_desc, + running_mean_desc, + running_var_desc, + input_desc, + weight_desc, + bias_desc, + momentum, + eps); + CHECK_RESULT(result); + const BatchNormInfo &info = result.take(); + *desc_ptr = new Descriptor( + dtype, std::move(info), WorkSpaceSize, + new Opaque{handle->internal()}, + handle->device, handle->device_id); + return INFINI_STATUS_SUCCESS; +} + +infiniStatus_t Descriptor::calculate( + void *workspace, + size_t workspace_size, + void *output, + void *running_mean, + void *running_var, + const void *input, + const void *weight, + const void *bias, + void *stream_) const { + if (workspace_size < _workspace_size) { + return INFINI_STATUS_INSUFFICIENT_WORKSPACE; + } + cudaStream_t stream = (cudaStream_t)stream_; + +#define CALCULATE_BATCH_NORM(BLOCK_SIZE, TDATA) \ + calculate_batch_norm(_info, (TDATA *)output, (TDATA *)running_mean, (TDATA *)running_var, (const TDATA *)input, (const TDATA *)weight, (const TDATA *)bias, stream) +#define CALCULATE_BATCH_NORM_WITH_BLOCK_SIZE(BLOCK_SIZE) \ + { \ + if (_info.dtype == INFINI_DTYPE_F16) \ + return CALCULATE_BATCH_NORM(BLOCK_SIZE, half); \ + else if (_info.dtype == INFINI_DTYPE_F32) \ + return CALCULATE_BATCH_NORM(BLOCK_SIZE, float); \ + else if (_info.dtype == INFINI_DTYPE_BF16) \ + return CALCULATE_BATCH_NORM(BLOCK_SIZE, __nv_bfloat16); \ + else \ + return INFINI_STATUS_BAD_TENSOR_DTYPE; \ + } + + if (_opaque->internal->maxThreadsPerBlock() == CUDA_BLOCK_SIZE_1024) { + CALCULATE_BATCH_NORM_WITH_BLOCK_SIZE(CUDA_BLOCK_SIZE_1024) + } else if 
(_opaque->internal->maxThreadsPerBlock() == CUDA_BLOCK_SIZE_512) { + CALCULATE_BATCH_NORM_WITH_BLOCK_SIZE(CUDA_BLOCK_SIZE_512) + } else if (_opaque->internal->maxThreadsPerBlock() == CUDA_BLOCK_SIZE_4096) { + CALCULATE_BATCH_NORM_WITH_BLOCK_SIZE(CUDA_BLOCK_SIZE_4096) + } else { + return INFINI_STATUS_DEVICE_ARCHITECTURE_NOT_SUPPORTED; + } + + return INFINI_STATUS_SUCCESS; +} +} // namespace op::batch_norm::nvidia diff --git a/src/infiniop/ops/batch_norm/nvidia/batch_norm_nvidia.cuh b/src/infiniop/ops/batch_norm/nvidia/batch_norm_nvidia.cuh new file mode 100644 index 000000000..33c93f2b4 --- /dev/null +++ b/src/infiniop/ops/batch_norm/nvidia/batch_norm_nvidia.cuh @@ -0,0 +1,10 @@ +#ifndef __BATCH_NORM_NVIDIA_API_H__ +#define __BATCH_NORM_NVIDIA_API_H__ + +// #ifdef ENABLE_NINETOOTHED +#include "../batch_norm.h" +DESCRIPTOR(nvidia) + +// #endif + +#endif // __BATCH_NORM_NVIDIA_API_H__ diff --git a/src/infiniop/ops/batch_norm/operator.cc b/src/infiniop/ops/batch_norm/operator.cc new file mode 100644 index 000000000..a87dfff60 --- /dev/null +++ b/src/infiniop/ops/batch_norm/operator.cc @@ -0,0 +1,156 @@ +#include "../../operator.h" +#include "../../handle.h" +#include "infiniop/ops/batch_norm.h" + +#ifdef ENABLE_CPU_API +#include "cpu/batch_norm_cpu.h" +#endif +#if defined(ENABLE_NVIDIA_API) || defined(ENABLE_ILUVATAR_API) || defined(ENABLE_QY_API) +#include "nvidia/batch_norm_nvidia.cuh" +#endif + +__C infiniStatus_t infiniopCreateBatchNormDescriptor( + infiniopHandle_t handle, + infiniopBatchNormDescriptor_t *desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t running_mean_desc, + infiniopTensorDescriptor_t running_var_desc, + infiniopTensorDescriptor_t input_desc, + infiniopTensorDescriptor_t weight_desc, + infiniopTensorDescriptor_t bias_desc, + float momentum, + float eps) { + +#define CREATE(CASE, NAMESPACE) \ + case CASE: \ + return op::batch_norm::NAMESPACE::Descriptor::create( \ + handle, \ + reinterpret_cast(desc_ptr), \ + output_desc, \ + running_mean_desc, \ + running_var_desc, \ + input_desc, \ + weight_desc, \ + bias_desc, \ + momentum, \ + eps) + + switch (handle->device) { + +#ifdef ENABLE_CPU_API + CREATE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + CREATE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + CREATE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + CREATE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef CREATE +} + +__C infiniStatus_t infiniopGetBatchNormWorkspaceSize(infiniopBatchNormDescriptor_t desc, size_t *size) { + +#define GET(CASE, NAMESPACE) \ + case CASE: \ + *size = reinterpret_cast(desc)->workspaceSize(); \ + return INFINI_STATUS_SUCCESS; + + switch (desc->device_type) { +#ifdef ENABLE_CPU_API + GET(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + GET(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + GET(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + GET(INFINI_DEVICE_QY, nvidia); +#endif + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } +#undef GET + + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; +} + +__C infiniStatus_t infiniopBatchNorm( + infiniopBatchNormDescriptor_t desc, + void *workspace, + size_t workspace_size, + void *output, + void *running_mean, + void *running_var, + const void *input, + const void *weight, + const void *bias, + void *stream) { + +#define CALCULATE(CASE, NAMESPACE) \ + case CASE: \ + return reinterpret_cast(desc) \ + 
->calculate(workspace, workspace_size, output, running_mean, running_var, \ + input, weight, bias, stream) + + switch (desc->device_type) { + +#ifdef ENABLE_CPU_API + CALCULATE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + CALCULATE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + CALCULATE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + CALCULATE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef CALCULATE +} + +__C infiniStatus_t +infiniopDestroyBatchNormDescriptor(infiniopBatchNormDescriptor_t desc) { + +#define DELETE(CASE, NAMESPACE) \ + case CASE: \ + delete reinterpret_cast(desc); \ + return INFINI_STATUS_SUCCESS; + + switch (desc->device_type) { + +#ifdef ENABLE_CPU_API + DELETE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + DELETE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + DELETE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + DELETE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef DELETE +} diff --git a/src/infiniop/ops/cross_entropy_loss/cpu/cross_entropy_loss_cpu.cc b/src/infiniop/ops/cross_entropy_loss/cpu/cross_entropy_loss_cpu.cc new file mode 100644 index 000000000..af97c1d09 --- /dev/null +++ b/src/infiniop/ops/cross_entropy_loss/cpu/cross_entropy_loss_cpu.cc @@ -0,0 +1,321 @@ +#include "cross_entropy_loss_cpu.h" +#include "../../../devices/cpu/common_cpu.h" +#include "../../../devices/cpu/cpu_handle.h" +#include "../info.h" +#include +#include +#include +#include + +namespace op::cross_entropy_loss::cpu { + +struct Descriptor::Opaque { + device::cpu::Handle *handle; + std::vector logits_shape; + size_t workspace_size = 0; + +private: + Opaque(device::cpu::Handle *handle_ptr, const std::vector &shape) + : handle(handle_ptr), logits_shape(shape) { + // 计算workspace大小:需要存储per-sample loss + size_t N = logits_shape[0]; + size_t inner_size = 1; + for (size_t i = 2; i < logits_shape.size(); ++i) { + inner_size *= logits_shape[i]; + } + workspace_size = N * inner_size * sizeof(float); + } + + void cross_entropy_f16_as_float(float *workspace, float *loss_result, + const fp16_t *logits, const int64_t *target) const { + size_t N = logits_shape[0]; + size_t C = logits_shape[1]; + size_t inner_size = 1; + for (size_t i = 2; i < logits_shape.size(); ++i) { + inner_size *= logits_shape[i]; + } + + // 转换F16 logits为float + size_t total_logits_size = N * C * inner_size; + std::vector float_logits(total_logits_size); + for (size_t i = 0; i < total_logits_size; ++i) { + float_logits[i] = utils::cast(logits[i]); + } + + // 使用float精度计算 + cross_entropy_cpu_float(workspace, loss_result, float_logits.data(), target); + } + + // 通用的float版本交叉熵计算 + void cross_entropy_cpu_float(float *workspace, float *loss_result, + const float *logits, const int64_t *target) const { + size_t N = logits_shape[0]; + size_t C = logits_shape[1]; + size_t inner_size = 1; + for (size_t i = 2; i < logits_shape.size(); ++i) { + inner_size *= logits_shape[i]; + } + + const int64_t ignore_index = -100; + float *per_sample_loss = workspace; + + // 计算每个样本的损失 + for (size_t n = 0; n < N; ++n) { + for (size_t inner = 0; inner < inner_size; ++inner) { + size_t sample_idx = n * inner_size + inner; + int64_t t = target[sample_idx]; + + // 检查ignore_index或无效target + if (t == ignore_index || t < 0 || t >= static_cast(C)) { + per_sample_loss[sample_idx] = 0.0f; + continue; + } + + // 计算这个位置的logits基址 + size_t 
base_offset = n * C * inner_size + inner; + + // 数值稳定的softmax计算:先找最大值 + float max_logit = -std::numeric_limits::infinity(); + for (size_t c = 0; c < C; ++c) { + size_t logit_idx = base_offset + c * inner_size; + max_logit = std::max(max_logit, logits[logit_idx]); + } + + // 计算exp的和(减去最大值保证数值稳定) + float sum_exp = 0.0f; + for (size_t c = 0; c < C; ++c) { + size_t logit_idx = base_offset + c * inner_size; + sum_exp += std::exp(logits[logit_idx] - max_logit); + } + + // 计算目标类别的logit + size_t target_logit_idx = base_offset + static_cast(t) * inner_size; + float target_logit = logits[target_logit_idx]; + + // 计算交叉熵损失:log_softmax[target] = logit[target] - log(sum_exp) - max_logit + // 所以 -log_softmax[target] = log(sum_exp) + max_logit - logit[target] + per_sample_loss[sample_idx] = std::log(sum_exp) + max_logit - target_logit; + } + } + + // 计算平均损失(忽略ignore_index的样本) + double total_loss = 0.0; + size_t valid_count = 0; + size_t total_samples = N * inner_size; + + for (size_t i = 0; i < total_samples; ++i) { + if (target[i] != ignore_index && target[i] >= 0 && target[i] < static_cast(C)) { + total_loss += static_cast(per_sample_loss[i]); + valid_count++; + } + } + + *loss_result = valid_count > 0 ? static_cast(total_loss / valid_count) : 0.0f; + } + + // 通用模板版本(用于F32和BF16) + template + void cross_entropy_cpu_generic(float *workspace, T *loss_result, + const T *logits, const int64_t *target) const { + size_t N = logits_shape[0]; + size_t C = logits_shape[1]; + size_t inner_size = 1; + for (size_t i = 2; i < logits_shape.size(); ++i) { + inner_size *= logits_shape[i]; + } + + const int64_t ignore_index = -100; + float *per_sample_loss = workspace; + + // 计算每个样本的损失 + for (size_t n = 0; n < N; ++n) { + for (size_t inner = 0; inner < inner_size; ++inner) { + size_t sample_idx = n * inner_size + inner; + int64_t t = target[sample_idx]; + + // 检查ignore_index或无效target + if (t == ignore_index || t < 0 || t >= static_cast(C)) { + per_sample_loss[sample_idx] = 0.0f; + continue; + } + + // 计算这个位置的logits基址 + size_t base_offset = n * C * inner_size + inner; + + // 数值稳定的softmax计算:先找最大值 + float max_logit = -std::numeric_limits::infinity(); + for (size_t c = 0; c < C; ++c) { + size_t logit_idx = base_offset + c * inner_size; + float logit_val; + if constexpr (std::is_same::value) { + logit_val = utils::cast(logits[logit_idx]); + } else { + logit_val = logits[logit_idx]; + } + max_logit = std::max(max_logit, logit_val); + } + + // 计算exp的和 + float sum_exp = 0.0f; + for (size_t c = 0; c < C; ++c) { + size_t logit_idx = base_offset + c * inner_size; + float logit_val; + if constexpr (std::is_same::value) { + logit_val = utils::cast(logits[logit_idx]); + } else { + logit_val = logits[logit_idx]; + } + sum_exp += std::exp(logit_val - max_logit); + } + + // 计算目标类别的logit + size_t target_logit_idx = base_offset + static_cast(t) * inner_size; + float target_logit; + if constexpr (std::is_same::value) { + target_logit = utils::cast(logits[target_logit_idx]); + } else { + target_logit = logits[target_logit_idx]; + } + + // 计算交叉熵损失 + per_sample_loss[sample_idx] = std::log(sum_exp) + max_logit - target_logit; + } + } + + // 计算平均损失 + double total_loss = 0.0; + size_t valid_count = 0; + size_t total_samples = N * inner_size; + + for (size_t i = 0; i < total_samples; ++i) { + if (target[i] != ignore_index && target[i] >= 0 && target[i] < static_cast(C)) { + total_loss += static_cast(per_sample_loss[i]); + valid_count++; + } + } + + float mean_loss = valid_count > 0 ? 
static_cast(total_loss / valid_count) : 0.0f; + + // 转换回输出类型 + if constexpr (std::is_same::value) { + *loss_result = utils::cast(mean_loss); + } else { + *loss_result = static_cast(mean_loss); + } + } + +public: + Opaque(Opaque &&other) noexcept + : handle(other.handle), + logits_shape(std::move(other.logits_shape)), + workspace_size(other.workspace_size) { + other.handle = nullptr; + other.workspace_size = 0; + } + + ~Opaque() = default; + + static inline utils::Result + create(device::cpu::Handle *handle_ptr, const std::vector &shape) { + Opaque opaque(handle_ptr, shape); + return utils::Result(std::move(opaque)); + } + + infiniStatus_t calculate(void *workspace, size_t workspace_size, + void *loss, const void *logits, const void *target, + infiniDtype_t dtype) const { + if (!workspace || !loss || !logits || !target) { + return INFINI_STATUS_BAD_PARAM; + } + + if (workspace_size < this->workspace_size) { + return INFINI_STATUS_INTERNAL_ERROR; + } + + float *workspace_ptr = static_cast(workspace); + const int64_t *target_ptr = static_cast(target); + + switch (dtype) { + case INFINI_DTYPE_F32: { + const float *logits_ptr = static_cast(logits); + float *loss_ptr = static_cast(loss); + cross_entropy_cpu_generic(workspace_ptr, loss_ptr, logits_ptr, target_ptr); + break; + } + + case INFINI_DTYPE_F16: { + const fp16_t *logits_ptr = static_cast(logits); + fp16_t *loss_ptr = static_cast(loss); + + // F16特殊处理:使用float计算 + float temp_loss; + cross_entropy_f16_as_float(workspace_ptr, &temp_loss, logits_ptr, target_ptr); + *loss_ptr = utils::cast(temp_loss); + break; + } + + case INFINI_DTYPE_BF16: { + const bf16_t *logits_ptr = static_cast(logits); + bf16_t *loss_ptr = static_cast(loss); + cross_entropy_cpu_generic(workspace_ptr, loss_ptr, logits_ptr, target_ptr); + break; + } + + default: + return INFINI_STATUS_BAD_TENSOR_DTYPE; + } + + return INFINI_STATUS_SUCCESS; + } + + size_t get_workspace_size() const { + return workspace_size; + } +}; + +Descriptor::~Descriptor() { + if (_opaque) { + delete _opaque; + } +} + +infiniStatus_t Descriptor::create(infiniopHandle_t handle_, + Descriptor **desc_ptr, + infiniopTensorDescriptor_t /*loss_desc*/, + infiniopTensorDescriptor_t logits_desc, + infiniopTensorDescriptor_t /*target_desc*/) { + auto handle = reinterpret_cast(handle_); + auto dtype = logits_desc->dtype(); + + CHECK_DTYPE(dtype, INFINI_DTYPE_F32, INFINI_DTYPE_F16, INFINI_DTYPE_BF16); + + const auto &orig_shape = logits_desc->shape(); + std::vector logits_shape; + + if (orig_shape.size() == 1) { + logits_shape = {1, orig_shape[0]}; + } else { + logits_shape = orig_shape; + } + + if (logits_shape.size() < 2) { + return INFINI_STATUS_BAD_TENSOR_SHAPE; + } + + auto opaque_result = Opaque::create(handle, logits_shape); + CHECK_RESULT(opaque_result); + auto opaque = new Opaque(opaque_result.take()); + + *desc_ptr = new Descriptor(dtype, opaque->get_workspace_size(), opaque, + handle->device, handle->device_id); + + return INFINI_STATUS_SUCCESS; +} + +infiniStatus_t Descriptor::calculate(void *workspace, size_t workspace_size, + void *loss, const void *logits, + const void *target, void *stream) const { + return _opaque->calculate(workspace, workspace_size, loss, logits, target, _dtype); +} + +} // namespace op::cross_entropy_loss::cpu diff --git a/src/infiniop/ops/cross_entropy_loss/cpu/cross_entropy_loss_cpu.h b/src/infiniop/ops/cross_entropy_loss/cpu/cross_entropy_loss_cpu.h new file mode 100644 index 000000000..8afec63d0 --- /dev/null +++ 
b/src/infiniop/ops/cross_entropy_loss/cpu/cross_entropy_loss_cpu.h @@ -0,0 +1,8 @@ +#ifndef __CROSS_ENTROPY_LOSS_CPU_H__ +#define __CROSS_ENTROPY_LOSS_CPU_H__ + +#include "../cross_entropy_loss.h" + +DESCRIPTOR(cpu) + +#endif // __CROSS_ENTROPY_LOSS_CPU_H__ diff --git a/src/infiniop/ops/cross_entropy_loss/cross_entropy_loss.h b/src/infiniop/ops/cross_entropy_loss/cross_entropy_loss.h new file mode 100644 index 000000000..dad108d78 --- /dev/null +++ b/src/infiniop/ops/cross_entropy_loss/cross_entropy_loss.h @@ -0,0 +1,48 @@ +#ifndef __CROSS_ENTROPY_LOSS_H__ +#define __CROSS_ENTROPY_LOSS_H__ + +#include "../../operator.h" +#include "info.h" + +#define DESCRIPTOR(NAMESPACE) \ + \ + namespace op::cross_entropy_loss::NAMESPACE { \ + class Descriptor final : public InfiniopDescriptor { \ + struct Opaque; \ + Opaque *_opaque; \ + infiniDtype_t _dtype; \ + size_t _workspace_size; \ + \ + Descriptor( \ + infiniDtype_t dtype, \ + size_t workspace_size_, \ + Opaque *opaque, \ + infiniDevice_t device_type, \ + int device_id) \ + : InfiniopDescriptor{device_type, device_id}, \ + _opaque(opaque), \ + _dtype(dtype), \ + _workspace_size(workspace_size_) {} \ + \ + public: \ + ~Descriptor(); \ + \ + size_t workspaceSize() const { return _workspace_size; } \ + \ + static infiniStatus_t create( \ + infiniopHandle_t handle, \ + Descriptor **desc_ptr, \ + infiniopTensorDescriptor_t loss_desc, \ + infiniopTensorDescriptor_t logits_desc, \ + infiniopTensorDescriptor_t target_desc); \ + \ + infiniStatus_t calculate( \ + void *workspace, size_t workspace_size, \ + void *loss, \ + const void *logits, \ + const void *target, \ + void *stream) const; \ + }; \ + } + +#endif // __CROSS_ENTROPY_LOSS_H__ diff --git a/src/infiniop/ops/cross_entropy_loss/info.h b/src/infiniop/ops/cross_entropy_loss/info.h new file mode 100644 index 000000000..5278bf912 --- /dev/null +++ b/src/infiniop/ops/cross_entropy_loss/info.h @@ -0,0 +1,36 @@ +#ifndef __CROSS_ENTROPY_LOSS_INFO_H__ +#define __CROSS_ENTROPY_LOSS_INFO_H__ + +#include "../../../utils.h" +#include "../../operator.h" +#include "../../tensor.h" + +namespace op::cross_entropy_loss { + +class CrossEntropyInfo { +public: + CrossEntropyInfo() = default; + size_t batch = 0; + size_t num_classes = 0; + infiniDtype_t dtype; + + static utils::Result create( + infiniopTensorDescriptor_t loss, + infiniopTensorDescriptor_t logits, + infiniopTensorDescriptor_t target) { + + if (logits->ndim() != 2 || loss->ndim() != 1 || target->ndim() != 1) { + return INFINI_STATUS_BAD_TENSOR_SHAPE; + } + + CrossEntropyInfo info; + info.batch = logits->dim(0); + info.num_classes = logits->dim(1); + info.dtype = logits->dtype(); + return utils::Result(std::move(info)); + } +}; + +} // namespace op::cross_entropy_loss + +#endif // __CROSS_ENTROPY_LOSS_INFO_H__ diff --git a/src/infiniop/ops/cross_entropy_loss/nvidia/cross_entropy_loss_nvidia.cu b/src/infiniop/ops/cross_entropy_loss/nvidia/cross_entropy_loss_nvidia.cu new file mode 100644 index 000000000..3d795a67a --- /dev/null +++ b/src/infiniop/ops/cross_entropy_loss/nvidia/cross_entropy_loss_nvidia.cu @@ -0,0 +1,217 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../../../devices/nvidia/nvidia_common.cuh" +#include "../../../devices/nvidia/nvidia_handle.cuh" +#include "cross_entropy_loss_nvidia.cuh" + +namespace op::cross_entropy_loss::nvidia { +namespace cuda { + +__device__ __forceinline__ float to_float(float v) { return v; } +__device__ __forceinline__ float to_float(double v) { return (float)v; } 
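// --- Editorial note, not part of the original patch -----------------------------------
// The per-sample kernel below uses the numerically stable log-sum-exp identity:
// with m = max_c x_c, lse = log(sum_c exp(x_c - m)) + m and loss = lse - x_t.
// Worked example: logits {1, 2, 3}, target class 2 -> m = 3,
// sum = exp(-2) + exp(-1) + exp(0) ~= 1.5032, lse ~= 3.4076, loss ~= 0.4076.
// A minimal host-side reference for one sample (hypothetical helper, assuming
// contiguous float logits and <cmath>/<algorithm>), mirroring that arithmetic:
//
//   inline float referenceSampleLoss(const float *x, int C, int64_t t) {
//       float m = x[0];
//       for (int c = 1; c < C; ++c) m = std::max(m, x[c]);   // running max
//       float sum_exp = 0.f;
//       for (int c = 0; c < C; ++c) sum_exp += std::exp(x[c] - m);
//       return std::log(sum_exp) + m - x[t];                 // -log softmax(x)[t]
//   }
// ---------------------------------------------------------------------------------------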
+__device__ __forceinline__ float to_float(half v) { return __half2float(v); } +__device__ __forceinline__ float to_float(__nv_bfloat16 v) { + return __bfloat162float(v); +} + +template +__global__ void +softmaxCrossEntropy_per_sample(T_out *__restrict__ loss, + const T_in *__restrict__ logits, + const int64_t *__restrict__ target, int N, int C, + long long inner_size, int64_t ignore_index) { + long long total = (long long)N * inner_size; + long long idx = (long long)blockIdx.x * blockDim.x + threadIdx.x; + if (idx >= total) { + return; + } + + int n = (int)(idx / inner_size); + int inr = (int)(idx % inner_size); + + int64_t t = target[(long long)n * inner_size + inr]; + if (ignore_index != LLONG_MIN && t == ignore_index) { + loss[idx] = (T_out)0; + return; + } + if (t < 0 || t >= C) { + loss[idx] = (T_out)0; + return; + } + + const long long base = ((long long)n * C * inner_size) + inr; + + // 数值稳定 LSE:lse = log(sum exp(x - m)) + m + float m = -CUDART_INF_F; + for (int c = 0; c < C; ++c) { + m = fmaxf(m, to_float(logits[base + (long long)c * inner_size])); + } + + float sum_exp = 0.f; + for (int c = 0; c < C; ++c) { + sum_exp += expf(to_float(logits[base + (long long)c * inner_size]) - m); + } + + float lse = logf(sum_exp) + m; + float logit_t = to_float(logits[base + (long long)(int)t * inner_size]); + loss[idx] = (T_out)(lse - logit_t); +} + +} // namespace cuda + +struct Descriptor::Opaque { + std::shared_ptr internal; + std::vector logits_shape; + Opaque(std::shared_ptr p) : internal(p) {} + ~Opaque() = default; +}; + +Descriptor::~Descriptor() { + if (_opaque) { + delete _opaque; + } +} + +infiniStatus_t Descriptor::create(infiniopHandle_t handle_, + Descriptor **desc_ptr, + infiniopTensorDescriptor_t /*loss_desc*/, + infiniopTensorDescriptor_t logits_desc, + infiniopTensorDescriptor_t /*target_desc*/) { +#if defined(ENABLE_NVIDIA_API) || defined(ENABLE_ILUVATAR_API) + auto handle = reinterpret_cast(handle_); + auto dtype = logits_desc->dtype(); + CHECK_DTYPE(dtype, INFINI_DTYPE_F32, INFINI_DTYPE_F16, INFINI_DTYPE_BF16); + + const auto &orig = logits_desc->shape(); + auto opaque = new Opaque(handle->internal()); + + if (orig.size() == 1) { + opaque->logits_shape = {1, orig[0]}; + } else { + opaque->logits_shape = orig; + } + + const auto &s = opaque->logits_shape; + long long N = (long long)s[0]; + long long inner = 1; + for (size_t i = 2; i < s.size(); ++i) { + inner *= (long long)s[i]; + } + + size_t workspace_size = (size_t)(N * inner) * sizeof(float); + *desc_ptr = new Descriptor(dtype, workspace_size, opaque, handle->device, + handle->device_id); + return INFINI_STATUS_SUCCESS; +#else + return INFINI_STATUS_NOT_IMPLEMENTED; +#endif +} + +infiniStatus_t Descriptor::calculate(void *workspace, size_t workspace_size, + void *loss, const void *logits, + const void *target, void *stream) const { +#if defined(ENABLE_NVIDIA_API) || defined(ENABLE_ILUVATAR_API) + const auto &s = _opaque->logits_shape; + int N = (int)s[0]; + int C = (int)s[1]; + long long inner = 1; + for (size_t i = 2; i < s.size(); ++i) { + inner *= (long long)s[i]; + } + long long total = (long long)N * inner; + + size_t need_ws = (size_t)total * sizeof(float); + if (workspace_size < need_ws) { + return INFINI_STATUS_INTERNAL_ERROR; + } + float *per_sample = reinterpret_cast(workspace); + + const int64_t *tgt_i64 = reinterpret_cast(target); + const int64_t ignore_index = -100; + + // 1) 写 per-sample loss -> workspace(float) + dim3 block(256); + dim3 grid((total + block.x - 1) / block.x); + cudaStream_t st = 
(cudaStream_t)stream; + + if (_dtype == INFINI_DTYPE_F32) { + cuda::softmaxCrossEntropy_per_sample<<>>( + per_sample, (const float *)logits, tgt_i64, N, C, inner, ignore_index); + } else if (_dtype == INFINI_DTYPE_F16) { + cuda::softmaxCrossEntropy_per_sample<<>>( + per_sample, (const half *)logits, tgt_i64, N, C, inner, ignore_index); + } else if (_dtype == INFINI_DTYPE_BF16) { + cuda::softmaxCrossEntropy_per_sample<__nv_bfloat16, float> + <<>>(per_sample, (const __nv_bfloat16 *)logits, + tgt_i64, N, C, inner, ignore_index); + } + { + auto err = cudaGetLastError(); + if (err != cudaSuccess) { + return INFINI_STATUS_INTERNAL_ERROR; + } + } + + // 2) host 侧 mean(仅统计 target != ignore_index) + std::vector h_loss((size_t)total); + std::vector h_tgt((size_t)total); + if (cudaMemcpyAsync(h_loss.data(), per_sample, need_ws, + cudaMemcpyDeviceToHost, st) + != cudaSuccess) { + return INFINI_STATUS_INTERNAL_ERROR; + } + if (cudaMemcpyAsync(h_tgt.data(), tgt_i64, (size_t)total * sizeof(int64_t), + cudaMemcpyDeviceToHost, st) + != cudaSuccess) { + return INFINI_STATUS_INTERNAL_ERROR; + } + if (cudaStreamSynchronize(st) != cudaSuccess) { + return INFINI_STATUS_INTERNAL_ERROR; + } + + double acc = 0.0; + long long cnt = 0; + for (long long i = 0; i < total; ++i) { + if (h_tgt[i] != ignore_index) { + acc += (double)h_loss[i]; + ++cnt; + } + } + double mean = (cnt > 0) ? (acc / (double)cnt) : 0.0; + + // 3) 把标量 mean 写回 device 的 loss 指针(按输入 dtype 写 1 个元素) + if (_dtype == INFINI_DTYPE_F32) { + float v = (float)mean; + if (cudaMemcpyAsync(loss, &v, sizeof(float), cudaMemcpyHostToDevice, st) != cudaSuccess) { + return INFINI_STATUS_INTERNAL_ERROR; + } + } else if (_dtype == INFINI_DTYPE_F16) { + half v = __float2half((float)mean); + if (cudaMemcpyAsync(loss, &v, sizeof(half), cudaMemcpyHostToDevice, st) != cudaSuccess) { + return INFINI_STATUS_INTERNAL_ERROR; + } + } else if (_dtype == INFINI_DTYPE_BF16) { + __nv_bfloat16 v = __float2bfloat16((float)mean); + if (cudaMemcpyAsync(loss, &v, sizeof(__nv_bfloat16), cudaMemcpyHostToDevice, + st) + != cudaSuccess) { + return INFINI_STATUS_INTERNAL_ERROR; + } + } + if (cudaStreamSynchronize(st) != cudaSuccess) { + return INFINI_STATUS_INTERNAL_ERROR; + } + + return INFINI_STATUS_SUCCESS; +#else + return INFINI_STATUS_NOT_IMPLEMENTED; +#endif +} +} // namespace op::cross_entropy_loss::nvidia diff --git a/src/infiniop/ops/cross_entropy_loss/nvidia/cross_entropy_loss_nvidia.cuh b/src/infiniop/ops/cross_entropy_loss/nvidia/cross_entropy_loss_nvidia.cuh new file mode 100644 index 000000000..843fc943d --- /dev/null +++ b/src/infiniop/ops/cross_entropy_loss/nvidia/cross_entropy_loss_nvidia.cuh @@ -0,0 +1,8 @@ +#ifndef __CROSS_ENTROPY_LOSS_CUDA_CUH__ +#define __CROSS_ENTROPY_LOSS_CUDA_CUH__ + +#include "../cross_entropy_loss.h" + +DESCRIPTOR(nvidia) + +#endif // __CROSS_ENTROPY_LOSS_CUDA_CUH__ diff --git a/src/infiniop/ops/cross_entropy_loss/operator.cc b/src/infiniop/ops/cross_entropy_loss/operator.cc new file mode 100644 index 000000000..8668dc574 --- /dev/null +++ b/src/infiniop/ops/cross_entropy_loss/operator.cc @@ -0,0 +1,142 @@ +#include "../../operator.h" +#include "../../handle.h" +#include "infiniop/ops/cross_entropy_loss.h" + +#ifdef ENABLE_CPU_API +#include "cpu/cross_entropy_loss_cpu.h" +#endif +#if defined(ENABLE_NVIDIA_API) || defined(ENABLE_ILUVATAR_API) || defined(ENABLE_QY_API) +#include "nvidia/cross_entropy_loss_nvidia.cuh" +#endif + +__C infiniStatus_t infiniopCreateCrossEntropyLossDescriptor( + infiniopHandle_t handle, + 
infiniopCrossEntropyLossDescriptor_t *desc_ptr, + infiniopTensorDescriptor_t loss_desc, + infiniopTensorDescriptor_t logits_desc, + infiniopTensorDescriptor_t target_desc) { + +#define CREATE(CASE, NAMESPACE) \ + case CASE: \ + return op::cross_entropy_loss::NAMESPACE::Descriptor::create( \ + handle, \ + reinterpret_cast(desc_ptr), \ + loss_desc, \ + logits_desc, \ + target_desc) + + switch (handle->device) { + +#ifdef ENABLE_CPU_API + CREATE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + CREATE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + CREATE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + CREATE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef CREATE +} + +__C infiniStatus_t infiniopGetCrossEntropyLossWorkspaceSize(infiniopCrossEntropyLossDescriptor_t desc, size_t *size) { + +#define GET(CASE, NAMESPACE) \ + case CASE: \ + *size = reinterpret_cast(desc)->workspaceSize(); \ + return INFINI_STATUS_SUCCESS; + + switch (desc->device_type) { +#ifdef ENABLE_CPU_API + GET(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + GET(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + GET(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + GET(INFINI_DEVICE_QY, nvidia); +#endif + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } +#undef GET + + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; +} + +__C infiniStatus_t infiniopCrossEntropyLoss( + infiniopCrossEntropyLossDescriptor_t desc, + void *workspace, + size_t workspace_size, + void *loss, + const void *logits, + const void *target, + void *stream) { + +#define CALCULATE(CASE, NAMESPACE) \ + case CASE: \ + return reinterpret_cast(desc) \ + ->calculate(workspace, workspace_size, loss, logits, target, stream) + + switch (desc->device_type) { + +#ifdef ENABLE_CPU_API + CALCULATE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + CALCULATE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + CALCULATE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + CALCULATE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef CALCULATE +} + +__C infiniStatus_t +infiniopDestroyCrossEntropyLossDescriptor(infiniopCrossEntropyLossDescriptor_t desc) { + +#define DELETE(CASE, NAMESPACE) \ + case CASE: \ + delete reinterpret_cast(desc); \ + return INFINI_STATUS_SUCCESS; + + switch (desc->device_type) { + +#ifdef ENABLE_CPU_API + DELETE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + DELETE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + DELETE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + DELETE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef DELETE +} diff --git a/src/infiniop/ops/exp/cpu/exp_cpu.cc b/src/infiniop/ops/exp/cpu/exp_cpu.cc new file mode 100644 index 000000000..58a6d0f2d --- /dev/null +++ b/src/infiniop/ops/exp/cpu/exp_cpu.cc @@ -0,0 +1,52 @@ +#include "exp_cpu.h" + +namespace op::exp::cpu { + +Descriptor::~Descriptor() = default; + +infiniStatus_t Descriptor::create( + infiniopHandle_t handle_, + Descriptor **desc_ptr, + infiniopTensorDescriptor_t out_desc, + std::vector input_desc_vec) { + + auto handle = reinterpret_cast(handle_); + auto dtype = out_desc->dtype(); + + const auto &input_desc = input_desc_vec.at(0); + const auto &output_shape = out_desc->shape(); + const auto 
&input_shape = input_desc->shape(); + + CHECK_DTYPE(dtype, INFINI_DTYPE_F16, INFINI_DTYPE_F32, INFINI_DTYPE_F64, INFINI_DTYPE_BF16); + + CHECK_SAME_SHAPE(output_shape, input_shape); + + // create CPU elementwise descriptor + CREATE_ELEMENTWISE_CPU_DESCRIPTOR(handle, dtype, out_desc, input_desc_vec); + + return INFINI_STATUS_SUCCESS; +} + +infiniStatus_t Descriptor::calculate( + void *workspace, + size_t workspace_size, + void *output, + std::vector inputs, + void *stream) const { + + switch (_dtype) { + case INFINI_DTYPE_F16: + return _device_info->calculate(_info, output, inputs, stream); + case INFINI_DTYPE_F32: + return _device_info->calculate(_info, output, inputs, stream); + case INFINI_DTYPE_F64: + return _device_info->calculate(_info, output, inputs, stream); + case INFINI_DTYPE_BF16: + return _device_info->calculate(_info, output, inputs, stream); + default: + return INFINI_STATUS_BAD_TENSOR_DTYPE; + } + + return INFINI_STATUS_SUCCESS; +} +} // namespace op::exp::cpu diff --git a/src/infiniop/ops/exp/cpu/exp_cpu.h b/src/infiniop/ops/exp/cpu/exp_cpu.h new file mode 100644 index 000000000..867c7afa5 --- /dev/null +++ b/src/infiniop/ops/exp/cpu/exp_cpu.h @@ -0,0 +1,21 @@ +#ifndef __EXP_CPU_H__ +#define __EXP_CPU_H__ + +#include "../../../elementwise/cpu/elementwise_cpu.h" +#include + +ELEMENTWISE_DESCRIPTOR(exp, cpu) + +namespace op::exp::cpu { +typedef struct ExpOp { +public: + static constexpr size_t num_inputs = 1; + + template + T operator()(const T &input) const { + return std::exp(input); + } +} ExpOp; +} // namespace op::exp::cpu + +#endif // __EXP_CPU_H__ diff --git a/src/infiniop/ops/exp/cuda/kernel.cuh b/src/infiniop/ops/exp/cuda/kernel.cuh new file mode 100644 index 000000000..12446f31a --- /dev/null +++ b/src/infiniop/ops/exp/cuda/kernel.cuh @@ -0,0 +1,39 @@ +#ifndef __EXP_CUDA_H__ +#define __EXP_CUDA_H__ + +#include +#include +#include + +namespace op::exp::cuda { +typedef struct ExpOp { + static constexpr size_t num_inputs = 1; + + template + __device__ __forceinline__ T operator()(const T &input) const { + if constexpr (std::is_same_v) { + float2 vf = __half22float2(input); + float2 vr = make_float2(__expf(vf.x), __expf(vf.y)); + return __float22half2_rn(vr); + } else if constexpr (std::is_same_v) { + float inputf = __half2float(input); + return __float2half_rn(__expf(inputf)); + } else if constexpr (std::is_same_v) { + float f0 = __bfloat162float(__low2bfloat16(input)); + float f1 = __bfloat162float(__high2bfloat16(input)); + return __floats2bfloat162_rn(__expf(f0), __expf(f1)); + } else if constexpr (std::is_same_v) { + float inputf = __bfloat162float(input); + return __float2bfloat16_rn(__expf(inputf)); + } else if constexpr (std::is_same_v) { + return __expf(input); + } else if constexpr (std::is_same_v) { + return std::exp(input); + } else { + return std::exp(input); + } + } +} ExpOp; +} // namespace op::exp::cuda + +#endif // __EXP_CUDA_H__ diff --git a/src/infiniop/ops/exp/nvidia/exp_nvidia.cu b/src/infiniop/ops/exp/nvidia/exp_nvidia.cu new file mode 100644 index 000000000..3bdf2eb45 --- /dev/null +++ b/src/infiniop/ops/exp/nvidia/exp_nvidia.cu @@ -0,0 +1,59 @@ +#include "../../../elementwise/nvidia/elementwise_nvidia.cuh" + +#include "../cuda/kernel.cuh" +#include "exp_nvidia.cuh" + +namespace op::exp::nvidia { + +Descriptor::~Descriptor() = default; + +infiniStatus_t Descriptor::create( + infiniopHandle_t handle_, + Descriptor **desc_ptr, + infiniopTensorDescriptor_t out_desc, + std::vector input_desc_vec) { + + auto handle = reinterpret_cast(handle_); + auto 
dtype = out_desc->dtype(); + + const auto &input_desc = input_desc_vec.at(0); + const auto &output_shape = out_desc->shape(); + const auto &input_shape = input_desc->shape(); + + CHECK_DTYPE(dtype, INFINI_DTYPE_F16, INFINI_DTYPE_F32, INFINI_DTYPE_F64, INFINI_DTYPE_BF16); + + CHECK_SAME_SHAPE(output_shape, input_shape); + + // create CUDA elementwise descriptor + CREATE_ELEMENTWISE_CUDA_DESCRIPTOR(handle, dtype, out_desc, input_desc_vec) + + return INFINI_STATUS_SUCCESS; +} + +infiniStatus_t Descriptor::calculate( + void *workspace, + size_t workspace_size, + void *output, + std::vector inputs, + void *stream) const { + + if (workspace_size < _workspace_size) { + return INFINI_STATUS_INSUFFICIENT_WORKSPACE; + } + + switch (_dtype) { + case INFINI_DTYPE_F16: + return _device_info->calculate<256, cuda::ExpOp, half>(_info, workspace, output, inputs, stream); + case INFINI_DTYPE_BF16: + return _device_info->calculate<256, cuda::ExpOp, cuda_bfloat16>(_info, workspace, output, inputs, stream); + case INFINI_DTYPE_F32: + return _device_info->calculate<256, cuda::ExpOp, float>(_info, workspace, output, inputs, stream); + case INFINI_DTYPE_F64: + return _device_info->calculate<256, cuda::ExpOp, double>(_info, workspace, output, inputs, stream); + default: + return INFINI_STATUS_BAD_TENSOR_DTYPE; + } + + return INFINI_STATUS_SUCCESS; +} +} // namespace op::exp::nvidia diff --git a/src/infiniop/ops/exp/nvidia/exp_nvidia.cuh b/src/infiniop/ops/exp/nvidia/exp_nvidia.cuh new file mode 100644 index 000000000..7545e8f3e --- /dev/null +++ b/src/infiniop/ops/exp/nvidia/exp_nvidia.cuh @@ -0,0 +1,8 @@ +#ifndef __EXP_CUDA_API_H__ +#define __EXP_CUDA_API_H__ + +#include "../../../elementwise/nvidia/elementwise_nvidia_api.cuh" + +ELEMENTWISE_DESCRIPTOR(exp, nvidia) + +#endif // __EXP_CUDA_API_H__ diff --git a/src/infiniop/ops/exp/operator.cc b/src/infiniop/ops/exp/operator.cc new file mode 100644 index 000000000..cc369d660 --- /dev/null +++ b/src/infiniop/ops/exp/operator.cc @@ -0,0 +1,139 @@ +#include "../../operator.h" +#include "../../handle.h" +#include "infiniop/ops/exp.h" + +#ifdef ENABLE_CPU_API +#include "cpu/exp_cpu.h" +#endif +#if defined(ENABLE_NVIDIA_API) || defined(ENABLE_ILUVATAR_API) || defined(ENABLE_QY_API) +#include "nvidia/exp_nvidia.cuh" +#endif + +__C infiniStatus_t infiniopCreateExpDescriptor( + infiniopHandle_t handle, + infiniopExpDescriptor_t *desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc) { + +#define CREATE(CASE, NAMESPACE) \ + case CASE: \ + return op::exp::NAMESPACE::Descriptor::create( \ + handle, \ + reinterpret_cast(desc_ptr), \ + output_desc, \ + {input_desc}) + + switch (handle->device) { + +#ifdef ENABLE_CPU_API + CREATE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + CREATE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + CREATE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + CREATE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef CREATE +} + +__C infiniStatus_t infiniopGetExpWorkspaceSize(infiniopExpDescriptor_t desc, size_t *size) { + +#define GET(CASE, NAMESPACE) \ + case CASE: \ + *size = reinterpret_cast(desc)->workspaceSize(); \ + return INFINI_STATUS_SUCCESS; + + switch (desc->device_type) { +#ifdef ENABLE_CPU_API + GET(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + GET(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + GET(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef 
ENABLE_QY_API + GET(INFINI_DEVICE_QY, nvidia); +#endif + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } +#undef GET + + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; +} + +__C infiniStatus_t infiniopExp( + infiniopExpDescriptor_t desc, + void *workspace, + size_t workspace_size, + void *output, + const void *input, + void *stream) { + +#define CALCULATE(CASE, NAMESPACE) \ + case CASE: \ + return reinterpret_cast(desc) \ + ->calculate(workspace, workspace_size, output, {input}, stream) + + switch (desc->device_type) { + +#ifdef ENABLE_CPU_API + CALCULATE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + CALCULATE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + CALCULATE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + CALCULATE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef CALCULATE +} + +__C infiniStatus_t +infiniopDestroyExpDescriptor(infiniopExpDescriptor_t desc) { + +#define DELETE(CASE, NAMESPACE) \ + case CASE: \ + delete reinterpret_cast(desc); \ + return INFINI_STATUS_SUCCESS; + + switch (desc->device_type) { + +#ifdef ENABLE_CPU_API + DELETE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + DELETE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + DELETE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + DELETE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef DELETE +} diff --git a/src/infiniop/ops/gather/cpu/gather_cpu.cc b/src/infiniop/ops/gather/cpu/gather_cpu.cc new file mode 100644 index 000000000..f7251bdd7 --- /dev/null +++ b/src/infiniop/ops/gather/cpu/gather_cpu.cc @@ -0,0 +1,96 @@ +#include "gather_cpu.h" +#include "../../../devices/cpu/common_cpu.h" +#include "../../../reduce/cpu/reduce.h" +#include "../info.h" + +namespace op::gather::cpu { + +infiniStatus_t calculate_gather( + const GatherInfo &info, + char *output, + const char *input, + const int64_t *index) { + // -------------------------------- start: perform operator on CPU -------------------------------- + std::vector contiguous_strides(info.ndim); + ptrdiff_t last_dim = 1; + ptrdiff_t last_stride = 1; + for (size_t d = 0; d < info.ndim; d++) { + contiguous_strides[d] = last_dim * last_stride; + last_dim = info.output_shape[d]; + last_stride = contiguous_strides[d]; + } + size_t total_size = last_dim * last_stride; + + int gather_dim = static_cast(info.dim); + size_t element_size = infiniSizeOf(info.dtype); + +#pragma omp parallel for + for (int i = 0; i < static_cast(total_size); i++) { + auto output_ptr = output; + auto input_ptr = input; + auto index_ptr = index; + size_t rem = static_cast(i); + for (int d = static_cast(info.ndim) - 1; d >= 0; d--) { + size_t dim_index = rem / contiguous_strides[d]; + rem = rem % contiguous_strides[d]; + output_ptr += dim_index * element_size * info.output_strides[d]; + index_ptr += dim_index * info.index_strides[d]; + if (d != gather_dim) { + input_ptr += dim_index * element_size * info.input_strides[d]; + } + } + int64_t gather_number = *index_ptr; + input_ptr += gather_number * element_size * info.input_strides[gather_dim]; + // *output_ptr = *input_ptr; + memcpy( + output_ptr, + input_ptr, + element_size); + } + // --------------------------------- end: perform operator on CPU --------------------------------- + return INFINI_STATUS_SUCCESS; +} + +Descriptor::~Descriptor() = default; + +infiniStatus_t Descriptor::create( + 
infiniopHandle_t handle_, + Descriptor **desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + infiniopTensorDescriptor_t index_desc, + size_t dim) { + auto handle = reinterpret_cast(handle_); + + // --------------------- start: check data type and calculate workspace size ---------------------- + auto dtype = output_desc->dtype(); + size_t WorkSpaceSize = 0; + // ---------------------- end: check data type and calculate workspace size ----------------------- + + auto result = GatherInfo::createGatherInfo( + output_desc, + input_desc, + index_desc, + dim); + CHECK_RESULT(result); + const GatherInfo &info = result.take(); + + *desc_ptr = new Descriptor( + dtype, std::move(info), WorkSpaceSize, + nullptr, + handle->device, handle->device_id); + + return INFINI_STATUS_SUCCESS; +} + +infiniStatus_t Descriptor::calculate( + void *workspace, + size_t workspace_size, + void *output, + const void *input, + const void *index, + void *stream) const { + + return calculate_gather(_info, (char *)output, (const char *)input, (const int64_t *)index); +} +} // namespace op::gather::cpu diff --git a/src/infiniop/ops/gather/cpu/gather_cpu.h b/src/infiniop/ops/gather/cpu/gather_cpu.h new file mode 100644 index 000000000..dac3583ac --- /dev/null +++ b/src/infiniop/ops/gather/cpu/gather_cpu.h @@ -0,0 +1,8 @@ +#ifndef __GATHER_CPU_H__ +#define __GATHER_CPU_H__ + +#include "../gather.h" + +DESCRIPTOR(cpu) + +#endif // __GATHER_CPU_H__ diff --git a/src/infiniop/ops/gather/cuda/kernel.cuh b/src/infiniop/ops/gather/cuda/kernel.cuh new file mode 100644 index 000000000..dbb818e83 --- /dev/null +++ b/src/infiniop/ops/gather/cuda/kernel.cuh @@ -0,0 +1,37 @@ +#ifndef __GATHER_KERNEL_CUH__ +#define __GATHER_KERNEL_CUH__ +// ------------------------------- start: perform operator on CUDA -------------------------------- +template +__device__ void gatherKernel( + Tdata *output, + const Tdata *input, + const int64_t *index, + size_t ndim, + size_t index_gather_size, + ptrdiff_t *output_strides, + ptrdiff_t *input_strides, + ptrdiff_t *index_strides, + ptrdiff_t *contiguous_strides, + int gather_dim) { + auto output_ptr = output; + auto input_ptr = input; + auto index_ptr = index; + size_t rem = blockIdx.x; + for (int d = ndim - 1; d >= 0; d--) { + if (d == gather_dim) { + continue; + } + size_t dim_index = rem / contiguous_strides[d]; + rem = rem % contiguous_strides[d]; + output_ptr += dim_index * output_strides[d]; + input_ptr += dim_index * input_strides[d]; + index_ptr += dim_index * index_strides[d]; + } + for (size_t c = threadIdx.x; c < index_gather_size; c++) { + int64_t gather_number = *(index_ptr + c * index_strides[gather_dim]); + *(output_ptr + c * output_strides[gather_dim]) = *(input_ptr + gather_number * input_strides[gather_dim]); + } +} +// -------------------------------- end: perform operator on CUDA --------------------------------- + +#endif // __GATHER_KERNEL_CUH__ diff --git a/src/infiniop/ops/gather/gather.h b/src/infiniop/ops/gather/gather.h new file mode 100644 index 000000000..d4c88b0c4 --- /dev/null +++ b/src/infiniop/ops/gather/gather.h @@ -0,0 +1,47 @@ +#ifndef __GATHER_H__ +#define __GATHER_H__ + +#include "../../../utils.h" +#include "../../operator.h" +#include "../../tensor.h" +#include "info.h" + +#define DESCRIPTOR(NAMESPACE) \ + namespace op::gather::NAMESPACE { \ + class Descriptor final : public InfiniopDescriptor { \ + struct Opaque; \ + Opaque *_opaque; \ + GatherInfo _info; \ + size_t _workspace_size; \ + Descriptor( \ + infiniDtype_t dtype, 
\ + GatherInfo info, \ + size_t workspace_size_, \ + Opaque *opaque, \ + infiniDevice_t device_type, \ + int device_id) : InfiniopDescriptor{device_type, device_id}, \ + _opaque(opaque), \ + _info(info), \ + _workspace_size(workspace_size_) {} \ + \ + public: \ + ~Descriptor(); \ + size_t workspaceSize() const { return _workspace_size; } \ + static infiniStatus_t create( \ + infiniopHandle_t handle, \ + Descriptor **desc_ptr, \ + infiniopTensorDescriptor_t output_desc, \ + infiniopTensorDescriptor_t input_desc, \ + infiniopTensorDescriptor_t index_desc, \ + size_t dim); \ + infiniStatus_t calculate( \ + void *workspace, \ + size_t workspace_size, \ + void *output, \ + const void *input, \ + const void *index, \ + void *stream) const; \ + }; \ + } + +#endif \ No newline at end of file diff --git a/src/infiniop/ops/gather/info.h b/src/infiniop/ops/gather/info.h new file mode 100644 index 000000000..0098c7ea1 --- /dev/null +++ b/src/infiniop/ops/gather/info.h @@ -0,0 +1,58 @@ +#ifndef __GATHER_INFO_H__ +#define __GATHER_INFO_H__ + +#include "../../../utils.h" +#include "../../operator.h" +#include "../../tensor.h" + +namespace op::gather { + +class GatherInfo { +private: + GatherInfo() = default; + +public: + // ---------------------------- start: define member variables of Info ---------------------------- + infiniDtype_t dtype; + size_t ndim; + std::vector output_shape; + size_t input_dim_size; + std::vector output_strides; + std::vector input_strides; + std::vector index_strides; + size_t dim; + + // ----------------------------- end: define member variables of Info ----------------------------- + + static utils::Result createGatherInfo( + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + infiniopTensorDescriptor_t index_desc, + size_t dim) { + // ------------------------- start: check tensor shape and input validity ------------------------- + CHECK_SAME_SHAPE(output_desc->shape(), index_desc->shape()); + size_t ndim = output_desc->ndim(); + for (size_t d = 0; d < ndim; d++) { + if (d != dim) { + CHECK_OR_RETURN(input_desc->dim(d) == output_desc->dim(d), INFINI_STATUS_BAD_TENSOR_SHAPE); + } + } + CHECK_OR_RETURN(ndim > dim, INFINI_STATUS_BAD_PARAM); + // -------------------------- end: check tensor shape and input validity -------------------------- + return utils::Result(GatherInfo{ + // ------------------------------ start: create an instance of Info ------------------------------- + output_desc->dtype(), + ndim, + output_desc->shape(), + input_desc->dim(dim), + output_desc->strides(), + input_desc->strides(), + index_desc->strides(), + dim + // ------------------------------- end: create an instance of Info -------------------------------- + }); + } +}; +} // namespace op::gather + +#endif // __GATHER_INFO_H__ diff --git a/src/infiniop/ops/gather/nvidia/gather_nvidia.cu b/src/infiniop/ops/gather/nvidia/gather_nvidia.cu new file mode 100644 index 000000000..f0a2a7fb3 --- /dev/null +++ b/src/infiniop/ops/gather/nvidia/gather_nvidia.cu @@ -0,0 +1,179 @@ +#include "../../../devices/nvidia/nvidia_common.cuh" +#include "../../../devices/nvidia/nvidia_handle.cuh" +#include "../../../devices/nvidia/nvidia_kernel_common.cuh" +#include "../cuda/kernel.cuh" +#include "../info.h" +#include "gather_nvidia.cuh" + +namespace op::gather::nvidia { + +// ---------------------- start: launchKernel: call kernel function of CUDA ----------------------- +template +INFINIOP_CUDA_KERNEL launchKernel( + Tdata *output, + const Tdata *input, + const int64_t *index, + size_t ndim, 
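// index_gather_size is the extent of the gather dimension (info.output_shape[dim]);
// calculate_gather below launches one block per combination of the remaining dims.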
+ size_t index_gather_size, + ptrdiff_t *output_strides, + ptrdiff_t *input_strides, + ptrdiff_t *index_strides, + ptrdiff_t *contiguous_strides, + int gather_dim) { + gatherKernel( + output, + input, + index, + ndim, + index_gather_size, + output_strides, + input_strides, + index_strides, + contiguous_strides, + gather_dim); +} +// ----------------------- end: launchKernel: call kernel function of CUDA ------------------------ + +// ----------------------------------- start: call launchKernel ----------------------------------- +template +infiniStatus_t calculate_gather( + const GatherInfo &info, + Tdata *output, + const Tdata *input, + const int64_t *index, + cudaStream_t stream, + void *workspace) { + size_t ndim = info.ndim; + ptrdiff_t *contiguous_strides = new ptrdiff_t[ndim]; + size_t last_dim = 1, last_stride = 1; + size_t gather_dim = info.dim; + for (size_t d = 0; d < ndim; d++) { + if (d == gather_dim) { + continue; + } + contiguous_strides[d] = last_dim * last_stride; + last_dim = info.output_shape[d]; + last_stride = contiguous_strides[d]; + } + size_t batch_size = last_dim * last_stride; + + ptrdiff_t *contiguous_strides_cuda = reinterpret_cast(workspace); + ptrdiff_t *input_strides_cuda = contiguous_strides_cuda + ndim; + ptrdiff_t *output_strides_cuda = input_strides_cuda + ndim; + ptrdiff_t *index_strides_cuda = output_strides_cuda + ndim; + + CHECK_CUDA(cudaMemcpyAsync(contiguous_strides_cuda, contiguous_strides, sizeof(ptrdiff_t) * ndim, cudaMemcpyHostToDevice, stream)); + CHECK_CUDA(cudaMemcpyAsync(input_strides_cuda, info.input_strides.data(), sizeof(ptrdiff_t) * ndim, cudaMemcpyHostToDevice, stream)); + CHECK_CUDA(cudaMemcpyAsync(output_strides_cuda, info.output_strides.data(), sizeof(ptrdiff_t) * ndim, cudaMemcpyHostToDevice, stream)); + CHECK_CUDA(cudaMemcpyAsync(index_strides_cuda, info.index_strides.data(), sizeof(ptrdiff_t) * ndim, cudaMemcpyHostToDevice, stream)); + + launchKernel<1, Tdata><<>>( + output, + input, + index, + ndim, + info.output_shape[gather_dim], + output_strides_cuda, + input_strides_cuda, + index_strides_cuda, + contiguous_strides_cuda, + info.dim); + delete[] contiguous_strides; + return INFINI_STATUS_SUCCESS; +} +// ------------------------------------ end: call launchKernel ------------------------------------ + +struct Descriptor::Opaque { + std::shared_ptr internal; +}; + +Descriptor::~Descriptor() { + delete _opaque; +} + +infiniStatus_t Descriptor::create( + infiniopHandle_t handle_, + Descriptor **desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + infiniopTensorDescriptor_t index_desc, + size_t dim) { + auto handle = reinterpret_cast(handle_); + // --------------------- start: check data type and calculate workspace size ---------------------- + auto dtype = output_desc->dtype(); + size_t WorkSpaceSize = sizeof(ptrdiff_t) * input_desc->ndim() * 4; + // ---------------------- end: check data type and calculate workspace size ----------------------- + auto result = GatherInfo::createGatherInfo( + output_desc, + input_desc, + index_desc, + dim); + CHECK_RESULT(result); + const GatherInfo &info = result.take(); + *desc_ptr = new Descriptor( + dtype, std::move(info), WorkSpaceSize, + new Opaque{handle->internal()}, + handle->device, handle->device_id); + return INFINI_STATUS_SUCCESS; +} + +infiniStatus_t Descriptor::calculate( + void *workspace, + size_t workspace_size, + void *output, + const void *input, + const void *index, + void *stream_) const { + if (workspace_size < _workspace_size) { + 
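// _workspace_size covers the four ndim-sized stride arrays (contiguous, input,
// output, index) that calculate_gather stages on the device before the launch.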
return INFINI_STATUS_INSUFFICIENT_WORKSPACE; + } + cudaStream_t stream = (cudaStream_t)stream_; + +#define CALCULATE_GATHER(BLOCK_SIZE, TDATA) \ + calculate_gather(_info, (TDATA *)output, (const TDATA *)input, (const int64_t *)index, stream, workspace) +#define CALCULATE_GATHER_WITH_BLOCK_SIZE(BLOCK_SIZE) \ + switch (_info.dtype) { \ + case INFINI_DTYPE_BOOL: \ + return CALCULATE_GATHER(BLOCK_SIZE, bool); \ + case INFINI_DTYPE_U8: \ + return CALCULATE_GATHER(BLOCK_SIZE, uint8_t); \ + case INFINI_DTYPE_U16: \ + return CALCULATE_GATHER(BLOCK_SIZE, uint16_t); \ + case INFINI_DTYPE_U32: \ + return CALCULATE_GATHER(BLOCK_SIZE, uint32_t); \ + case INFINI_DTYPE_U64: \ + return CALCULATE_GATHER(BLOCK_SIZE, uint64_t); \ + case INFINI_DTYPE_I8: \ + return CALCULATE_GATHER(BLOCK_SIZE, int8_t); \ + case INFINI_DTYPE_I16: \ + return CALCULATE_GATHER(BLOCK_SIZE, int16_t); \ + case INFINI_DTYPE_I32: \ + return CALCULATE_GATHER(BLOCK_SIZE, int32_t); \ + case INFINI_DTYPE_I64: \ + return CALCULATE_GATHER(BLOCK_SIZE, int64_t); \ + case INFINI_DTYPE_F16: \ + return CALCULATE_GATHER(BLOCK_SIZE, half); \ + case INFINI_DTYPE_F32: \ + return CALCULATE_GATHER(BLOCK_SIZE, float); \ + case INFINI_DTYPE_BF16: \ + return CALCULATE_GATHER(BLOCK_SIZE, cuda_bfloat16); \ + default: \ + return INFINI_STATUS_BAD_TENSOR_DTYPE; \ + } + + if (_opaque->internal->maxThreadsPerBlock() == CUDA_BLOCK_SIZE_1024) { + CALCULATE_GATHER_WITH_BLOCK_SIZE(CUDA_BLOCK_SIZE_1024) + } else if (_opaque->internal->maxThreadsPerBlock() == CUDA_BLOCK_SIZE_512) { + CALCULATE_GATHER_WITH_BLOCK_SIZE(CUDA_BLOCK_SIZE_512) + } else if (_opaque->internal->maxThreadsPerBlock() == CUDA_BLOCK_SIZE_4096) { + CALCULATE_GATHER_WITH_BLOCK_SIZE(CUDA_BLOCK_SIZE_4096) + } else { + return INFINI_STATUS_DEVICE_ARCHITECTURE_NOT_SUPPORTED; + } + +#undef CALCULATE_GATHER_WITH_BLOCK_SIZE +#undef CALCULATE_GATHER + + return INFINI_STATUS_SUCCESS; +} +} // namespace op::gather::nvidia diff --git a/src/infiniop/ops/gather/nvidia/gather_nvidia.cuh b/src/infiniop/ops/gather/nvidia/gather_nvidia.cuh new file mode 100644 index 000000000..46d42fa0c --- /dev/null +++ b/src/infiniop/ops/gather/nvidia/gather_nvidia.cuh @@ -0,0 +1,7 @@ +#ifndef __GATHER_NVIDIA_API_H__ +#define __GATHER_NVIDIA_API_H__ +#include "../gather.h" + +DESCRIPTOR(nvidia) + +#endif // __GATHER_NVIDIA_API_H__ diff --git a/src/infiniop/ops/gather/operator.cc b/src/infiniop/ops/gather/operator.cc new file mode 100644 index 000000000..706009e9b --- /dev/null +++ b/src/infiniop/ops/gather/operator.cc @@ -0,0 +1,144 @@ +#include "../../operator.h" +#include "../../handle.h" +#include "infiniop/ops/gather.h" + +#ifdef ENABLE_CPU_API +#include "cpu/gather_cpu.h" +#endif +#if defined(ENABLE_NVIDIA_API) || defined(ENABLE_ILUVATAR_API) || defined(ENABLE_QY_API) +#include "nvidia/gather_nvidia.cuh" +#endif + +__C infiniStatus_t infiniopCreateGatherDescriptor( + infiniopHandle_t handle, + infiniopGatherDescriptor_t *desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + infiniopTensorDescriptor_t index_desc, + size_t dim) { + +#define CREATE(CASE, NAMESPACE) \ + case CASE: \ + return op::gather::NAMESPACE::Descriptor::create( \ + handle, \ + reinterpret_cast(desc_ptr), \ + output_desc, \ + input_desc, \ + index_desc, \ + dim) + + switch (handle->device) { + +#ifdef ENABLE_CPU_API + CREATE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + CREATE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + CREATE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef 
ENABLE_QY_API + CREATE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef CREATE +} + +__C infiniStatus_t infiniopGetGatherWorkspaceSize(infiniopGatherDescriptor_t desc, size_t *size) { + +#define GET(CASE, NAMESPACE) \ + case CASE: \ + *size = reinterpret_cast(desc)->workspaceSize(); \ + return INFINI_STATUS_SUCCESS; + + switch (desc->device_type) { +#ifdef ENABLE_CPU_API + GET(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + GET(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + GET(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + GET(INFINI_DEVICE_QY, nvidia); +#endif + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } +#undef GET + + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; +} + +__C infiniStatus_t infiniopGather( + infiniopGatherDescriptor_t desc, + void *workspace, + size_t workspace_size, + void *output, + const void *input, + const void *index, + void *stream) { + +#define CALCULATE(CASE, NAMESPACE) \ + case CASE: \ + return reinterpret_cast(desc) \ + ->calculate(workspace, workspace_size, output, input, index, stream) + + switch (desc->device_type) { + +#ifdef ENABLE_CPU_API + CALCULATE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + CALCULATE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + CALCULATE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + CALCULATE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef CALCULATE +} + +__C infiniStatus_t +infiniopDestroyGatherDescriptor(infiniopGatherDescriptor_t desc) { + +#define DELETE(CASE, NAMESPACE) \ + case CASE: \ + delete reinterpret_cast(desc); \ + return INFINI_STATUS_SUCCESS; + + switch (desc->device_type) { + +#ifdef ENABLE_CPU_API + DELETE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + DELETE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + DELETE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + DELETE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef DELETE +} diff --git a/src/infiniop/ops/hardswish/cpu/hardswish_cpu.cc b/src/infiniop/ops/hardswish/cpu/hardswish_cpu.cc new file mode 100644 index 000000000..e7b68508a --- /dev/null +++ b/src/infiniop/ops/hardswish/cpu/hardswish_cpu.cc @@ -0,0 +1,52 @@ +#include "hardswish_cpu.h" + +namespace op::hardswish::cpu { + +Descriptor::~Descriptor() = default; + +infiniStatus_t Descriptor::create( + infiniopHandle_t handle_, + Descriptor **desc_ptr, + infiniopTensorDescriptor_t out_desc, + std::vector input_desc_vec) { + + auto handle = reinterpret_cast(handle_); + auto dtype = out_desc->dtype(); + + const auto &input_desc = input_desc_vec.at(0); + const auto &output_shape = out_desc->shape(); + const auto &input_shape = input_desc->shape(); + + CHECK_DTYPE(dtype, INFINI_DTYPE_F16, INFINI_DTYPE_F32, INFINI_DTYPE_F64, INFINI_DTYPE_BF16); + + CHECK_SAME_SHAPE(output_shape, input_shape); + + // create CPU elementwise descriptor + CREATE_ELEMENTWISE_CPU_DESCRIPTOR(handle, dtype, out_desc, input_desc_vec); + + return INFINI_STATUS_SUCCESS; +} + +infiniStatus_t Descriptor::calculate( + void *workspace, + size_t workspace_size, + void *output, + std::vector inputs, + void *stream) const { + + switch (_dtype) { + case INFINI_DTYPE_F16: + return _device_info->calculate(_info, output, inputs, stream); + case INFINI_DTYPE_F32: + return 
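+// Typical call sequence for the operator C APIs in this patch, shown for gather (hardswish
+// and the others follow the same create / workspace / run / destroy pattern). A hedged
+// sketch: the handle, tensor descriptors and device allocator are assumed to exist
+// already, and error checking is elided.
+//
+//   infiniopGatherDescriptor_t desc;
+//   infiniopCreateGatherDescriptor(handle, &desc, out_desc, in_desc, idx_desc, /*dim=*/1);
+//   size_t ws_size = 0;
+//   infiniopGetGatherWorkspaceSize(desc, &ws_size);
+//   void *ws = device_malloc(ws_size);   // assumed allocator
+//   infiniopGather(desc, ws, ws_size, out, in, idx, stream);
+//   infiniopDestroyGatherDescriptor(desc);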
_device_info->calculate(_info, output, inputs, stream); + case INFINI_DTYPE_F64: + return _device_info->calculate(_info, output, inputs, stream); + case INFINI_DTYPE_BF16: + return _device_info->calculate(_info, output, inputs, stream); + default: + return INFINI_STATUS_BAD_TENSOR_DTYPE; + } + + return INFINI_STATUS_SUCCESS; +} +} // namespace op::hardswish::cpu diff --git a/src/infiniop/ops/hardswish/cpu/hardswish_cpu.h b/src/infiniop/ops/hardswish/cpu/hardswish_cpu.h new file mode 100644 index 000000000..e137be8a0 --- /dev/null +++ b/src/infiniop/ops/hardswish/cpu/hardswish_cpu.h @@ -0,0 +1,30 @@ +#ifndef __HARDSWISH_CPU_H__ +#define __HARDSWISH_CPU_H__ + +#include "../../../elementwise/cpu/elementwise_cpu.h" +#include + +ELEMENTWISE_DESCRIPTOR(hardswish, cpu) + +namespace op::hardswish::cpu { +typedef struct HardswishOp { +public: + static constexpr size_t num_inputs = 1; + + template + T operator()(const T &input) const { + if constexpr (std::is_integral_v) { + return static_cast(0); + } else { + // x * clamp(x + 3, 0, 6) / 6 + auto x = static_cast(input); + double y = x + 3.0; + y = std::min(std::max(y, 0.0), 6.0); + double out = x * (y / 6.0); + return static_cast(out); + } + } +} HardswishOp; +} // namespace op::hardswish::cpu + +#endif // __HARDSWISH_CPU_H__ diff --git a/src/infiniop/ops/hardswish/cuda/kernel.cuh b/src/infiniop/ops/hardswish/cuda/kernel.cuh new file mode 100644 index 000000000..d5b369bce --- /dev/null +++ b/src/infiniop/ops/hardswish/cuda/kernel.cuh @@ -0,0 +1,55 @@ +#ifndef __HARDSWISH_CUDA_H__ +#define __HARDSWISH_CUDA_H__ + +#include +#include +#include + +namespace op::hardswish::cuda { + +typedef struct HardswishOp { + static constexpr size_t num_inputs = 1; + + // Hardswish: f(x) = x * clamp(x + 3, 0, 6) / 6 + __device__ __forceinline__ float hswish_f32(float x) const { + float y = x + 3.0f; + y = y < 0.0f ? 0.0f : (y > 6.0f ? 
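+// A few reference points for f(x) = x * clamp(x + 3, 0, 6) / 6, which both the CPU
+// functor earlier in this patch and the hswish_f32 helper here implement:
+//   f(-4) = 0      (clamp saturates at 0)
+//   f(-1) = -1/3
+//   f(0)  = 0
+//   f(1)  = 2/3
+//   f(4)  = 4      (clamp saturates at 6)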
6.0f : y); + return x * (y * (1.0f / 6.0f)); + } + + template + __device__ __forceinline__ T operator()(const T &input) const { + if constexpr (std::is_same_v) { + float2 vf = __half22float2(input); + float2 vr = make_float2( + hswish_f32(vf.x), + hswish_f32(vf.y)); + return __float22half2_rn(vr); + } else if constexpr (std::is_same_v) { + float xf = __half2float(input); + float yf = hswish_f32(xf); + return __float2half_rn(yf); + } else if constexpr (std::is_same_v) { + float f0 = __bfloat162float(__low2bfloat16(input)); + float f1 = __bfloat162float(__high2bfloat16(input)); + return __floats2bfloat162_rn(hswish_f32(f0), hswish_f32(f1)); + } else if constexpr (std::is_same_v) { + float xf = __bfloat162float(input); + return __float2bfloat16_rz(hswish_f32(xf)); + } else if constexpr (std::is_same_v) { + return hswish_f32(input); + } else if constexpr (std::is_same_v) { + double xd = static_cast(input); + double yd = xd * (std::fmin(std::fmax(xd + 3.0, 0.0), 6.0) / 6.0); + return static_cast(yd); + } else { + double xd = static_cast(input); + double yd = xd * (std::fmin(std::fmax(xd + 3.0, 0.0), 6.0) / 6.0); + return static_cast(yd); + } + } +} HardswishOp; + +} // namespace op::hardswish::cuda + +#endif // __HARDSWISH_CUDA_H__ diff --git a/src/infiniop/ops/hardswish/nvidia/hardswish_nvidia.cu b/src/infiniop/ops/hardswish/nvidia/hardswish_nvidia.cu new file mode 100644 index 000000000..9e279c2ef --- /dev/null +++ b/src/infiniop/ops/hardswish/nvidia/hardswish_nvidia.cu @@ -0,0 +1,59 @@ +#include "../../../elementwise/nvidia/elementwise_nvidia.cuh" + +#include "../cuda/kernel.cuh" +#include "hardswish_nvidia.cuh" + +namespace op::hardswish::nvidia { + +Descriptor::~Descriptor() = default; + +infiniStatus_t Descriptor::create( + infiniopHandle_t handle_, + Descriptor **desc_ptr, + infiniopTensorDescriptor_t out_desc, + std::vector input_desc_vec) { + + auto handle = reinterpret_cast(handle_); + auto dtype = out_desc->dtype(); + + const auto &input_desc = input_desc_vec.at(0); + const auto &output_shape = out_desc->shape(); + const auto &input_shape = input_desc->shape(); + + CHECK_DTYPE(dtype, INFINI_DTYPE_F16, INFINI_DTYPE_F32, INFINI_DTYPE_F64, INFINI_DTYPE_BF16); + + CHECK_SAME_SHAPE(output_shape, input_shape); + + // create CUDA elementwise descriptor + CREATE_ELEMENTWISE_CUDA_DESCRIPTOR(handle, dtype, out_desc, input_desc_vec) + + return INFINI_STATUS_SUCCESS; +} + +infiniStatus_t Descriptor::calculate( + void *workspace, + size_t workspace_size, + void *output, + std::vector inputs, + void *stream) const { + + if (workspace_size < _workspace_size) { + return INFINI_STATUS_INSUFFICIENT_WORKSPACE; + } + + switch (_dtype) { + case INFINI_DTYPE_F16: + return _device_info->calculate<256, cuda::HardswishOp, half>(_info, workspace, output, inputs, stream); + case INFINI_DTYPE_BF16: + return _device_info->calculate<256, cuda::HardswishOp, cuda_bfloat16>(_info, workspace, output, inputs, stream); + case INFINI_DTYPE_F32: + return _device_info->calculate<256, cuda::HardswishOp, float>(_info, workspace, output, inputs, stream); + case INFINI_DTYPE_F64: + return _device_info->calculate<256, cuda::HardswishOp, double>(_info, workspace, output, inputs, stream); + default: + return INFINI_STATUS_BAD_TENSOR_DTYPE; + } + + return INFINI_STATUS_SUCCESS; +} +} // namespace op::hardswish::nvidia diff --git a/src/infiniop/ops/hardswish/nvidia/hardswish_nvidia.cuh b/src/infiniop/ops/hardswish/nvidia/hardswish_nvidia.cuh new file mode 100644 index 000000000..f869ad52f --- /dev/null +++ 
b/src/infiniop/ops/hardswish/nvidia/hardswish_nvidia.cuh @@ -0,0 +1,8 @@ +#ifndef __HARDSWISH_CUDA_API_H__ +#define __HARDSWISH_CUDA_API_H__ + +#include "../../../elementwise/nvidia/elementwise_nvidia_api.cuh" + +ELEMENTWISE_DESCRIPTOR(hardswish, nvidia) + +#endif // __HARDSWISH_CUDA_API_H__ diff --git a/src/infiniop/ops/hardswish/operator.cc b/src/infiniop/ops/hardswish/operator.cc new file mode 100644 index 000000000..c51b18777 --- /dev/null +++ b/src/infiniop/ops/hardswish/operator.cc @@ -0,0 +1,139 @@ +#include "../../operator.h" +#include "../../handle.h" +#include "infiniop/ops/hardswish.h" + +#ifdef ENABLE_CPU_API +#include "cpu/hardswish_cpu.h" +#endif +#if defined(ENABLE_NVIDIA_API) || defined(ENABLE_ILUVATAR_API) || defined(ENABLE_QY_API) +#include "nvidia/hardswish_nvidia.cuh" +#endif + +__C infiniStatus_t infiniopCreateHardswishDescriptor( + infiniopHandle_t handle, + infiniopHardswishDescriptor_t *desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc) { + +#define CREATE(CASE, NAMESPACE) \ + case CASE: \ + return op::hardswish::NAMESPACE::Descriptor::create( \ + handle, \ + reinterpret_cast(desc_ptr), \ + output_desc, \ + {input_desc}) + + switch (handle->device) { + +#ifdef ENABLE_CPU_API + CREATE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + CREATE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + CREATE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + CREATE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef CREATE +} + +__C infiniStatus_t infiniopGetHardswishWorkspaceSize(infiniopHardswishDescriptor_t desc, size_t *size) { + +#define GET(CASE, NAMESPACE) \ + case CASE: \ + *size = reinterpret_cast(desc)->workspaceSize(); \ + return INFINI_STATUS_SUCCESS; + + switch (desc->device_type) { +#ifdef ENABLE_CPU_API + GET(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + GET(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + GET(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + GET(INFINI_DEVICE_QY, nvidia); +#endif + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } +#undef GET + + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; +} + +__C infiniStatus_t infiniopHardswish( + infiniopHardswishDescriptor_t desc, + void *workspace, + size_t workspace_size, + void *output, + const void *input, + void *stream) { + +#define CALCULATE(CASE, NAMESPACE) \ + case CASE: \ + return reinterpret_cast(desc) \ + ->calculate(workspace, workspace_size, output, {input}, stream) + + switch (desc->device_type) { + +#ifdef ENABLE_CPU_API + CALCULATE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + CALCULATE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + CALCULATE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + CALCULATE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef CALCULATE +} + +__C infiniStatus_t +infiniopDestroyHardswishDescriptor(infiniopHardswishDescriptor_t desc) { + +#define DELETE(CASE, NAMESPACE) \ + case CASE: \ + delete reinterpret_cast(desc); \ + return INFINI_STATUS_SUCCESS; + + switch (desc->device_type) { + +#ifdef ENABLE_CPU_API + DELETE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + DELETE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + DELETE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + DELETE(INFINI_DEVICE_QY, 
nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef DELETE +} diff --git a/src/infiniop/ops/index_copy_inplace/cpu/index_copy_inplace_cpu.cc b/src/infiniop/ops/index_copy_inplace/cpu/index_copy_inplace_cpu.cc new file mode 100644 index 000000000..68015ba6b --- /dev/null +++ b/src/infiniop/ops/index_copy_inplace/cpu/index_copy_inplace_cpu.cc @@ -0,0 +1,93 @@ +#include "index_copy_inplace_cpu.h" +#include "../../../devices/cpu/common_cpu.h" +#include "../../../reduce/cpu/reduce.h" +#include "../../rearrange/cpu/rearrange_cpu.h" +#include "../info.h" + +namespace op::index_copy_inplace::cpu { + +Descriptor::~Descriptor() = default; + +infiniStatus_t Descriptor::create( + infiniopHandle_t handle_, + Descriptor **desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + infiniopTensorDescriptor_t index_desc, + size_t dim) { + auto handle = reinterpret_cast(handle_); + + // --------------------- start: check data type and calculate workspace size ---------------------- + auto dtype = output_desc->dtype(); + + auto result = IndexCopyInplaceInfo::createIndexCopyInplaceInfo( + output_desc, + input_desc, + index_desc, + dim); + CHECK_RESULT(result); + const IndexCopyInplaceInfo &info = result.take(); + size_t WorkSpaceSize = (info.total_input_size + info.total_output_size) * infiniSizeOf(dtype); + // ---------------------- end: check data type and calculate workspace size ----------------------- + InfiniopTensorDescriptor *rearrange_in_desc = new InfiniopTensorDescriptor( + dtype, input_desc->ndim(), input_desc->shape().data(), info.meta_strides.data()); + InfiniopTensorDescriptor *rearrange_out_desc = new InfiniopTensorDescriptor( + dtype, input_desc->ndim(), output_desc->shape().data(), info.meta_strides.data()); + + void *in_rearrange_descriptor = nullptr; + void *out_rearrange_descriptor = nullptr; + + op::rearrange::cpu::Descriptor::create( + handle_, reinterpret_cast(&in_rearrange_descriptor), + rearrange_in_desc, input_desc); + op::rearrange::cpu::Descriptor::create( + handle_, reinterpret_cast(&out_rearrange_descriptor), + output_desc, rearrange_out_desc); + + *desc_ptr = new Descriptor( + dtype, std::move(info), WorkSpaceSize, + nullptr, + handle->device, handle->device_id, + in_rearrange_descriptor, + out_rearrange_descriptor); + + return INFINI_STATUS_SUCCESS; +} + +infiniStatus_t Descriptor::calculate( + void *workspace, + size_t workspace_size, + void *output, + const void *input, + const void *index, + void *stream) const { + size_t size_of_dtype = infiniSizeOf(_info.dtype); + auto index_ptr = reinterpret_cast(index); + + char *workspace_in = reinterpret_cast(workspace); + char *workspace_out = workspace_in + size_of_dtype * _info.total_input_size; + + reinterpret_cast(_rearrange_desc_in)->calculate(workspace_in, input, stream); + memset(workspace_out, 0, _info.total_output_size * size_of_dtype); + size_t copy_unit_size = _info.meta_strides[_info.dim] * size_of_dtype; +#pragma omp parallel for + for (int dst_index = 0; dst_index < static_cast(_info.output_shape[_info.dim]); dst_index++) { + size_t src_index = _info.index_shape[0] - 1; + while (true) { + if (*(index_ptr + src_index * _info.index_strides[0]) == static_cast(dst_index)) { + std::memcpy( + workspace_out + size_of_dtype * dst_index * _info.meta_strides[_info.dim], + workspace_in + size_of_dtype * src_index * _info.meta_strides[_info.dim], + copy_unit_size); + break; + } else if (src_index == 0) { + break; + } + src_index--; + } + } + 
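+// In the contiguous layout produced by the rearrange above, row i of workspace_in (taken
+// along `dim`) lands in row index[i] of workspace_out. Rows of workspace_out that no index
+// entry refers to keep the zero fill from the memset, and when an index value repeats, the
+// backwards scan means the copy from the largest source position wins. Example with
+// dim = 0 and index = {2, 0, 2}:
+//   output row 0 <- input row 1, output row 2 <- input row 2, output row 1 <- zeros.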
reinterpret_cast(_rearrange_desc_out)->calculate(output, workspace_out, stream); + + return INFINI_STATUS_SUCCESS; +} +} // namespace op::index_copy_inplace::cpu diff --git a/src/infiniop/ops/index_copy_inplace/cpu/index_copy_inplace_cpu.h b/src/infiniop/ops/index_copy_inplace/cpu/index_copy_inplace_cpu.h new file mode 100644 index 000000000..49ba41f42 --- /dev/null +++ b/src/infiniop/ops/index_copy_inplace/cpu/index_copy_inplace_cpu.h @@ -0,0 +1,8 @@ +#ifndef __INDEX_COPY_INPLACE_CPU_H__ +#define __INDEX_COPY_INPLACE_CPU_H__ + +#include "../index_copy_inplace.h" + +INDEX_COPY_INPLACE_DESCRIPTOR(cpu) + +#endif // __INDEX_COPY_INPLACE_CPU_H__ diff --git a/src/infiniop/ops/index_copy_inplace/index_copy_inplace.h b/src/infiniop/ops/index_copy_inplace/index_copy_inplace.h new file mode 100644 index 000000000..80849ee2e --- /dev/null +++ b/src/infiniop/ops/index_copy_inplace/index_copy_inplace.h @@ -0,0 +1,53 @@ +#ifndef __INDEX_COPY_INPLACE_H__ +#define __INDEX_COPY_INPLACE_H__ + +#include "../../../utils.h" +#include "../../operator.h" +#include "../../tensor.h" +#include "info.h" + +#define INDEX_COPY_INPLACE_DESCRIPTOR(NAMESPACE) \ + namespace op::index_copy_inplace::NAMESPACE { \ + class Descriptor final : public InfiniopDescriptor { \ + struct Opaque; \ + Opaque *_opaque; \ + IndexCopyInplaceInfo _info; \ + size_t _workspace_size; \ + void *_rearrange_desc_in; \ + void *_rearrange_desc_out; \ + Descriptor( \ + infiniDtype_t dtype, \ + IndexCopyInplaceInfo info, \ + size_t workspace_size_, \ + Opaque *opaque, \ + infiniDevice_t device_type, \ + int device_id, \ + void *rearrange_desc_in, \ + void *rearrange_desc_out) : InfiniopDescriptor{device_type, device_id}, \ + _opaque(opaque), \ + _info(info), \ + _workspace_size(workspace_size_), \ + _rearrange_desc_in(rearrange_desc_in), \ + _rearrange_desc_out(rearrange_desc_out) {} \ + \ + public: \ + ~Descriptor(); \ + size_t workspaceSize() const { return _workspace_size; } \ + static infiniStatus_t create( \ + infiniopHandle_t handle, \ + Descriptor **desc_ptr, \ + infiniopTensorDescriptor_t output_desc, \ + infiniopTensorDescriptor_t input_desc, \ + infiniopTensorDescriptor_t index_desc, \ + size_t dim); \ + infiniStatus_t calculate( \ + void *workspace, \ + size_t workspace_size, \ + void *output, \ + const void *input, \ + const void *index, \ + void *stream) const; \ + }; \ + } + +#endif \ No newline at end of file diff --git a/src/infiniop/ops/index_copy_inplace/info.h b/src/infiniop/ops/index_copy_inplace/info.h new file mode 100644 index 000000000..d7ad41d6f --- /dev/null +++ b/src/infiniop/ops/index_copy_inplace/info.h @@ -0,0 +1,74 @@ +#ifndef __INDEX_COPY_INPLACE_INFO_H__ +#define __INDEX_COPY_INPLACE_INFO_H__ + +#include "../../../utils.h" +#include "../../operator.h" +#include "../../tensor.h" + +namespace op::index_copy_inplace { + +class IndexCopyInplaceInfo { +private: + IndexCopyInplaceInfo() = default; + +public: + // ---------------------------- start: define member variables of Info ---------------------------- + infiniDtype_t dtype; + size_t total_input_size; + size_t total_output_size; + std::vector output_shape; + std::vector input_shape; + std::vector index_shape; + std::vector output_strides; + std::vector input_strides; + std::vector index_strides; + std::vector meta_strides; + size_t dim; + + // ----------------------------- end: define member variables of Info ----------------------------- + + static utils::Result createIndexCopyInplaceInfo( + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t 
input_desc, + infiniopTensorDescriptor_t index_desc, + size_t dim) { + // ------------------------- start: check tensor shape and input validity ------------------------- + CHECK_OR_RETURN(output_desc->ndim() == input_desc->ndim(), INFINI_STATUS_BAD_TENSOR_STRIDES); + std::vector meta_strides(input_desc->ndim()); + ptrdiff_t last_dim = 1; + ptrdiff_t last_stride = 1; + size_t total_input_size = 1; + size_t total_output_size = 1; + for (size_t d = 0; d < input_desc->ndim(); d++) { + total_input_size *= input_desc->dim(d); + total_output_size *= output_desc->dim(d); + if (d == dim) { + continue; + } else { + meta_strides[d] = last_dim * last_stride; + last_dim = input_desc->dim(d); + last_stride = meta_strides[d]; + } + } + meta_strides[dim] = last_dim * last_stride; + // -------------------------- end: check tensor shape and input validity -------------------------- + return utils::Result(IndexCopyInplaceInfo{ + // ------------------------------ start: create an instance of Info ------------------------------- + output_desc->dtype(), + total_input_size, + total_output_size, + output_desc->shape(), + input_desc->shape(), + index_desc->shape(), + output_desc->strides(), + input_desc->strides(), + index_desc->strides(), + meta_strides, + dim + // ------------------------------- end: create an instance of Info -------------------------------- + }); + } +}; +} // namespace op::index_copy_inplace + +#endif // __INDEX_COPY_INPLACE_INFO_H__ diff --git a/src/infiniop/ops/index_copy_inplace/nvidia/index_copy_inplace_nvidia.cu b/src/infiniop/ops/index_copy_inplace/nvidia/index_copy_inplace_nvidia.cu new file mode 100644 index 000000000..70772fe67 --- /dev/null +++ b/src/infiniop/ops/index_copy_inplace/nvidia/index_copy_inplace_nvidia.cu @@ -0,0 +1,127 @@ +#include "../../../devices/nvidia/nvidia_common.cuh" +#include "../../../devices/nvidia/nvidia_handle.cuh" +#include "../../../devices/nvidia/nvidia_kernel_common.cuh" +#include "../../rearrange/nvidia/rearrange_nvidia.cuh" +#include "../info.h" +#include "index_copy_inplace_nvidia.cuh" + +namespace op::index_copy_inplace::nvidia { + +infiniStatus_t calculate_index_copy_inplace( + char *output, + const char *input, + const int64_t *index, + size_t copy_unit_size, + size_t output_len, + size_t index_len, + ptrdiff_t index_stride, + cudaStream_t stream) { + int64_t *dst_index = new int64_t; + size_t sizeof_int64_t = sizeof(int64_t); + for (size_t src_index = 0; src_index < index_len; src_index++) { + CHECK_CUDA(cudaMemcpyAsync( + dst_index, + index + src_index * index_stride, + sizeof_int64_t, + cudaMemcpyDeviceToHost, + stream)); + cudaStreamSynchronize(stream); + CHECK_CUDA(cudaMemcpyAsync( + output + (size_t)(*dst_index) * copy_unit_size, + input + src_index * copy_unit_size, + copy_unit_size, + cudaMemcpyDeviceToDevice, + stream)); + cudaStreamSynchronize(stream); + } + delete dst_index; + return INFINI_STATUS_SUCCESS; +} + +struct Descriptor::Opaque { + std::shared_ptr internal; +}; + +Descriptor::~Descriptor() { + delete reinterpret_cast(_rearrange_desc_in); + delete reinterpret_cast(_rearrange_desc_out); + delete _opaque; +} + +infiniStatus_t Descriptor::create( + infiniopHandle_t handle_, + Descriptor **desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + infiniopTensorDescriptor_t index_desc, + size_t dim) { + auto handle = reinterpret_cast(handle_); + // --------------------- start: check data type and calculate workspace size ---------------------- + auto dtype = output_desc->dtype(); + // 
---------------------- end: check data type and calculate workspace size ----------------------- + auto result = IndexCopyInplaceInfo::createIndexCopyInplaceInfo( + output_desc, + input_desc, + index_desc, + dim); + CHECK_RESULT(result); + const IndexCopyInplaceInfo &info = result.take(); + size_t WorkSpaceSize = (info.total_input_size + info.total_output_size) * infiniSizeOf(dtype); + + InfiniopTensorDescriptor *rearrange_in_desc = new InfiniopTensorDescriptor( + dtype, input_desc->ndim(), input_desc->shape().data(), info.meta_strides.data()); + InfiniopTensorDescriptor *rearrange_out_desc = new InfiniopTensorDescriptor( + dtype, input_desc->ndim(), output_desc->shape().data(), info.meta_strides.data()); + + void *in_rearrange_descriptor = nullptr; + void *out_rearrange_descriptor = nullptr; + + op::rearrange::nvidia::Descriptor::create( + handle_, reinterpret_cast(&in_rearrange_descriptor), + rearrange_in_desc, input_desc); + op::rearrange::nvidia::Descriptor::create( + handle_, reinterpret_cast(&out_rearrange_descriptor), + output_desc, rearrange_out_desc); + + *desc_ptr = new Descriptor( + dtype, std::move(info), WorkSpaceSize, + new Opaque{handle->internal()}, + handle->device, handle->device_id, + in_rearrange_descriptor, + out_rearrange_descriptor); + return INFINI_STATUS_SUCCESS; +} + +infiniStatus_t Descriptor::calculate( + void *workspace, + size_t workspace_size, + void *output, + const void *input, + const void *index, + void *stream_) const { + if (workspace_size < _workspace_size) { + return INFINI_STATUS_INSUFFICIENT_WORKSPACE; + } + cudaStream_t stream = (cudaStream_t)stream_; + + size_t elem_size = infiniSizeOf(_info.dtype); + char *workspace_in = reinterpret_cast(workspace); + char *workspace_out = workspace_in + elem_size * _info.total_input_size; + CHECK_STATUS(reinterpret_cast(_rearrange_desc_in)->calculate(workspace_in, input, stream)); + cudaMemsetAsync(workspace_out, 0, _info.total_output_size * elem_size, stream); + cudaDeviceSynchronize(); + CHECK_STATUS(calculate_index_copy_inplace( + reinterpret_cast(workspace_out), + reinterpret_cast(workspace_in), + reinterpret_cast(index), + elem_size * _info.meta_strides[_info.dim], + _info.output_shape[_info.dim], + _info.index_shape[0], + _info.index_strides[0], + stream)); + cudaDeviceSynchronize(); + + CHECK_STATUS(reinterpret_cast(_rearrange_desc_out)->calculate(output, workspace_out, stream)); + return INFINI_STATUS_SUCCESS; +} +} // namespace op::index_copy_inplace::nvidia diff --git a/src/infiniop/ops/index_copy_inplace/nvidia/index_copy_inplace_nvidia.cuh b/src/infiniop/ops/index_copy_inplace/nvidia/index_copy_inplace_nvidia.cuh new file mode 100644 index 000000000..04c3c86f7 --- /dev/null +++ b/src/infiniop/ops/index_copy_inplace/nvidia/index_copy_inplace_nvidia.cuh @@ -0,0 +1,7 @@ +#ifndef __INDEX_COPY_INPLACE_NVIDIA_API_H__ +#define __INDEX_COPY_INPLACE_NVIDIA_API_H__ +#include "../index_copy_inplace.h" + +INDEX_COPY_INPLACE_DESCRIPTOR(nvidia) + +#endif // __INDEX_COPY_INPLACE_NVIDIA_API_H__ diff --git a/src/infiniop/ops/index_copy_inplace/operator.cc b/src/infiniop/ops/index_copy_inplace/operator.cc new file mode 100644 index 000000000..00e6bca3a --- /dev/null +++ b/src/infiniop/ops/index_copy_inplace/operator.cc @@ -0,0 +1,144 @@ +#include "../../operator.h" +#include "../../handle.h" +#include "infiniop/ops/index_copy_inplace.h" + +#ifdef ENABLE_CPU_API +#include "cpu/index_copy_inplace_cpu.h" +#endif +#if defined(ENABLE_NVIDIA_API) || defined(ENABLE_ILUVATAR_API) || defined(ENABLE_QY_API) +#include 
"nvidia/index_copy_inplace_nvidia.cuh" +#endif + +__C infiniStatus_t infiniopCreateIndexCopyInplaceDescriptor( + infiniopHandle_t handle, + infiniopIndexCopyInplaceDescriptor_t *desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + infiniopTensorDescriptor_t index_desc, + size_t dim) { + +#define CREATE(CASE, NAMESPACE) \ + case CASE: \ + return op::index_copy_inplace::NAMESPACE::Descriptor::create( \ + handle, \ + reinterpret_cast(desc_ptr), \ + output_desc, \ + input_desc, \ + index_desc, \ + dim) + + switch (handle->device) { + +#ifdef ENABLE_CPU_API + CREATE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + CREATE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + CREATE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + CREATE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef CREATE +} + +__C infiniStatus_t infiniopGetIndexCopyInplaceWorkspaceSize(infiniopIndexCopyInplaceDescriptor_t desc, size_t *size) { + +#define GET(CASE, NAMESPACE) \ + case CASE: \ + *size = reinterpret_cast(desc)->workspaceSize(); \ + return INFINI_STATUS_SUCCESS; + + switch (desc->device_type) { +#ifdef ENABLE_CPU_API + GET(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + GET(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + GET(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + GET(INFINI_DEVICE_QY, nvidia); +#endif + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } +#undef GET + + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; +} + +__C infiniStatus_t infiniopIndexCopyInplace( + infiniopIndexCopyInplaceDescriptor_t desc, + void *workspace, + size_t workspace_size, + void *output, + const void *input, + const void *index, + void *stream) { + +#define CALCULATE(CASE, NAMESPACE) \ + case CASE: \ + return reinterpret_cast(desc) \ + ->calculate(workspace, workspace_size, output, input, index, stream) + + switch (desc->device_type) { + +#ifdef ENABLE_CPU_API + CALCULATE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + CALCULATE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + CALCULATE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + CALCULATE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef CALCULATE +} + +__C infiniStatus_t +infiniopDestroyIndexCopyInplaceDescriptor(infiniopIndexCopyInplaceDescriptor_t desc) { + +#define DELETE(CASE, NAMESPACE) \ + case CASE: \ + delete reinterpret_cast(desc); \ + return INFINI_STATUS_SUCCESS; + + switch (desc->device_type) { + +#ifdef ENABLE_CPU_API + DELETE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + DELETE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + DELETE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + DELETE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef DELETE +} diff --git a/src/infiniop/ops/interpolate_nearest/cpu/interpolate_nearest_cpu.cc b/src/infiniop/ops/interpolate_nearest/cpu/interpolate_nearest_cpu.cc new file mode 100644 index 000000000..508dcecc6 --- /dev/null +++ b/src/infiniop/ops/interpolate_nearest/cpu/interpolate_nearest_cpu.cc @@ -0,0 +1,284 @@ +#include "interpolate_nearest_cpu.h" +#include "../../../devices/cpu/common_cpu.h" +#include "../../../devices/cpu/cpu_handle.h" +#include "../info.h" +#include +#include +#include 
+ +namespace op::interpolate_nearest::cpu { + +struct Descriptor::Opaque { + device::cpu::Handle *handle; + InterpolateNearestInfo info; + size_t workspace_size = 0; + +private: + Opaque(device::cpu::Handle *handle_ptr, const InterpolateNearestInfo &interpolate_info) + : handle(handle_ptr), info(interpolate_info) { + workspace_size = 0; + } + + template + size_t compute_input_index_1d(size_t idx) const { + size_t temp = idx; + + // 1D插值:3D张量 (N, C, W) + size_t w = temp % info.output_size[0]; + temp /= info.output_size[0]; + size_t c = temp % info.channels; + size_t b = temp / info.channels; + + float inv_scale = static_cast(info.input_size[0]) / static_cast(info.output_size[0]); + size_t input_w = std::min(static_cast(std::floor(static_cast(w) * inv_scale)), + info.input_size[0] - 1); + + return b * info.input_stride[0] + c * info.input_stride[1] + input_w * info.input_stride[2]; + } + + // 计算2D插值的输入索引 + template + size_t compute_input_index_2d(size_t idx) const { + size_t temp = idx; + + // 2D插值:4D张量 (N, C, H, W) + size_t w = temp % info.output_size[1]; // width在索引1 + temp /= info.output_size[1]; + size_t h = temp % info.output_size[0]; // height在索引0 + temp /= info.output_size[0]; + size_t c = temp % info.channels; + size_t b = temp / info.channels; + + float inv_scale_h = static_cast(info.input_size[0]) / static_cast(info.output_size[0]); + float inv_scale_w = static_cast(info.input_size[1]) / static_cast(info.output_size[1]); + + size_t input_h = std::min(static_cast(std::floor(static_cast(h) * inv_scale_h)), + info.input_size[0] - 1); + size_t input_w = std::min(static_cast(std::floor(static_cast(w) * inv_scale_w)), + info.input_size[1] - 1); + + return b * info.input_stride[0] + c * info.input_stride[1] + input_h * info.input_stride[2] + input_w * info.input_stride[3]; + } + + // 计算3D插值的输入索引 + template + size_t compute_input_index_3d(size_t idx) const { + size_t temp = idx; + + // 3D插值:5D张量 (N, C, D, H, W) + size_t w = temp % info.output_size[2]; // width在索引2 + temp /= info.output_size[2]; + size_t h = temp % info.output_size[1]; // height在索引1 + temp /= info.output_size[1]; + size_t d = temp % info.output_size[0]; // depth在索引0 + temp /= info.output_size[0]; + size_t c = temp % info.channels; + size_t b = temp / info.channels; + + float inv_scale_d = static_cast(info.input_size[0]) / static_cast(info.output_size[0]); + float inv_scale_h = static_cast(info.input_size[1]) / static_cast(info.output_size[1]); + float inv_scale_w = static_cast(info.input_size[2]) / static_cast(info.output_size[2]); + + size_t input_d = std::min(static_cast(std::floor(static_cast(d) * inv_scale_d)), + info.input_size[0] - 1); + size_t input_h = std::min(static_cast(std::floor(static_cast(h) * inv_scale_h)), + info.input_size[1] - 1); + size_t input_w = std::min(static_cast(std::floor(static_cast(w) * inv_scale_w)), + info.input_size[2] - 1); + + return b * info.input_stride[0] + c * info.input_stride[1] + input_d * info.input_stride[2] + input_h * info.input_stride[3] + input_w * info.input_stride[4]; + } + + // 计算输出索引 + template + size_t compute_output_index(size_t idx) const { + size_t temp = idx; + size_t w, h, d, c, b; + + switch (info.dim) { + case INTERPOLATE_1D: { + // 3D张量 (N, C, W) + w = temp % info.output_size[0]; + temp /= info.output_size[0]; + c = temp % info.channels; + b = temp / info.channels; + return b * info.output_stride[0] + c * info.output_stride[1] + w * info.output_stride[2]; + } + + case INTERPOLATE_2D: { + // 4D张量 (N, C, H, W) + w = temp % info.output_size[1]; + temp /= 
info.output_size[1]; + h = temp % info.output_size[0]; + temp /= info.output_size[0]; + c = temp % info.channels; + b = temp / info.channels; + return b * info.output_stride[0] + c * info.output_stride[1] + h * info.output_stride[2] + w * info.output_stride[3]; + } + + case INTERPOLATE_3D: { + // 5D张量 (N, C, D, H, W) + w = temp % info.output_size[2]; + temp /= info.output_size[2]; + h = temp % info.output_size[1]; + temp /= info.output_size[1]; + d = temp % info.output_size[0]; + temp /= info.output_size[0]; + c = temp % info.channels; + b = temp / info.channels; + return b * info.output_stride[0] + c * info.output_stride[1] + d * info.output_stride[2] + h * info.output_stride[3] + w * info.output_stride[4]; + } + + default: + return 0; + } + } + + // 计算总元素数 + size_t calculate_total_elements() const { + size_t total = info.batch_size * info.channels; + switch (info.dim) { + case INTERPOLATE_1D: + total *= info.output_size[0]; // width + break; + case INTERPOLATE_2D: + total *= info.output_size[0] * info.output_size[1]; // height * width + break; + case INTERPOLATE_3D: + total *= info.output_size[0] * info.output_size[1] * info.output_size[2]; // depth * height * width + break; + } + return total; + } + + // 主要的插值计算函数 + template + void interpolate_nearest_cpu(T *output, const T *input) const { + size_t total_elements = calculate_total_elements(); + +#pragma omp parallel for schedule(static) + for (ptrdiff_t idx = 0; idx < static_cast(total_elements); ++idx) { + size_t input_idx; + + switch (info.dim) { + case INTERPOLATE_1D: + input_idx = compute_input_index_1d(idx); + break; + case INTERPOLATE_2D: + input_idx = compute_input_index_2d(idx); + break; + case INTERPOLATE_3D: + input_idx = compute_input_index_3d(idx); + break; + default: + continue; + } + + size_t output_idx = compute_output_index(idx); + output[output_idx] = input[input_idx]; + } + } + +public: + Opaque(Opaque &&other) noexcept + : handle(other.handle), + info(std::move(other.info)), + workspace_size(other.workspace_size) { + other.handle = nullptr; + other.workspace_size = 0; + } + + ~Opaque() = default; + + static inline utils::Result + create(device::cpu::Handle *handle_ptr, + const InterpolateNearestInfo &info, + infiniDtype_t data_type) { + if (data_type != INFINI_DTYPE_F32 && data_type != INFINI_DTYPE_F16 && data_type != INFINI_DTYPE_BF16 && data_type != INFINI_DTYPE_I8) { + return INFINI_STATUS_BAD_TENSOR_DTYPE; + } + + Opaque opaque(handle_ptr, info); + return utils::Result(std::move(opaque)); + } + + infiniStatus_t calculate(void *workspace, size_t workspace_size, + void *output, const void *input, infiniDtype_t dtype) const { + + if (!output || !input) { + return INFINI_STATUS_BAD_PARAM; + } + + switch (dtype) { + case INFINI_DTYPE_F32: { + float *typed_output = static_cast(output); + const float *typed_input = static_cast(input); + interpolate_nearest_cpu(typed_output, typed_input); + break; + } + + case INFINI_DTYPE_F16: { + fp16_t *typed_output = static_cast(output); + const fp16_t *typed_input = static_cast(input); + interpolate_nearest_cpu(typed_output, typed_input); + break; + } + + case INFINI_DTYPE_BF16: { + bf16_t *typed_output = static_cast(output); + const bf16_t *typed_input = static_cast(input); + interpolate_nearest_cpu(typed_output, typed_input); + break; + } + + case INFINI_DTYPE_I8: { + int8_t *typed_output = static_cast(output); + const int8_t *typed_input = static_cast(input); + interpolate_nearest_cpu(typed_output, typed_input); + break; + } + + default: + return INFINI_STATUS_BAD_TENSOR_DTYPE; + 
} + + return INFINI_STATUS_SUCCESS; + } +}; + +Descriptor::~Descriptor() { + if (_opaque) { + delete _opaque; + } +} + +infiniStatus_t Descriptor::create(infiniopHandle_t handle_, + Descriptor **desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc) { + auto handle = reinterpret_cast(handle_); + auto dtype = output_desc->dtype(); + + // 检查数据类型支持 + CHECK_DTYPE(dtype, INFINI_DTYPE_F32, INFINI_DTYPE_F16, INFINI_DTYPE_BF16, INFINI_DTYPE_I8); + + InterpolateNearestInfo info; + CHECK_STATUS(InterpolateNearestInfo::create(&info, output_desc, input_desc)); + + auto opaque_result = Opaque::create(handle, info, dtype); + CHECK_RESULT(opaque_result); + auto opaque = new Opaque(opaque_result.take()); + + *desc_ptr = new Descriptor(dtype, info, opaque->workspace_size, opaque, + handle->device, handle->device_id); + + return INFINI_STATUS_SUCCESS; +} + +infiniStatus_t Descriptor::calculate(void *workspace, size_t workspace_size, + void *output, const void *input, + void *stream) const { + return _opaque->calculate(workspace, workspace_size, output, input, _dtype); +} + +} // namespace op::interpolate_nearest::cpu diff --git a/src/infiniop/ops/interpolate_nearest/cpu/interpolate_nearest_cpu.h b/src/infiniop/ops/interpolate_nearest/cpu/interpolate_nearest_cpu.h new file mode 100644 index 000000000..78dd3ff97 --- /dev/null +++ b/src/infiniop/ops/interpolate_nearest/cpu/interpolate_nearest_cpu.h @@ -0,0 +1,8 @@ +#ifndef __INTERPOLATE_NEAREST_CPU_H__ +#define __INTERPOLATE_NEAREST_CPU_H__ + +#include "../interpolate_nearest.h" + +DESCRIPTOR(cpu) + +#endif // __INTERPOLATE_NEAREST_CPU_H__ diff --git a/src/infiniop/ops/interpolate_nearest/cuda/kernel.cuh b/src/infiniop/ops/interpolate_nearest/cuda/kernel.cuh new file mode 100644 index 000000000..60c798792 --- /dev/null +++ b/src/infiniop/ops/interpolate_nearest/cuda/kernel.cuh @@ -0,0 +1,168 @@ +#ifndef INTERPOLATE_NEAREST_KERNEL_CUH +#define INTERPOLATE_NEAREST_KERNEL_CUH + +#include "../info.h" +#include + +template +__device__ inline size_t +compute_input_index_1d(size_t idx, const InterpolateNearestInfo &info) { + size_t temp = idx; + + // 1D 插值:3D 张量 (N, C, W) + size_t w = temp % info.output_size[0]; // width 在索引 0 + temp /= info.output_size[0]; + size_t c = temp % info.channels; + size_t b = temp / info.channels; + + float inv_scale = static_cast(info.input_size[0]) / static_cast(info.output_size[0]); + size_t input_w = min(static_cast(floorf(static_cast(w) * inv_scale)), + info.input_size[0] - 1); + + return b * info.input_stride[0] + c * info.input_stride[1] + input_w * info.input_stride[2]; +} + +template +__device__ inline size_t +compute_input_index_2d(size_t idx, const InterpolateNearestInfo &info) { + size_t temp = idx; + + // 2D 插值:4D 张量 (N, C, H, W) + size_t w = temp % info.output_size[1]; // width 在索引 1 + temp /= info.output_size[1]; + size_t h = temp % info.output_size[0]; // height 在索引 0 + temp /= info.output_size[0]; + size_t c = temp % info.channels; + size_t b = temp / info.channels; + + float inv_scale_h = static_cast(info.input_size[0]) / static_cast(info.output_size[0]); + float inv_scale_w = static_cast(info.input_size[1]) / static_cast(info.output_size[1]); + + size_t input_h = min(static_cast(floorf(static_cast(h) * inv_scale_h)), + info.input_size[0] - 1); + size_t input_w = min(static_cast(floorf(static_cast(w) * inv_scale_w)), + info.input_size[1] - 1); + + return b * info.input_stride[0] + c * info.input_stride[1] + input_h * info.input_stride[2] + input_w * info.input_stride[3]; +} + 
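+// Both the CPU implementation and these device helpers linearize the output as
+//   idx = ((b * C + c) * H + h) * W + w            (2-D case; 1-D/3-D drop or add an axis)
+// and recover (b, c, h, w) by modulo/divide starting from the innermost (width) axis.
+// For example, with C = 3, H = 2, W = 4, idx = 21 decodes to b = 0, c = 2, h = 1, w = 1.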
+template +__device__ inline size_t +compute_input_index_3d(size_t idx, const InterpolateNearestInfo &info) { + size_t temp = idx; + + // 3D 插值:5D 张量 (N, C, D, H, W) + size_t w = temp % info.output_size[2]; // width 在索引 2 + temp /= info.output_size[2]; + size_t h = temp % info.output_size[1]; // height 在索引 1 + temp /= info.output_size[1]; + size_t d = temp % info.output_size[0]; // depth 在索引 0 + temp /= info.output_size[0]; + size_t c = temp % info.channels; + size_t b = temp / info.channels; + + float inv_scale_d = static_cast(info.input_size[0]) / static_cast(info.output_size[0]); + float inv_scale_h = static_cast(info.input_size[1]) / static_cast(info.output_size[1]); + float inv_scale_w = static_cast(info.input_size[2]) / static_cast(info.output_size[2]); + + size_t input_d = min(static_cast(floorf(static_cast(d) * inv_scale_d)), + info.input_size[0] - 1); + size_t input_h = min(static_cast(floorf(static_cast(h) * inv_scale_h)), + info.input_size[1] - 1); + size_t input_w = min(static_cast(floorf(static_cast(w) * inv_scale_w)), + info.input_size[2] - 1); + + return b * info.input_stride[0] + c * info.input_stride[1] + input_d * info.input_stride[2] + input_h * info.input_stride[3] + input_w * info.input_stride[4]; +} + +template +__device__ inline size_t +compute_output_index(size_t idx, const InterpolateNearestInfo &info) { + size_t temp = idx; + size_t w, h, d, c, b; + + switch (info.dim) { + case INTERPOLATE_1D: { + // 3D 张量 (N, C, W) + w = temp % info.output_size[0]; + temp /= info.output_size[0]; + c = temp % info.channels; + b = temp / info.channels; + return b * info.output_stride[0] + c * info.output_stride[1] + w * info.output_stride[2]; + } + + case INTERPOLATE_2D: { + // 4D 张量 (N, C, H, W) + w = temp % info.output_size[1]; + temp /= info.output_size[1]; + h = temp % info.output_size[0]; + temp /= info.output_size[0]; + c = temp % info.channels; + b = temp / info.channels; + return b * info.output_stride[0] + c * info.output_stride[1] + h * info.output_stride[2] + w * info.output_stride[3]; + } + + case INTERPOLATE_3D: { + // 5D 张量 (N, C, D, H, W) + w = temp % info.output_size[2]; + temp /= info.output_size[2]; + h = temp % info.output_size[1]; + temp /= info.output_size[1]; + d = temp % info.output_size[0]; + temp /= info.output_size[0]; + c = temp % info.channels; + b = temp / info.channels; + return b * info.output_stride[0] + c * info.output_stride[1] + d * info.output_stride[2] + h * info.output_stride[3] + w * info.output_stride[4]; + } + + default: + return 0; + } +} + +__host__ __device__ inline size_t +calculate_total_elements(const InterpolateNearestInfo &info) { + size_t total = info.batch_size * info.channels; + switch (info.dim) { + case INTERPOLATE_1D: + total *= info.output_size[0]; // width + break; + case INTERPOLATE_2D: + total *= info.output_size[0] * info.output_size[1]; // height * width + break; + case INTERPOLATE_3D: + total *= info.output_size[0] * info.output_size[1] * info.output_size[2]; // depth * height * width + break; + } + return total; +} + +template +__global__ void interpolate_nearest_kernel(T *output, const T *input, + InterpolateNearestInfo info) { + size_t idx = blockIdx.x * blockDim.x + threadIdx.x; + size_t total_elements = calculate_total_elements(info); + + if (idx < total_elements) { + size_t input_idx; + + switch (info.dim) { + case INTERPOLATE_1D: + input_idx = compute_input_index_1d(idx, info); + break; + case INTERPOLATE_2D: + input_idx = compute_input_index_2d(idx, info); + break; + case INTERPOLATE_3D: + input_idx = 
compute_input_index_3d(idx, info); + break; + default: + return; + } + + size_t output_idx = compute_output_index(idx, info); + output[output_idx] = input[input_idx]; + } +} + +#endif // INTERPOLATE_NEAREST_KERNEL_CUH diff --git a/src/infiniop/ops/interpolate_nearest/info.h b/src/infiniop/ops/interpolate_nearest/info.h new file mode 100644 index 000000000..162d6eb02 --- /dev/null +++ b/src/infiniop/ops/interpolate_nearest/info.h @@ -0,0 +1,118 @@ +#ifndef __INTERPOLATE_NEAREST_INFO_H__ +#define __INTERPOLATE_NEAREST_INFO_H__ + +#include "../../../utils.h" +#include "../../operator.h" +#include "../../tensor.h" +#include + +enum InterpolateDim { + INTERPOLATE_1D = 1, // 3D 张量 (N, C, W) + INTERPOLATE_2D = 2, // 4D 张量 (N, C, H, W) + INTERPOLATE_3D = 3 // 5D 张量 (N, C, D, H, W) +}; + +struct InterpolateNearestInfo { + size_t batch_size; + size_t channels; + + // 输入和输出的空间维度大小 + size_t input_size[3]; // [depth/height/width] 根据维度使用不同数量 + size_t output_size[3]; // [depth/height/width] 根据维度使用不同数量 + + InterpolateDim dim; // 插值维度:1D, 2D, 3D + infiniDtype_t dtype; + + // 张量步长(最多支持 5D 张量) + size_t input_stride[5]; + size_t output_stride[5]; + + static infiniStatus_t create( + InterpolateNearestInfo *info, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc) { + + // 检查数据类型 + if (input_desc->dtype() != output_desc->dtype()) { + return INFINI_STATUS_BAD_TENSOR_DTYPE; + } + + auto input_shape = input_desc->shape(); + auto output_shape = output_desc->shape(); + auto input_stride = input_desc->strides(); + auto output_stride = output_desc->strides(); + + // 根据张量维度确定插值类型 + if (input_desc->ndim() == 3 && output_desc->ndim() == 3) { + // 1D 插值:3D 张量 (N, C, W) + info->dim = INTERPOLATE_1D; + info->batch_size = input_shape[0]; + info->channels = input_shape[1]; + info->input_size[0] = input_shape[2]; // width + info->output_size[0] = output_shape[2]; // width + + // 检查 N,C 维度匹配 + if (input_shape[0] != output_shape[0] || input_shape[1] != output_shape[1]) { + return INFINI_STATUS_BAD_TENSOR_SHAPE; + } + + // 复制步长 + for (int i = 0; i < 3; ++i) { + info->input_stride[i] = input_stride[i]; + info->output_stride[i] = output_stride[i]; + } + + } else if (input_desc->ndim() == 4 && output_desc->ndim() == 4) { + // 2D 插值:4D 张量 (N, C, H, W) + info->dim = INTERPOLATE_2D; + info->batch_size = input_shape[0]; + info->channels = input_shape[1]; + info->input_size[0] = input_shape[2]; // height + info->input_size[1] = input_shape[3]; // width + info->output_size[0] = output_shape[2]; // height + info->output_size[1] = output_shape[3]; // width + + // 检查 N,C 维度匹配 + if (input_shape[0] != output_shape[0] || input_shape[1] != output_shape[1]) { + return INFINI_STATUS_BAD_TENSOR_SHAPE; + } + + // 复制步长 + for (int i = 0; i < 4; ++i) { + info->input_stride[i] = input_stride[i]; + info->output_stride[i] = output_stride[i]; + } + + } else if (input_desc->ndim() == 5 && output_desc->ndim() == 5) { + // 3D 插值:5D 张量 (N, C, D, H, W) + info->dim = INTERPOLATE_3D; + info->batch_size = input_shape[0]; + info->channels = input_shape[1]; + info->input_size[0] = input_shape[2]; // depth + info->input_size[1] = input_shape[3]; // height + info->input_size[2] = input_shape[4]; // width + info->output_size[0] = output_shape[2]; // depth + info->output_size[1] = output_shape[3]; // height + info->output_size[2] = output_shape[4]; // width + + // 检查 N,C 维度匹配 + if (input_shape[0] != output_shape[0] || input_shape[1] != output_shape[1]) { + return INFINI_STATUS_BAD_TENSOR_SHAPE; + } + + // 复制步长 + for (int i = 0; i < 5; ++i) 
{ + info->input_stride[i] = input_stride[i]; + info->output_stride[i] = output_stride[i]; + } + + } else { + return INFINI_STATUS_BAD_TENSOR_SHAPE; + } + + info->dtype = input_desc->dtype(); + return INFINI_STATUS_SUCCESS; + } +}; + +#endif // __INTERPOLATE_NEAREST_INFO_H__ diff --git a/src/infiniop/ops/interpolate_nearest/interpolate_nearest.h b/src/infiniop/ops/interpolate_nearest/interpolate_nearest.h new file mode 100644 index 000000000..73499c2ff --- /dev/null +++ b/src/infiniop/ops/interpolate_nearest/interpolate_nearest.h @@ -0,0 +1,51 @@ +#ifndef __INTERPOLATE_NEAREST_H__ +#define __INTERPOLATE_NEAREST_H__ + +#include "../../operator.h" +#include "info.h" + +#define DESCRIPTOR(NAMESPACE) \ + \ + namespace op::interpolate_nearest::NAMESPACE { \ + class Descriptor final : public InfiniopDescriptor { \ + struct Opaque; \ + Opaque *_opaque; \ + \ + InterpolateNearestInfo _info; \ + infiniDtype_t _dtype; \ + size_t _workspace_size; \ + \ + Descriptor( \ + infiniDtype_t dtype, \ + InterpolateNearestInfo info, \ + size_t workspace_size, \ + Opaque *opaque, \ + infiniDevice_t device_type, \ + int device_id) \ + : InfiniopDescriptor{device_type, device_id}, \ + _opaque(opaque), \ + _info(info), \ + _dtype(dtype), \ + _workspace_size(workspace_size) {} \ + \ + public: \ + ~Descriptor(); \ + \ + static infiniStatus_t create( \ + infiniopHandle_t handle, \ + Descriptor **desc_ptr, \ + infiniopTensorDescriptor_t output_desc, \ + infiniopTensorDescriptor_t input_desc); \ + \ + size_t workspaceSize() const { return _workspace_size; } \ + \ + infiniStatus_t calculate( \ + void *workspace, \ + size_t workspace_size, \ + void *output, \ + const void *input, \ + void *stream) const; \ + }; \ + } + +#endif // __INTERPOLATE_NEAREST_H__ diff --git a/src/infiniop/ops/interpolate_nearest/nvidia/interpolate_nearest_nvidia.cu b/src/infiniop/ops/interpolate_nearest/nvidia/interpolate_nearest_nvidia.cu new file mode 100644 index 000000000..a7b63c6f4 --- /dev/null +++ b/src/infiniop/ops/interpolate_nearest/nvidia/interpolate_nearest_nvidia.cu @@ -0,0 +1,93 @@ +#include "../../../devices/nvidia/nvidia_common.cuh" +#include "../../../devices/nvidia/nvidia_kernel_common.cuh" +#include "../cuda/kernel.cuh" +#include "interpolate_nearest_nvidia.cuh" +#include +#include +#include + +namespace op::interpolate_nearest::nvidia { + +struct Descriptor::Opaque { + std::shared_ptr internal; + + Opaque(std::shared_ptr internal_) + : internal(internal_) {} +}; + +Descriptor::~Descriptor() { delete _opaque; } + +infiniStatus_t Descriptor::create(infiniopHandle_t handle_, + Descriptor **desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc) { + + auto handle = reinterpret_cast(handle_); + auto dtype = output_desc->dtype(); + + // Check supported data types + if (dtype != INFINI_DTYPE_F16 && dtype != INFINI_DTYPE_F32 && dtype != INFINI_DTYPE_BF16 && dtype != INFINI_DTYPE_I8) { + return INFINI_STATUS_BAD_TENSOR_DTYPE; + } + + InterpolateNearestInfo info; + CHECK_STATUS(InterpolateNearestInfo::create(&info, output_desc, input_desc)); + + *desc_ptr = new Descriptor(dtype, info, 0, new Opaque{handle->internal()}, + handle->device, handle->device_id); + + return INFINI_STATUS_SUCCESS; +} + +infiniStatus_t Descriptor::calculate(void *workspace, size_t workspace_size, + void *output, const void *input, + void *stream) const { + + auto cuda_stream = reinterpret_cast(stream); + + size_t total_elements = calculate_total_elements(_info); + + int block_size = 256; + int grid_size = (total_elements + 
block_size - 1) / block_size; + + switch (_dtype) { + case INFINI_DTYPE_F32: { + float *typed_output = reinterpret_cast(output); + const float *typed_input = reinterpret_cast(input); + interpolate_nearest_kernel + <<>>(typed_output, typed_input, + _info); + } break; + + case INFINI_DTYPE_F16: { + half *typed_output = reinterpret_cast(output); + const half *typed_input = reinterpret_cast(input); + interpolate_nearest_kernel<<>>( + typed_output, typed_input, _info); + } break; + + case INFINI_DTYPE_BF16: { + auto typed_output = reinterpret_cast<__nv_bfloat16 *>(output); + auto typed_input = reinterpret_cast(input); + interpolate_nearest_kernel<__nv_bfloat16> + <<>>(typed_output, typed_input, + _info); + } break; + + case INFINI_DTYPE_I8: { + auto typed_output = reinterpret_cast(output); + auto typed_input = reinterpret_cast(input); + interpolate_nearest_kernel + <<>>(typed_output, typed_input, + _info); + } break; + default: + return INFINI_STATUS_BAD_TENSOR_DTYPE; + } + + CHECK_CUDA(cudaGetLastError()); + CHECK_CUDA(cudaStreamSynchronize(cuda_stream)); + return INFINI_STATUS_SUCCESS; +} + +} // namespace op::interpolate_nearest::nvidia diff --git a/src/infiniop/ops/interpolate_nearest/nvidia/interpolate_nearest_nvidia.cuh b/src/infiniop/ops/interpolate_nearest/nvidia/interpolate_nearest_nvidia.cuh new file mode 100644 index 000000000..aab5f7882 --- /dev/null +++ b/src/infiniop/ops/interpolate_nearest/nvidia/interpolate_nearest_nvidia.cuh @@ -0,0 +1,9 @@ +#ifndef __INTERPOLATE_NEAREST_NVIDIA_CUH__ +#define __INTERPOLATE_NEAREST_NVIDIA_CUH__ + +#include "../../../devices/nvidia/nvidia_handle.h" +#include "../interpolate_nearest.h" + +DESCRIPTOR(nvidia) + +#endif // __INTERPOLATE_NEAREST_NVIDIA_CUH__ diff --git a/src/infiniop/ops/interpolate_nearest/operator.cc b/src/infiniop/ops/interpolate_nearest/operator.cc new file mode 100644 index 000000000..df367cfde --- /dev/null +++ b/src/infiniop/ops/interpolate_nearest/operator.cc @@ -0,0 +1,139 @@ +#include "../../operator.h" +#include "../../handle.h" +#include "infiniop/ops/interpolate_nearest.h" + +#ifdef ENABLE_CPU_API +#include "cpu/interpolate_nearest_cpu.h" +#endif +#if defined(ENABLE_NVIDIA_API) || defined(ENABLE_ILUVATAR_API) || defined(ENABLE_QY_API) +#include "nvidia/interpolate_nearest_nvidia.cuh" +#endif + +__C infiniStatus_t infiniopCreateInterpolateNearestDescriptor( + infiniopHandle_t handle, + infiniopInterpolateNearestDescriptor_t *desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc) { + +#define CREATE(CASE, NAMESPACE) \ + case CASE: \ + return op::interpolate_nearest::NAMESPACE::Descriptor::create( \ + handle, \ + reinterpret_cast(desc_ptr), \ + output_desc, \ + input_desc) + + switch (handle->device) { + +#ifdef ENABLE_CPU_API + CREATE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + CREATE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + CREATE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + CREATE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef CREATE +} + +__C infiniStatus_t infiniopGetInterpolateNearestWorkspaceSize(infiniopInterpolateNearestDescriptor_t desc, size_t *size) { + +#define GET(CASE, NAMESPACE) \ + case CASE: \ + *size = reinterpret_cast(desc)->workspaceSize(); \ + return INFINI_STATUS_SUCCESS; + + switch (desc->device_type) { +#ifdef ENABLE_CPU_API + GET(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + GET(INFINI_DEVICE_NVIDIA, nvidia); +#endif 
+#ifdef ENABLE_ILUVATAR_API + GET(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + GET(INFINI_DEVICE_QY, nvidia); +#endif + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } +#undef GET + + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; +} + +__C infiniStatus_t infiniopInterpolateNearest( + infiniopInterpolateNearestDescriptor_t desc, + void *workspace, + size_t workspace_size, + void *output, + const void *input, + void *stream) { + +#define CALCULATE(CASE, NAMESPACE) \ + case CASE: \ + return reinterpret_cast(desc) \ + ->calculate(workspace, workspace_size, output, input, stream) + + switch (desc->device_type) { + +#ifdef ENABLE_CPU_API + CALCULATE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + CALCULATE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + CALCULATE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + CALCULATE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef CALCULATE +} + +__C infiniStatus_t +infiniopDestroyInterpolateNearestDescriptor(infiniopInterpolateNearestDescriptor_t desc) { + +#define DELETE(CASE, NAMESPACE) \ + case CASE: \ + delete reinterpret_cast(desc); \ + return INFINI_STATUS_SUCCESS; + + switch (desc->device_type) { + +#ifdef ENABLE_CPU_API + DELETE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + DELETE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + DELETE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + DELETE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef DELETE +} diff --git a/src/infiniop/ops/maxpool/cpu/maxpool_cpu.cc b/src/infiniop/ops/maxpool/cpu/maxpool_cpu.cc new file mode 100644 index 000000000..16c859985 --- /dev/null +++ b/src/infiniop/ops/maxpool/cpu/maxpool_cpu.cc @@ -0,0 +1,322 @@ +#include "maxpool_cpu.h" +#include "../../../devices/cpu/common_cpu.h" +#include "../../../devices/cpu/cpu_handle.h" +#include "../info.h" +#include +#include +#include +#include + +namespace op::maxpool::cpu { + +struct Descriptor::Opaque { + device::cpu::Handle *handle; + MaxPoolInfo info; + size_t workspace_size = 0; + +private: + Opaque(device::cpu::Handle *handle_ptr, const MaxPoolInfo &maxpool_info) + : handle(handle_ptr), info(maxpool_info) { + // CPU实现通常不需要额外的工作空间 + workspace_size = 0; + } + + // 获取数据类型的最小值 + template + static T get_min_value() { + if constexpr (std::is_same::value) { + return -std::numeric_limits::infinity(); + } else if constexpr (std::is_same::value) { + return _f32_to_f16(-std::numeric_limits::infinity()); + } else if constexpr (std::is_same::value) { + return _f32_to_bf16(-std::numeric_limits::infinity()); + } else { + return std::numeric_limits::lowest(); + } + } + + // 比较两个值的大小(处理半精度类型) + template + static bool is_greater(const T &a, const T &b) { + if constexpr (std::is_same::value) { + return utils::cast(a) > utils::cast(b); + } else if constexpr (std::is_same::value) { + return utils::cast(a) > utils::cast(b); + } else { + return a > b; + } + } + + // 1D最大池化 + template + void maxpool_1d(T *output, const T *input) const { + size_t batch_size = info.batch; + size_t channels = info.channels; + size_t input_width = info.input_dims[0]; + size_t output_width = info.output_dims[0]; + size_t kernel_width = info.kernel_sizes[0]; + size_t stride_width = info.strides[0]; + size_t pad_width = info.pads[0]; + + // 并行处理每个批次和通道 +#pragma omp parallel for schedule(static) + for (int b = 0; b < 
static_cast(batch_size); ++b) { + for (int c = 0; c < static_cast(channels); ++c) { + size_t input_offset = static_cast(b) * channels * input_width + static_cast(c) * input_width; + size_t output_offset = static_cast(b) * channels * output_width + static_cast(c) * output_width; + + for (size_t ow = 0; ow < output_width; ++ow) { + T max_val = get_min_value(); + bool found_valid = false; + + int start_w = static_cast(ow * stride_width) - static_cast(pad_width); + int end_w = start_w + static_cast(kernel_width); + + for (int kw = start_w; kw < end_w; ++kw) { + if (kw >= 0 && kw < static_cast(input_width)) { + T val = input[input_offset + kw]; + if (!found_valid || is_greater(val, max_val)) { + max_val = val; + found_valid = true; + } + } + } + + output[output_offset + ow] = max_val; + } + } + } + } + + // 2D最大池化 + template + void maxpool_2d(T *output, const T *input) const { + size_t batch_size = info.batch; + size_t channels = info.channels; + size_t input_height = info.input_dims[0]; + size_t input_width = info.input_dims[1]; + size_t output_height = info.output_dims[0]; + size_t output_width = info.output_dims[1]; + size_t kernel_height = info.kernel_sizes[0]; + size_t kernel_width = info.kernel_sizes[1]; + size_t stride_height = info.strides[0]; + size_t stride_width = info.strides[1]; + size_t pad_height = info.pads[0]; + size_t pad_width = info.pads[1]; + + // 并行处理每个批次和通道 +#pragma omp parallel for schedule(static) + for (int b = 0; b < static_cast(batch_size); ++b) { + for (int c = 0; c < static_cast(channels); ++c) { + size_t input_offset = static_cast(b) * channels * input_height * input_width + static_cast(c) * input_height * input_width; + size_t output_offset = static_cast(b) * channels * output_height * output_width + static_cast(c) * output_height * output_width; + + for (size_t oh = 0; oh < output_height; ++oh) { + for (size_t ow = 0; ow < output_width; ++ow) { + T max_val = get_min_value(); + bool found_valid = false; + + int start_h = static_cast(oh * stride_height) - static_cast(pad_height); + int end_h = start_h + static_cast(kernel_height); + int start_w = static_cast(ow * stride_width) - static_cast(pad_width); + int end_w = start_w + static_cast(kernel_width); + + for (int kh = start_h; kh < end_h; ++kh) { + for (int kw = start_w; kw < end_w; ++kw) { + if (kh >= 0 && kh < static_cast(input_height) && kw >= 0 && kw < static_cast(input_width)) { + T val = input[input_offset + kh * input_width + kw]; + if (!found_valid || is_greater(val, max_val)) { + max_val = val; + found_valid = true; + } + } + } + } + + output[output_offset + oh * output_width + ow] = max_val; + } + } + } + } + } + + // 3D最大池化 + template + void maxpool_3d(T *output, const T *input) const { + size_t batch_size = info.batch; + size_t channels = info.channels; + size_t input_depth = info.input_dims[0]; + size_t input_height = info.input_dims[1]; + size_t input_width = info.input_dims[2]; + size_t output_depth = info.output_dims[0]; + size_t output_height = info.output_dims[1]; + size_t output_width = info.output_dims[2]; + size_t kernel_depth = info.kernel_sizes[0]; + size_t kernel_height = info.kernel_sizes[1]; + size_t kernel_width = info.kernel_sizes[2]; + size_t stride_depth = info.strides[0]; + size_t stride_height = info.strides[1]; + size_t stride_width = info.strides[2]; + size_t pad_depth = info.pads[0]; + size_t pad_height = info.pads[1]; + size_t pad_width = info.pads[2]; + + // 并行处理每个批次和通道 +#pragma omp parallel for schedule(static) + for (int b = 0; b < static_cast(batch_size); ++b) { + for (int 
c = 0; c < static_cast(channels); ++c) { + size_t input_offset = static_cast(b) * channels * input_depth * input_height * input_width + static_cast(c) * input_depth * input_height * input_width; + size_t output_offset = static_cast(b) * channels * output_depth * output_height * output_width + static_cast(c) * output_depth * output_height * output_width; + + for (size_t od = 0; od < output_depth; ++od) { + for (size_t oh = 0; oh < output_height; ++oh) { + for (size_t ow = 0; ow < output_width; ++ow) { + T max_val = get_min_value(); + bool found_valid = false; + + int start_d = static_cast(od * stride_depth) - static_cast(pad_depth); + int end_d = start_d + static_cast(kernel_depth); + int start_h = static_cast(oh * stride_height) - static_cast(pad_height); + int end_h = start_h + static_cast(kernel_height); + int start_w = static_cast(ow * stride_width) - static_cast(pad_width); + int end_w = start_w + static_cast(kernel_width); + + for (int kd = start_d; kd < end_d; ++kd) { + for (int kh = start_h; kh < end_h; ++kh) { + for (int kw = start_w; kw < end_w; ++kw) { + if (kd >= 0 && kd < static_cast(input_depth) && kh >= 0 && kh < static_cast(input_height) && kw >= 0 && kw < static_cast(input_width)) { + T val = input[input_offset + kd * input_height * input_width + kh * input_width + kw]; + if (!found_valid || is_greater(val, max_val)) { + max_val = val; + found_valid = true; + } + } + } + } + } + + output[output_offset + od * output_height * output_width + oh * output_width + ow] = max_val; + } + } + } + } + } + } + + // 主要的最大池化计算函数 + template + void maxpool_cpu(T *output, const T *input) const { + switch (info.ndim) { + case 1: + maxpool_1d(output, input); + break; + case 2: + maxpool_2d(output, input); + break; + case 3: + maxpool_3d(output, input); + break; + default: + break; + } + } + +public: + Opaque(Opaque &&other) noexcept + : handle(other.handle), + info(std::move(other.info)), + workspace_size(other.workspace_size) { + other.handle = nullptr; + other.workspace_size = 0; + } + + ~Opaque() = default; + + static inline utils::Result + create(device::cpu::Handle *handle_ptr, + MaxPoolInfo &info, + infiniDtype_t data_type) { + if (data_type != INFINI_DTYPE_F32 && data_type != INFINI_DTYPE_F16 && data_type != INFINI_DTYPE_BF16) { + return INFINI_STATUS_BAD_TENSOR_DTYPE; + } + + Opaque opaque(handle_ptr, info); + return utils::Result(std::move(opaque)); + } + + infiniStatus_t calculate(void *workspace, size_t workspace_size, + void *output, const void *input, infiniDtype_t dtype) const { + + if (!output || !input) { + return INFINI_STATUS_BAD_PARAM; + } + + switch (dtype) { + case INFINI_DTYPE_F32: { + float *typed_output = static_cast(output); + const float *typed_input = static_cast(input); + maxpool_cpu(typed_output, typed_input); + break; + } + + case INFINI_DTYPE_F16: { + fp16_t *typed_output = static_cast(output); + const fp16_t *typed_input = static_cast(input); + maxpool_cpu(typed_output, typed_input); + break; + } + + case INFINI_DTYPE_BF16: { + bf16_t *typed_output = static_cast(output); + const bf16_t *typed_input = static_cast(input); + maxpool_cpu(typed_output, typed_input); + break; + } + + default: + return INFINI_STATUS_BAD_TENSOR_DTYPE; + } + + return INFINI_STATUS_SUCCESS; + } +}; + +Descriptor::~Descriptor() { + if (_opaque) { + delete _opaque; + } +} + +infiniStatus_t Descriptor::create(infiniopHandle_t handle_, + Descriptor **desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + void *kernel_size, void *strides, void *pads, + 
bool ceil_mode) { + auto handle = reinterpret_cast(handle_); + auto dtype = input_desc->dtype(); + + CHECK_DTYPE(dtype, INFINI_DTYPE_F32, INFINI_DTYPE_F16, INFINI_DTYPE_BF16); + + auto result = MaxPoolInfo::create(output_desc, input_desc, kernel_size, + strides, pads, ceil_mode); + CHECK_RESULT(result); + auto info = result.take(); + + auto opaque_result = Opaque::create(handle, info, dtype); + CHECK_RESULT(opaque_result); + auto opaque = new Opaque(opaque_result.take()); + + *desc_ptr = new Descriptor(dtype, std::move(info), opaque->workspace_size, + opaque, handle->device, handle->device_id); + + return INFINI_STATUS_SUCCESS; +} + +infiniStatus_t Descriptor::calculate(void *workspace, size_t workspace_size, + void *output, const void *input, + void *stream) const { + return _opaque->calculate(workspace, workspace_size, output, input, _dtype); +} + +} // namespace op::maxpool::cpu diff --git a/src/infiniop/ops/maxpool/cpu/maxpool_cpu.h b/src/infiniop/ops/maxpool/cpu/maxpool_cpu.h new file mode 100644 index 000000000..f3ecd349d --- /dev/null +++ b/src/infiniop/ops/maxpool/cpu/maxpool_cpu.h @@ -0,0 +1,8 @@ +#ifndef __MAX_POOL_CPU_H__ +#define __MAX_POOL_CPU_H__ + +#include "../maxpool.h" + +DESCRIPTOR(cpu) + +#endif // __MAX_POOL_CPU_H__ diff --git a/src/infiniop/ops/maxpool/info.h b/src/infiniop/ops/maxpool/info.h new file mode 100644 index 000000000..ff56fe28c --- /dev/null +++ b/src/infiniop/ops/maxpool/info.h @@ -0,0 +1,113 @@ +#ifndef __MAX_POOL_INFO_H__ +#define __MAX_POOL_INFO_H__ + +#include "../../../utils.h" +#include "../../operator.h" +#include "../../tensor.h" +#include + +namespace op::maxpool { + +inline utils::Result calculateMaxPoolOutputSize( + size_t input_size, + size_t kernel_size, + size_t stride, + size_t padding = 0, + bool ceil_mode = false) { + + if (stride == 0) { + return utils::Result(INFINI_STATUS_BAD_PARAM); + } + if (kernel_size == 0) { + return utils::Result(INFINI_STATUS_BAD_PARAM); + } + + // 理论最大输出数 + size_t max_output = 0; + if (ceil_mode) { + max_output = (input_size + 2 * padding - kernel_size + stride - 1) / stride + 1; + } else { + max_output = (input_size + 2 * padding - kernel_size) / stride + 1; + } + + size_t valid_output = 0; + for (size_t i = 0; i < max_output; ++i) { + int64_t start = static_cast(i) * stride - padding; + int64_t end = start + kernel_size; + // 判断区间 [start, end) 和 [0, input_size) 是否有交集 + int64_t real_start = std::max(start, int64_t(0)); + int64_t real_end = std::min(end, int64_t(input_size)); + if (real_end > real_start) { + ++valid_output; + } + } + return utils::Result(valid_output); +} + +class MaxPoolInfo { + MaxPoolInfo() = default; + +public: + std::vector input_dims; + std::vector output_dims; + std::vector kernel_sizes; + std::vector strides; + std::vector pads; + bool ceil_mode; + size_t ndim; + size_t batch; + size_t channels; + + static utils::Result create( + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + void *kernel_size, + void *strides, + void *pads, + bool ceil_mode) { + + MaxPoolInfo info; + + if (input_desc->ndim() < 3 || input_desc->ndim() > 5) { + return INFINI_STATUS_BAD_TENSOR_SHAPE; + } + + if (input_desc->ndim() != output_desc->ndim()) { + return INFINI_STATUS_BAD_TENSOR_SHAPE; + } + + if (input_desc->dim(0) != output_desc->dim(0) || input_desc->dim(1) != output_desc->dim(1)) { + return INFINI_STATUS_BAD_TENSOR_SHAPE; + } + + info.ndim = input_desc->ndim() - 2; // spatial dimensions + info.batch = input_desc->dim(0); + info.channels = input_desc->dim(1); + 
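        // ----------------------------------------------------------------------------------------
        // [Editor's note - illustrative worked example, not part of this patch.] For
        // calculateMaxPoolOutputSize above with input_size = 5, kernel = 2, stride = 2,
        // padding = 1, ceil_mode = true:
        //   raw ceil count : (5 + 2*1 - 2 + 2 - 1) / 2 + 1 = 4
        //   window starts  : -1, 1, 3, 5 -> the last window [5, 7) lies entirely in the right
        //                    padding, so the recount loop drops it and returns 3, which is the
        //                    output size the per-dimension check below expects.
        // ----------------------------------------------------------------------------------------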
info.ceil_mode = ceil_mode; + + auto kernel_ptr = reinterpret_cast(kernel_size); + auto stride_ptr = reinterpret_cast(strides); + auto pad_ptr = reinterpret_cast(pads); + + // Get spatial dimensions + for (size_t i = 0; i < info.ndim; ++i) { + info.input_dims.push_back(input_desc->dim(i + 2)); + info.kernel_sizes.push_back(kernel_ptr[i]); + info.strides.push_back(stride_ptr[i]); + info.pads.push_back(pad_ptr[i]); + auto output_size = calculateMaxPoolOutputSize( + info.input_dims[i], info.kernel_sizes[i], info.strides[i], info.pads[i], info.ceil_mode); + CHECK_RESULT(output_size); + size_t expected_size = output_size.take(); + if (expected_size != output_desc->dim(i + 2)) { + return INFINI_STATUS_BAD_TENSOR_SHAPE; + } + + info.output_dims.push_back(output_desc->dim(i + 2)); + } + return utils::Result(std::move(info)); + } +}; +} // namespace op::maxpool + +#endif // __MAX_POOL_INFO_H__ diff --git a/src/infiniop/ops/maxpool/maxpool.h b/src/infiniop/ops/maxpool/maxpool.h new file mode 100644 index 000000000..5ee7703c5 --- /dev/null +++ b/src/infiniop/ops/maxpool/maxpool.h @@ -0,0 +1,53 @@ +#ifndef __MAX_POOL_H__ +#define __MAX_POOL_H__ + +#include "../../operator.h" +#include "info.h" + +#define DESCRIPTOR(NAMESPACE) \ + \ + namespace op::maxpool::NAMESPACE { \ + class Descriptor final : public InfiniopDescriptor { \ + struct Opaque; \ + Opaque *_opaque; \ + infiniDtype_t _dtype; \ + MaxPoolInfo _info; \ + size_t _workspace_size; \ + \ + Descriptor( \ + infiniDtype_t dtype, \ + MaxPoolInfo info, \ + size_t workspace_size_, \ + Opaque *opaque, \ + infiniDevice_t device_type, \ + int device_id) \ + : InfiniopDescriptor{device_type, device_id}, \ + _opaque(opaque), \ + _dtype(dtype), \ + _info(info), \ + _workspace_size(workspace_size_) {} \ + \ + public: \ + ~Descriptor(); \ + \ + size_t workspaceSize() const { return _workspace_size; } \ + \ + static infiniStatus_t create( \ + infiniopHandle_t handle, \ + Descriptor **desc_ptr, \ + infiniopTensorDescriptor_t output_desc, \ + infiniopTensorDescriptor_t input_desc, \ + void *kernel_size, \ + void *strides, \ + void *pads, \ + bool ceil_mode); \ + \ + infiniStatus_t calculate( \ + void *workspace, size_t workspace_size, \ + void *output, \ + const void *input, \ + void *stream) const; \ + }; \ + } + +#endif // __MAX_POOL_H__ diff --git a/src/infiniop/ops/maxpool/nvidia/maxpool_nvidia.cu b/src/infiniop/ops/maxpool/nvidia/maxpool_nvidia.cu new file mode 100644 index 000000000..8b94a29c1 --- /dev/null +++ b/src/infiniop/ops/maxpool/nvidia/maxpool_nvidia.cu @@ -0,0 +1,240 @@ +#include "../../../devices/nvidia/nvidia_common.cuh" +#include "../../../devices/nvidia/nvidia_handle.cuh" +#include "maxpool_nvidia.cuh" + +#define DESTROY_CUDNN_DESCRIPTOR(desc_ptr, destroy_func) \ + do { \ + if (desc_ptr) { \ + destroy_func(desc_ptr); \ + desc_ptr = nullptr; \ + } \ + } while (0) + +#define CLEANUP_CUDNN_DESCRIPTORS() \ + do { \ + DESTROY_CUDNN_DESCRIPTOR(input_desc, cudnnDestroyTensorDescriptor); \ + DESTROY_CUDNN_DESCRIPTOR(output_desc, cudnnDestroyTensorDescriptor); \ + DESTROY_CUDNN_DESCRIPTOR(pooling_desc, cudnnDestroyPoolingDescriptor); \ + } while (0) + +namespace op::maxpool::nvidia { + +struct Descriptor::Opaque { + std::shared_ptr internal; + size_t workspace_size = 0; + +#ifdef ENABLE_CUDNN_API + cudnnTensorDescriptor_t input_desc = nullptr; + cudnnTensorDescriptor_t output_desc = nullptr; + cudnnPoolingDescriptor_t pooling_desc = nullptr; +#endif + +private: + Opaque(std::shared_ptr internal_ptr) + : internal(internal_ptr) {} + +#ifdef 
ENABLE_CUDNN_API + infiniStatus_t getCudnnDataType(infiniDtype_t data_type, + cudnnDataType_t &cudnn_data_type) const { + if (data_type == INFINI_DTYPE_F16) { + cudnn_data_type = device::nvidia::getCudnnDtype(data_type); + } else if (data_type == INFINI_DTYPE_F32) { + cudnn_data_type = device::nvidia::getCudnnDtype(data_type); + } else if (data_type == INFINI_DTYPE_BF16) { + cudnn_data_type = device::nvidia::getCudnnDtype(data_type); + } else { + return INFINI_STATUS_BAD_TENSOR_DTYPE; + } + return INFINI_STATUS_SUCCESS; + } + + infiniStatus_t createPoolingDescriptors(const MaxPoolInfo &info, + cudnnDataType_t cudnn_data_type) { + // Create CUDNN descriptors + CHECK_CUDNN(cudnnCreateTensorDescriptor(&input_desc)); + CHECK_CUDNN(cudnnCreateTensorDescriptor(&output_desc)); + CHECK_CUDNN(cudnnCreatePoolingDescriptor(&pooling_desc)); + + // Setup tensor descriptors + std::vector input_dims_vec = {static_cast(info.batch), + static_cast(info.channels)}; + std::vector output_dims_vec = {static_cast(info.batch), + static_cast(info.channels)}; + + for (size_t i = 0; i < info.ndim; ++i) { + input_dims_vec.push_back(static_cast(info.input_dims[i])); + output_dims_vec.push_back(static_cast(info.output_dims[i])); + } + + if (info.ndim == 1) { + // For 1D pooling, add dummy dimension + input_dims_vec.push_back(1); + output_dims_vec.push_back(1); + } + + CHECK_CUDNN(cudnnSetTensorNdDescriptorEx( + input_desc, CUDNN_TENSOR_NCHW, cudnn_data_type, input_dims_vec.size(), + input_dims_vec.data())); + + CHECK_CUDNN(cudnnSetTensorNdDescriptorEx( + output_desc, CUDNN_TENSOR_NCHW, cudnn_data_type, output_dims_vec.size(), + output_dims_vec.data())); + + return INFINI_STATUS_SUCCESS; + } + + infiniStatus_t setupPoolingDescriptor(const MaxPoolInfo &info) { + // Setup pooling descriptor + std::vector kernel_vec, stride_vec, pad_vec; + for (size_t i = 0; i < info.ndim; ++i) { + kernel_vec.push_back(static_cast(info.kernel_sizes[i])); + stride_vec.push_back(static_cast(info.strides[i])); + pad_vec.push_back(static_cast(info.pads[i])); + } + + if (info.ndim == 1) { + // For 1D pooling, add dummy dimension + kernel_vec.push_back(1); + stride_vec.push_back(1); + pad_vec.push_back(0); + } + + CHECK_CUDNN(cudnnSetPoolingNdDescriptor( + pooling_desc, CUDNN_POOLING_MAX, CUDNN_NOT_PROPAGATE_NAN, + kernel_vec.size(), kernel_vec.data(), pad_vec.data(), + stride_vec.data())); + + return INFINI_STATUS_SUCCESS; + } + + infiniStatus_t initializeCudnnContext(MaxPoolInfo &info, + infiniDtype_t data_type) { + cudnnDataType_t cudnn_data_type; + CHECK_STATUS(getCudnnDataType(data_type, cudnn_data_type)); + + CHECK_STATUS(createPoolingDescriptors(info, cudnn_data_type)); + CHECK_STATUS(setupPoolingDescriptor(info)); + + // Max pooling typically doesn't need workspace + workspace_size = 0; + + return INFINI_STATUS_SUCCESS; + } +#endif + +public: + Opaque(Opaque &&other) noexcept + : internal(std::move(other.internal)), + workspace_size(other.workspace_size) + // clang-format off +#ifdef ENABLE_CUDNN_API + , input_desc(other.input_desc) + , output_desc(other.output_desc) + , pooling_desc(other.pooling_desc) +#endif + // clang-format on + { +#ifdef ENABLE_CUDNN_API + other.input_desc = nullptr; + other.output_desc = nullptr; + other.pooling_desc = nullptr; +#endif + other.workspace_size = 0; + } + + ~Opaque() { +#ifdef ENABLE_CUDNN_API + CLEANUP_CUDNN_DESCRIPTORS(); +#endif + } + + static inline utils::Result + create(std::shared_ptr internal_ptr, + MaxPoolInfo &info, infiniDtype_t data_type) { +#ifdef ENABLE_CUDNN_API + Opaque 
opaque(internal_ptr); + auto status = opaque.initializeCudnnContext(info, data_type); + if (status != INFINI_STATUS_SUCCESS) { + return status; + } + return utils::Result(std::move(opaque)); +#else + return INFINI_STATUS_NOT_IMPLEMENTED; +#endif + } +}; + +Descriptor::~Descriptor() { + if (_opaque) { + delete _opaque; + } +} + +infiniStatus_t Descriptor::create(infiniopHandle_t handle_, + Descriptor **desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + void *kernel_size, void *strides, void *pads, + bool ceil_mode) { + +#ifdef ENABLE_CUDNN_API + auto handle = reinterpret_cast(handle_); + auto dtype = input_desc->dtype(); + + CHECK_DTYPE(dtype, INFINI_DTYPE_F16, INFINI_DTYPE_F32, INFINI_DTYPE_BF16); + + auto result = MaxPoolInfo::create(output_desc, input_desc, kernel_size, + strides, pads, ceil_mode); + CHECK_RESULT(result); + auto info = result.take(); + + auto opaque_result = Opaque::create(handle->internal(), info, dtype); + CHECK_RESULT(opaque_result); + auto opaque = new Opaque(opaque_result.take()); + + *desc_ptr = new Descriptor(dtype, std::move(info), opaque->workspace_size, + opaque, handle->device, handle->device_id); + + return INFINI_STATUS_SUCCESS; +#else + return INFINI_STATUS_NOT_IMPLEMENTED; +#endif +} + +infiniStatus_t Descriptor::calculate(void *workspace, size_t workspace_size, + void *output, const void *input, + void *stream) const { + +#ifdef ENABLE_CUDNN_API + const float alpha = 1.0f, beta = 0.0f; + + // 打印input展平后的前十个数据 + // printf("MaxPool input (first 10 elements): "); + // const uint16_t *input_data = static_cast(input); + // for (int i = 0; i < 10; ++i) { + // // 将BF16转换为float显示 + // union { + // uint32_t bits; + // float value; + // } converter; + // uint16_t bf16_val = input_data[i]; + // converter.bits = static_cast(bf16_val) << 16; + // printf("%f ", converter.value); + // } + // printf("\n"); + + CHECK_STATUS(_opaque->internal->useCudnn( + (cudaStream_t)stream, [&](cudnnHandle_t handle) { + CHECK_CUDNN(cudnnPoolingForward(handle, _opaque->pooling_desc, &alpha, + _opaque->input_desc, input, &beta, + _opaque->output_desc, output)); + return INFINI_STATUS_SUCCESS; + })); + + return INFINI_STATUS_SUCCESS; +#else + return INFINI_STATUS_NOT_IMPLEMENTED; +#endif +} + +} // namespace op::maxpool::nvidia diff --git a/src/infiniop/ops/maxpool/nvidia/maxpool_nvidia.cuh b/src/infiniop/ops/maxpool/nvidia/maxpool_nvidia.cuh new file mode 100644 index 000000000..539ad5a1a --- /dev/null +++ b/src/infiniop/ops/maxpool/nvidia/maxpool_nvidia.cuh @@ -0,0 +1,8 @@ +#ifndef __MAX_POOL_CUDA_CUH__ +#define __MAX_POOL_CUDA_CUH__ + +#include "../maxpool.h" + +DESCRIPTOR(nvidia) + +#endif // __MAX_POOL_CUDA_CUH__ diff --git a/src/infiniop/ops/maxpool/operator.cc b/src/infiniop/ops/maxpool/operator.cc new file mode 100644 index 000000000..fa47b5b72 --- /dev/null +++ b/src/infiniop/ops/maxpool/operator.cc @@ -0,0 +1,147 @@ +#include "../../operator.h" +#include "../../handle.h" +#include "infiniop/ops/maxpool.h" + +#ifdef ENABLE_CPU_API +#include "cpu/maxpool_cpu.h" +#endif +#if defined(ENABLE_NVIDIA_API) || defined(ENABLE_ILUVATAR_API) || defined(ENABLE_QY_API) +#include "nvidia/maxpool_nvidia.cuh" +#endif + +__C infiniStatus_t infiniopCreateMaxPoolDescriptor( + infiniopHandle_t handle, + infiniopMaxPoolDescriptor_t *desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + void *kernel_size, + void *strides, + void *pads, + bool ceil_mode) { + +#define CREATE(CASE, NAMESPACE) \ + case CASE: \ + return 
op::maxpool::NAMESPACE::Descriptor::create( \ + handle, \ + reinterpret_cast(desc_ptr), \ + output_desc, \ + input_desc, \ + kernel_size, \ + strides, \ + pads, \ + ceil_mode) + + switch (handle->device) { + +#ifdef ENABLE_CPU_API + CREATE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + CREATE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + CREATE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + CREATE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef CREATE +} + +__C infiniStatus_t infiniopGetMaxPoolWorkspaceSize(infiniopMaxPoolDescriptor_t desc, size_t *size) { + +#define GET(CASE, NAMESPACE) \ + case CASE: \ + *size = reinterpret_cast(desc)->workspaceSize(); \ + return INFINI_STATUS_SUCCESS; + + switch (desc->device_type) { +#ifdef ENABLE_CPU_API + GET(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + GET(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + GET(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + GET(INFINI_DEVICE_QY, nvidia); +#endif + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } +#undef GET + + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; +} + +__C infiniStatus_t infiniopMaxPool( + infiniopMaxPoolDescriptor_t desc, + void *workspace, + size_t workspace_size, + void *output, + const void *input, + void *stream) { + +#define CALCULATE(CASE, NAMESPACE) \ + case CASE: \ + return reinterpret_cast(desc) \ + ->calculate(workspace, workspace_size, output, input, stream) + + switch (desc->device_type) { + +#ifdef ENABLE_CPU_API + CALCULATE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + CALCULATE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + CALCULATE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + CALCULATE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef CALCULATE +} + +__C infiniStatus_t +infiniopDestroyMaxPoolDescriptor(infiniopMaxPoolDescriptor_t desc) { + +#define DELETE(CASE, NAMESPACE) \ + case CASE: \ + delete reinterpret_cast(desc); \ + return INFINI_STATUS_SUCCESS; + + switch (desc->device_type) { + +#ifdef ENABLE_CPU_API + DELETE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + DELETE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + DELETE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + DELETE(INFINI_DEVICE_QY, nvidia); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef DELETE +} diff --git a/src/infiniop/ops/scatter/cpu/scatter_cpu.cc b/src/infiniop/ops/scatter/cpu/scatter_cpu.cc new file mode 100644 index 000000000..03b808bf5 --- /dev/null +++ b/src/infiniop/ops/scatter/cpu/scatter_cpu.cc @@ -0,0 +1,100 @@ +#include "scatter_cpu.h" +#include "../../../devices/cpu/common_cpu.h" +#include "../../../reduce/cpu/reduce.h" +#include "../info.h" + +namespace op::scatter::cpu { + +infiniStatus_t calculate_scatter( + const ScatterInfo &info, + char *output, + const char *input, + const int64_t *index) { + // -------------------------------- start: perform operator on CPU -------------------------------- + std::vector contiguous_strides(info.ndim); + ptrdiff_t last_dim = 1; + ptrdiff_t last_stride = 1; + for (size_t d = 0; d < info.ndim; d++) { + if (d == info.dim) { + continue; + } + contiguous_strides[d] = last_dim * last_stride; + last_dim = info.index_shape[d]; + last_stride = contiguous_strides[d]; + } + 
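    // --------------------------------------------------------------------------------------------
    // [Editor's note - illustrative, not part of this patch.] With index shape (2, 3) and
    // dim = 1, the loop above fills only contiguous_strides[0] = 1 and skips the scatter
    // dimension, so batch_size below becomes 2: one "row" per combination of the non-scatter
    // coordinates. Each row then copies index_shape[dim] = 3 elements with
    //   output[n][index[n][c]] = input[n][c]                       (dim = 1)
    // which matches torch.Tensor.scatter_ semantics for non-overlapping indices.
    // --------------------------------------------------------------------------------------------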
size_t batch_size = last_dim * last_stride; + int scatter_dim = static_cast(info.dim); + size_t element_size = infiniSizeOf(info.dtype); + +#pragma omp parallel for + for (int n = 0; n < static_cast(batch_size); n++) { + auto output_ptr = output; + auto input_ptr = input; + auto index_ptr = index; + size_t rem = static_cast(n); + for (int d = static_cast(info.ndim) - 1; d >= 0; d--) { + if (d == scatter_dim) { + continue; + } + size_t dim_index = rem / contiguous_strides[d]; + rem = rem % contiguous_strides[d]; + output_ptr += dim_index * element_size * info.output_strides[d]; + input_ptr += dim_index * element_size * info.input_strides[d]; + index_ptr += dim_index * info.index_strides[d]; + } + for (size_t c = 0; c < info.index_shape[scatter_dim]; c++) { + int64_t scatter_number = *(index_ptr + c * info.index_strides[scatter_dim]); + memcpy( + output_ptr + scatter_number * element_size * info.output_strides[scatter_dim], + input_ptr + c * element_size * info.input_strides[scatter_dim], + element_size); + } + } + + // --------------------------------- end: perform operator on CPU --------------------------------- + return INFINI_STATUS_SUCCESS; +} + +Descriptor::~Descriptor() = default; + +infiniStatus_t Descriptor::create( + infiniopHandle_t handle_, + Descriptor **desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + infiniopTensorDescriptor_t index_desc, + size_t dim) { + auto handle = reinterpret_cast(handle_); + + // --------------------- start: check data type and calculate workspace size ---------------------- + auto dtype = input_desc->dtype(); + size_t WorkSpaceSize = 0; + // ---------------------- end: check data type and calculate workspace size ----------------------- + + auto result = ScatterInfo::createScatterInfo( + output_desc, + input_desc, + index_desc, + dim); + CHECK_RESULT(result); + const ScatterInfo &info = result.take(); + + *desc_ptr = new Descriptor( + dtype, std::move(info), WorkSpaceSize, + nullptr, + handle->device, handle->device_id); + + return INFINI_STATUS_SUCCESS; +} + +infiniStatus_t Descriptor::calculate( + void *workspace, + size_t workspace_size, + void *output, + const void *input, + const void *index, + void *stream) const { + + return calculate_scatter(_info, (char *)output, (const char *)input, (const int64_t *)index); +} +} // namespace op::scatter::cpu diff --git a/src/infiniop/ops/scatter/cpu/scatter_cpu.h b/src/infiniop/ops/scatter/cpu/scatter_cpu.h new file mode 100644 index 000000000..ad52c7b91 --- /dev/null +++ b/src/infiniop/ops/scatter/cpu/scatter_cpu.h @@ -0,0 +1,8 @@ +#ifndef __SCATTER_CPU_H__ +#define __SCATTER_CPU_H__ + +#include "../scatter.h" + +DESCRIPTOR(cpu) + +#endif // __SCATTER_CPU_H__ diff --git a/src/infiniop/ops/scatter/cuda/kernel.cuh b/src/infiniop/ops/scatter/cuda/kernel.cuh new file mode 100644 index 000000000..733d2e14d --- /dev/null +++ b/src/infiniop/ops/scatter/cuda/kernel.cuh @@ -0,0 +1,37 @@ +#ifndef __SCATTER_KERNEL_CUH__ +#define __SCATTER_KERNEL_CUH__ +// ------------------------------- start: perform operator on CUDA -------------------------------- +template +__device__ void scatterKernel( + Tdata *output, + const Tdata *input, + const int64_t *index, + size_t ndim, + size_t index_scatter_size, + ptrdiff_t *output_strides, + ptrdiff_t *input_strides, + ptrdiff_t *index_strides, + ptrdiff_t *contiguous_strides, + int scatter_dim) { + auto output_ptr = output; + auto input_ptr = input; + auto index_ptr = index; + size_t rem = blockIdx.x; + for (int d = ndim - 1; d >= 0; 
d--) { + if (d == scatter_dim) { + continue; + } + size_t dim_index = rem / contiguous_strides[d]; + rem = rem % contiguous_strides[d]; + output_ptr += dim_index * output_strides[d]; + input_ptr += dim_index * input_strides[d]; + index_ptr += dim_index * index_strides[d]; + } + for (size_t c = threadIdx.x; c < index_scatter_size; c += BLOCK_SIZE) { + int64_t scatter_number = *(index_ptr + c * index_strides[scatter_dim]); + *(output_ptr + scatter_number * output_strides[scatter_dim]) = *(input_ptr + c * input_strides[scatter_dim]); + } +} +// -------------------------------- end: perform operator on CUDA --------------------------------- + +#endif // __SCATTER_KERNEL_CUH__ diff --git a/src/infiniop/ops/scatter/info.h b/src/infiniop/ops/scatter/info.h new file mode 100644 index 000000000..4ef8b9e76 --- /dev/null +++ b/src/infiniop/ops/scatter/info.h @@ -0,0 +1,64 @@ +#ifndef __SCATTER_INFO_H__ +#define __SCATTER_INFO_H__ + +#include "../../../utils.h" +#include "../../operator.h" +#include "../../tensor.h" + +namespace op::scatter { + +class ScatterInfo { +private: + ScatterInfo() = default; + +public: + // ---------------------------- start: define member variables of Info ---------------------------- + infiniDtype_t dtype; + size_t ndim; + std::vector output_shape; + std::vector input_shape; + std::vector index_shape; + std::vector output_strides; + std::vector input_strides; + std::vector index_strides; + size_t dim; + + // ----------------------------- end: define member variables of Info ----------------------------- + + static utils::Result createScatterInfo( + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + infiniopTensorDescriptor_t index_desc, + size_t dim) { + // ------------------------- start: check tensor shape and input validity ------------------------- + CHECK_OR_RETURN( + input_desc->ndim() == output_desc->ndim() && output_desc->ndim() == index_desc->ndim(), + INFINI_STATUS_BAD_TENSOR_SHAPE); + size_t ndim = output_desc->ndim(); + for (size_t d = 0; d < ndim; d++) { + if (d != dim) { + CHECK_OR_RETURN( + index_desc->dim(d) <= input_desc->dim(d) && index_desc->dim(d) <= output_desc->dim(d), + INFINI_STATUS_BAD_TENSOR_SHAPE;); + } + } + CHECK_OR_RETURN(index_desc->dim(dim) <= input_desc->dim(dim), INFINI_STATUS_BAD_TENSOR_SHAPE); + // -------------------------- end: check tensor shape and input validity -------------------------- + return utils::Result(ScatterInfo{ + // ------------------------------ start: create an instance of Info ------------------------------- + output_desc->dtype(), + ndim, + output_desc->shape(), + input_desc->shape(), + index_desc->shape(), + output_desc->strides(), + input_desc->strides(), + index_desc->strides(), + dim + // ------------------------------- end: create an instance of Info -------------------------------- + }); + } +}; +} // namespace op::scatter + +#endif // __SCATTER_INFO_H__ diff --git a/src/infiniop/ops/scatter/metax/scatter_metax.h b/src/infiniop/ops/scatter/metax/scatter_metax.h new file mode 100644 index 000000000..d5ce0ef16 --- /dev/null +++ b/src/infiniop/ops/scatter/metax/scatter_metax.h @@ -0,0 +1,8 @@ +#ifndef __SCATTER_METAX_H__ +#define __SCATTER_METAX_H__ + +#include "../scatter.h" + +DESCRIPTOR(metax) + +#endif // __SCATTER_METAX_H__ diff --git a/src/infiniop/ops/scatter/metax/scatter_metax.maca b/src/infiniop/ops/scatter/metax/scatter_metax.maca new file mode 100644 index 000000000..1c742f60d --- /dev/null +++ b/src/infiniop/ops/scatter/metax/scatter_metax.maca @@ -0,0 +1,190 @@ 
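// ------------------------------------------------------------------------------------------------
// [Editor's illustrative sketch - not part of this patch.] A minimal host-side reference of the
// scatter semantics implemented by scatterKernel above, restricted to contiguous 2-D tensors and
// dim = 1. It is only meant to make the index arithmetic easy to check against small inputs; the
// names below are hypothetical and nothing here is the library's API.
#include <cstdint>
#include <cstdio>
#include <vector>

// output[i][index[i][j]] = input[i][j] for every i < rows, j < index_cols   (dim = 1)
static void scatter_dim1_reference(std::vector<float> &output, size_t output_cols,
                                   const std::vector<float> &input, size_t input_cols,
                                   const std::vector<int64_t> &index, size_t index_cols,
                                   size_t rows) {
    for (size_t i = 0; i < rows; ++i) {
        for (size_t j = 0; j < index_cols; ++j) {
            int64_t target = index[i * index_cols + j];
            output[i * output_cols + static_cast<size_t>(target)] = input[i * input_cols + j];
        }
    }
}

int main() {
    // 2 x 3 source scattered into a 2 x 4 destination along dim 1.
    std::vector<float> input = {1, 2, 3, 4, 5, 6};
    std::vector<int64_t> index = {0, 2, 3, 1, 0, 2};
    std::vector<float> output(2 * 4, 0.0f);
    scatter_dim1_reference(output, 4, input, 3, index, 3, 2);
    for (size_t i = 0; i < 2; ++i) {
        for (size_t j = 0; j < 4; ++j) {
            std::printf("%g ", output[i * 4 + j]); // row 0: 1 0 2 3, row 1: 5 4 6 0
        }
        std::printf("\n");
    }
    return 0;
}
// ------------------------------------------------------------------------------------------------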
+#include "../../../devices/metax/metax_common.h" +#include "scatter_metax.h" +#include +#include "../../../devices/metax/metax_kernel_common.h" +#include "../../../reduce/cuda/reduce.cuh" +#include "../cuda/kernel.cuh" +#include "../info.h" + +namespace op::scatter::metax { + +template +INFINIOP_METAX_KERNEL launchKernel( + Tdata * output, + const Tdata * input, + const int64_t * index, + size_t ndim, + size_t index_scatter_size, + ptrdiff_t * output_strides, + ptrdiff_t * input_strides, + ptrdiff_t * index_strides, + ptrdiff_t * contiguous_strides, + int scatter_dim +) { + scatterKernel( + output, + input, + index, + ndim, + index_scatter_size, + output_strides, + input_strides, + index_strides, + contiguous_strides, + scatter_dim + ); +} +// ----------------------- end: launchKernel: call kernel function of CUDA ------------------------ + +// ----------------------------------- start: call launchKernel ----------------------------------- +template +infiniStatus_t calculate_scatter( + const ScatterInfo &info, + Tdata * output, + const Tdata * input, + const int64_t * index, + hcStream_t stream, + void * workspace +) { + size_t ndim = info.ndim; + ptrdiff_t * contiguous_strides = new ptrdiff_t[ndim]; + size_t last_dim = 1, last_stride = 1; + size_t scatter_dim = info.dim; + for(size_t d = 0; d < ndim; d ++) + { + if (d == scatter_dim) + continue; + contiguous_strides[d] = last_dim * last_stride; + last_dim = info.index_shape[d]; + last_stride = contiguous_strides[d]; + } + + size_t batch_size = last_dim * last_stride; + + ptrdiff_t * contiguous_strides_cuda = reinterpret_cast(workspace); + ptrdiff_t * input_strides_cuda = contiguous_strides_cuda + ndim; + ptrdiff_t * output_strides_cuda = input_strides_cuda + ndim; + ptrdiff_t * index_strides_cuda = output_strides_cuda + ndim; + + CHECK_METAX(hcMemcpyAsync(contiguous_strides_cuda, contiguous_strides, sizeof(ptrdiff_t) * ndim, hcMemcpyHostToDevice, stream)); + CHECK_METAX(hcMemcpyAsync(input_strides_cuda, info.input_strides.data(), sizeof(ptrdiff_t) * ndim, hcMemcpyHostToDevice, stream)); + CHECK_METAX(hcMemcpyAsync(output_strides_cuda, info.output_strides.data(), sizeof(ptrdiff_t) * ndim, hcMemcpyHostToDevice, stream)); + CHECK_METAX(hcMemcpyAsync(index_strides_cuda, info.index_strides.data(), sizeof(ptrdiff_t) * ndim, hcMemcpyHostToDevice, stream)); + + + launchKernel<<>>( + output, + input, + index, + ndim, + info.index_shape[scatter_dim], + output_strides_cuda, + input_strides_cuda, + index_strides_cuda, + contiguous_strides_cuda, + scatter_dim + ); + delete[] contiguous_strides; + return INFINI_STATUS_SUCCESS; +} +// ------------------------------------ end: call launchKernel ------------------------------------ + + +struct Descriptor::Opaque { + std::shared_ptr internal; +}; + +Descriptor::~Descriptor() { + delete _opaque; +} + +infiniStatus_t Descriptor::create( + infiniopHandle_t handle_, + Descriptor **desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + infiniopTensorDescriptor_t index_desc, + size_t dim +) { + auto handle = reinterpret_cast(handle_); +// --------------------- start: check data type and calculate workspace size ---------------------- + auto dtype = output_desc->dtype(); + auto result = ScatterInfo::createScatterInfo( + output_desc, + input_desc, + index_desc, + dim + ); + CHECK_RESULT(result); + const ScatterInfo &info = result.take(); + size_t WorkSpaceSize = sizeof(ptrdiff_t) * input_desc->ndim() * 4; +// ---------------------- end: check data type and calculate 
workspace size ----------------------- + + *desc_ptr = new Descriptor( + dtype, std::move(info), WorkSpaceSize, + new Opaque{handle->internal()}, + handle->device, handle->device_id + ); + return INFINI_STATUS_SUCCESS; +} + + + +infiniStatus_t Descriptor::calculate( + void * workspace, + size_t workspace_size, + void * output, + const void * input, + const void * index, + void *stream_ +) const { + if (workspace_size < _workspace_size) + return INFINI_STATUS_INSUFFICIENT_WORKSPACE; + + hcStream_t stream = (hcStream_t)stream_; + + #define CALCULATE_SCATTER(BLOCK_SIZE, TDATA) \ + calculate_scatter(_info, (TDATA *)output, (const TDATA *)input, (const int64_t *)index, stream, workspace) + + #define CALCULATE_SCATTER_WITH_METAX_BLOCK(BLOCK_SIZE) \ + switch (_info.dtype) { \ + case INFINI_DTYPE_BOOL: \ + return CALCULATE_SCATTER(BLOCK_SIZE, bool); \ + case INFINI_DTYPE_U8: \ + return CALCULATE_SCATTER(BLOCK_SIZE, uint8_t); \ + case INFINI_DTYPE_U16: \ + return CALCULATE_SCATTER(BLOCK_SIZE, uint16_t); \ + case INFINI_DTYPE_U32: \ + return CALCULATE_SCATTER(BLOCK_SIZE, uint32_t); \ + case INFINI_DTYPE_U64: \ + return CALCULATE_SCATTER(BLOCK_SIZE, uint64_t); \ + case INFINI_DTYPE_I8: \ + return CALCULATE_SCATTER(BLOCK_SIZE, int8_t); \ + case INFINI_DTYPE_I16: \ + return CALCULATE_SCATTER(BLOCK_SIZE, int16_t); \ + case INFINI_DTYPE_I32: \ + return CALCULATE_SCATTER(BLOCK_SIZE, int32_t); \ + case INFINI_DTYPE_I64: \ + return CALCULATE_SCATTER(BLOCK_SIZE, int64_t); \ + case INFINI_DTYPE_F16: \ + return CALCULATE_SCATTER(BLOCK_SIZE, half); \ + case INFINI_DTYPE_F32: \ + return CALCULATE_SCATTER(BLOCK_SIZE, float); \ + case INFINI_DTYPE_BF16: \ + return CALCULATE_SCATTER(BLOCK_SIZE, cuda_bfloat16); \ + default: \ + return INFINI_STATUS_BAD_TENSOR_DTYPE; \ + } + + if (_opaque->internal->maxThreadsPerBlock() == METAX_BLOCK_SIZE_1024) + CALCULATE_SCATTER_WITH_METAX_BLOCK(METAX_BLOCK_SIZE_1024) + else if (_opaque->internal->maxThreadsPerBlock() == METAX_BLOCK_SIZE_512) + CALCULATE_SCATTER_WITH_METAX_BLOCK(METAX_BLOCK_SIZE_512) + else + return INFINI_STATUS_DEVICE_ARCHITECTURE_NOT_SUPPORTED; + return INFINI_STATUS_SUCCESS; + + #undef CALCULATE_SCATTER_WITH_METAX_BLOCK + #undef CALCULATE_SCATTER +} +} // namespace op::scatter::metax diff --git a/src/infiniop/ops/scatter/nvidia/scatter_nvidia.cu b/src/infiniop/ops/scatter/nvidia/scatter_nvidia.cu new file mode 100644 index 000000000..136ad1f65 --- /dev/null +++ b/src/infiniop/ops/scatter/nvidia/scatter_nvidia.cu @@ -0,0 +1,180 @@ +#include "../../../devices/nvidia/nvidia_common.cuh" +#include "../../../devices/nvidia/nvidia_handle.cuh" +#include "../../../devices/nvidia/nvidia_kernel_common.cuh" + +#include "../cuda/kernel.cuh" +#include "../info.h" +#include "scatter_nvidia.cuh" + +namespace op::scatter::nvidia { + +// ---------------------- start: launchKernel: call kernel function of CUDA ----------------------- +template +INFINIOP_CUDA_KERNEL launchKernel( + Tdata *output, + const Tdata *input, + const int64_t *index, + size_t ndim, + size_t index_scatter_size, + ptrdiff_t *output_strides, + ptrdiff_t *input_strides, + ptrdiff_t *index_strides, + ptrdiff_t *contiguous_strides, + int scatter_dim) { + scatterKernel( + output, + input, + index, + ndim, + index_scatter_size, + output_strides, + input_strides, + index_strides, + contiguous_strides, + scatter_dim); +} +// ----------------------- end: launchKernel: call kernel function of CUDA ------------------------ + +// ----------------------------------- start: call launchKernel 
----------------------------------- +template +infiniStatus_t calculate_scatter( + const ScatterInfo &info, + Tdata *output, + const Tdata *input, + const int64_t *index, + cudaStream_t stream, + void *workspace) { + size_t ndim = info.ndim; + ptrdiff_t *contiguous_strides = new ptrdiff_t[ndim]; + size_t last_dim = 1, last_stride = 1; + size_t scatter_dim = info.dim; + for (size_t d = 0; d < ndim; d++) { + if (d == scatter_dim) { + continue; + } + contiguous_strides[d] = last_dim * last_stride; + last_dim = info.index_shape[d]; + last_stride = contiguous_strides[d]; + } + + size_t batch_size = last_dim * last_stride; + + ptrdiff_t *contiguous_strides_cuda = reinterpret_cast(workspace); + ptrdiff_t *input_strides_cuda = contiguous_strides_cuda + ndim; + ptrdiff_t *output_strides_cuda = input_strides_cuda + ndim; + ptrdiff_t *index_strides_cuda = output_strides_cuda + ndim; + + CHECK_CUDA(cudaMemcpyAsync(contiguous_strides_cuda, contiguous_strides, sizeof(ptrdiff_t) * ndim, cudaMemcpyHostToDevice, stream)); + CHECK_CUDA(cudaMemcpyAsync(input_strides_cuda, info.input_strides.data(), sizeof(ptrdiff_t) * ndim, cudaMemcpyHostToDevice, stream)); + CHECK_CUDA(cudaMemcpyAsync(output_strides_cuda, info.output_strides.data(), sizeof(ptrdiff_t) * ndim, cudaMemcpyHostToDevice, stream)); + CHECK_CUDA(cudaMemcpyAsync(index_strides_cuda, info.index_strides.data(), sizeof(ptrdiff_t) * ndim, cudaMemcpyHostToDevice, stream)); + + launchKernel<<>>( + output, + input, + index, + ndim, + info.index_shape[scatter_dim], + output_strides_cuda, + input_strides_cuda, + index_strides_cuda, + contiguous_strides_cuda, + scatter_dim); + delete[] contiguous_strides; + return INFINI_STATUS_SUCCESS; +} +// ------------------------------------ end: call launchKernel ------------------------------------ + +struct Descriptor::Opaque { + std::shared_ptr internal; +}; + +Descriptor::~Descriptor() { + delete _opaque; +} + +infiniStatus_t Descriptor::create( + infiniopHandle_t handle_, + Descriptor **desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + infiniopTensorDescriptor_t index_desc, + size_t dim) { + auto handle = reinterpret_cast(handle_); + // --------------------- start: check data type and calculate workspace size ---------------------- + auto dtype = output_desc->dtype(); + size_t WorkSpaceSize = sizeof(ptrdiff_t) * input_desc->ndim() * 4; + // ---------------------- end: check data type and calculate workspace size ----------------------- + auto result = ScatterInfo::createScatterInfo( + output_desc, + input_desc, + index_desc, + dim); + CHECK_RESULT(result); + const ScatterInfo &info = result.take(); + *desc_ptr = new Descriptor( + dtype, std::move(info), WorkSpaceSize, + new Opaque{handle->internal()}, + handle->device, handle->device_id); + return INFINI_STATUS_SUCCESS; +} + +infiniStatus_t Descriptor::calculate( + void *workspace, + size_t workspace_size, + void *output, + const void *input, + const void *index, + void *stream_) const { + if (workspace_size < _workspace_size) { + return INFINI_STATUS_INSUFFICIENT_WORKSPACE; + } + cudaStream_t stream = (cudaStream_t)stream_; +#define CALCULATE_SCATTER(BLOCK_SIZE, TDATA) \ + calculate_scatter(_info, (TDATA *)output, (const TDATA *)input, (const int64_t *)index, stream, workspace) +#define CALCULATE_SCATTER_WITH_BLOCK_SIZE(BLOCK_SIZE) \ + switch (_info.dtype) { \ + case INFINI_DTYPE_BOOL: \ + return CALCULATE_SCATTER(BLOCK_SIZE, bool); \ + case INFINI_DTYPE_U8: \ + return CALCULATE_SCATTER(BLOCK_SIZE, uint8_t); \ + case 
INFINI_DTYPE_U16: \ + return CALCULATE_SCATTER(BLOCK_SIZE, uint16_t); \ + case INFINI_DTYPE_U32: \ + return CALCULATE_SCATTER(BLOCK_SIZE, uint32_t); \ + case INFINI_DTYPE_U64: \ + return CALCULATE_SCATTER(BLOCK_SIZE, uint64_t); \ + case INFINI_DTYPE_I8: \ + return CALCULATE_SCATTER(BLOCK_SIZE, int8_t); \ + case INFINI_DTYPE_I16: \ + return CALCULATE_SCATTER(BLOCK_SIZE, int16_t); \ + case INFINI_DTYPE_I32: \ + return CALCULATE_SCATTER(BLOCK_SIZE, int32_t); \ + case INFINI_DTYPE_I64: \ + return CALCULATE_SCATTER(BLOCK_SIZE, int64_t); \ + case INFINI_DTYPE_F16: \ + return CALCULATE_SCATTER(BLOCK_SIZE, half); \ + case INFINI_DTYPE_F32: \ + return CALCULATE_SCATTER(BLOCK_SIZE, float); \ + case INFINI_DTYPE_BF16: \ + return CALCULATE_SCATTER(BLOCK_SIZE, cuda_bfloat16); \ + default: \ + return INFINI_STATUS_BAD_TENSOR_DTYPE; \ + } + + if (_opaque->internal->maxThreadsPerBlock() == CUDA_BLOCK_SIZE_1024) { + CALCULATE_SCATTER_WITH_BLOCK_SIZE(CUDA_BLOCK_SIZE_1024) + } else if (_opaque->internal->maxThreadsPerBlock() == CUDA_BLOCK_SIZE_512) { + CALCULATE_SCATTER_WITH_BLOCK_SIZE(CUDA_BLOCK_SIZE_512) + } else if (_opaque->internal->maxThreadsPerBlock() == CUDA_BLOCK_SIZE_4096) { + CALCULATE_SCATTER_WITH_BLOCK_SIZE(CUDA_BLOCK_SIZE_4096) + } else { + return INFINI_STATUS_DEVICE_ARCHITECTURE_NOT_SUPPORTED; + } + +#undef CALCULATE_SCATTER_WITH_BLOCK_SIZE +#undef CALCULATE_SCATTER + + return INFINI_STATUS_SUCCESS; +} +} // namespace op::scatter::nvidia diff --git a/src/infiniop/ops/scatter/nvidia/scatter_nvidia.cuh b/src/infiniop/ops/scatter/nvidia/scatter_nvidia.cuh new file mode 100644 index 000000000..a199edb6e --- /dev/null +++ b/src/infiniop/ops/scatter/nvidia/scatter_nvidia.cuh @@ -0,0 +1,7 @@ +#ifndef __SCATTER_NVIDIA_API_H__ +#define __SCATTER_NVIDIA_API_H__ +#include "../scatter.h" + +DESCRIPTOR(nvidia) + +#endif // __SCATTER_NVIDIA_API_H__ diff --git a/src/infiniop/ops/scatter/operator.cc b/src/infiniop/ops/scatter/operator.cc new file mode 100644 index 000000000..95857d731 --- /dev/null +++ b/src/infiniop/ops/scatter/operator.cc @@ -0,0 +1,160 @@ +#include "../../operator.h" +#include "../../handle.h" +#include "infiniop/ops/scatter.h" + +#ifdef ENABLE_CPU_API +#include "cpu/scatter_cpu.h" +#endif +#if defined(ENABLE_NVIDIA_API) || defined(ENABLE_ILUVATAR_API) || defined(ENABLE_QY_API) +#include "nvidia/scatter_nvidia.cuh" +#endif +#ifdef ENABLE_METAX_API +#include "metax/scatter_metax.h" +#endif + +__C infiniStatus_t infiniopCreateScatterDescriptor( + infiniopHandle_t handle, + infiniopScatterDescriptor_t *desc_ptr, + infiniopTensorDescriptor_t output_desc, + infiniopTensorDescriptor_t input_desc, + infiniopTensorDescriptor_t index_desc, + size_t dim) { +#define CREATE(CASE, NAMESPACE) \ + case CASE: \ + return op::scatter::NAMESPACE::Descriptor::create( \ + handle, \ + reinterpret_cast(desc_ptr), \ + output_desc, \ + input_desc, \ + index_desc, \ + dim) + + switch (handle->device) { + +#ifdef ENABLE_CPU_API + CREATE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + CREATE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + CREATE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + CREATE(INFINI_DEVICE_QY, nvidia); +#endif +#ifdef ENABLE_METAX_API + CREATE(INFINI_DEVICE_METAX, metax); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef CREATE +} + +__C infiniStatus_t infiniopGetScatterWorkspaceSize(infiniopScatterDescriptor_t desc, size_t *size) { +#define GET(CASE, NAMESPACE) \ + case CASE: \ + *size = 
reinterpret_cast(desc)->workspaceSize(); \ + return INFINI_STATUS_SUCCESS; + + switch (desc->device_type) { +#ifdef ENABLE_CPU_API + GET(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + GET(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + GET(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + GET(INFINI_DEVICE_QY, nvidia); +#endif +#ifdef ENABLE_METAX_API + GET(INFINI_DEVICE_METAX, metax); +#endif + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } +#undef GET +} + +__C infiniStatus_t infiniopScatter( + infiniopScatterDescriptor_t desc, + void *workspace, + size_t workspace_size, + void *output, + const void *input, + const void *index, + void *stream) { + +#define CALCULATE(CASE, NAMESPACE) \ + case CASE: \ + return reinterpret_cast(desc)->calculate( \ + workspace, \ + workspace_size, \ + output, \ + input, \ + index, \ + stream) + + switch (desc->device_type) { + +#ifdef ENABLE_CPU_API + CALCULATE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + CALCULATE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + CALCULATE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + CALCULATE(INFINI_DEVICE_QY, nvidia); +#endif +#ifdef ENABLE_METAX_API + CALCULATE(INFINI_DEVICE_METAX, metax); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef CALCULATE +} + +__C infiniStatus_t +infiniopDestroyScatterDescriptor(infiniopScatterDescriptor_t desc) { + +#define DELETE(CASE, NAMESPACE) \ + case CASE: \ + delete reinterpret_cast(desc); \ + return INFINI_STATUS_SUCCESS; + + switch (desc->device_type) { + +#ifdef ENABLE_CPU_API + DELETE(INFINI_DEVICE_CPU, cpu); +#endif +#ifdef ENABLE_NVIDIA_API + DELETE(INFINI_DEVICE_NVIDIA, nvidia); +#endif +#ifdef ENABLE_ILUVATAR_API + DELETE(INFINI_DEVICE_ILUVATAR, nvidia); +#endif +#ifdef ENABLE_QY_API + DELETE(INFINI_DEVICE_QY, nvidia); +#endif +#ifdef ENABLE_METAX_API + DELETE(INFINI_DEVICE_METAX, metax); +#endif + + default: + return INFINI_STATUS_DEVICE_TYPE_NOT_SUPPORTED; + } + +#undef DELETE +} diff --git a/src/infiniop/ops/scatter/scatter.h b/src/infiniop/ops/scatter/scatter.h new file mode 100644 index 000000000..e1e332471 --- /dev/null +++ b/src/infiniop/ops/scatter/scatter.h @@ -0,0 +1,47 @@ +#ifndef __SCATTER_H__ +#define __SCATTER_H__ + +#include "../../../utils.h" +#include "../../operator.h" +#include "../../tensor.h" +#include "info.h" + +#define DESCRIPTOR(NAMESPACE) \ + namespace op::scatter::NAMESPACE { \ + class Descriptor final : public InfiniopDescriptor { \ + struct Opaque; \ + Opaque *_opaque; \ + ScatterInfo _info; \ + size_t _workspace_size; \ + Descriptor( \ + infiniDtype_t dtype, \ + ScatterInfo info, \ + size_t workspace_size_, \ + Opaque *opaque, \ + infiniDevice_t device_type, \ + int device_id) : InfiniopDescriptor{device_type, device_id}, \ + _opaque(opaque), \ + _info(info), \ + _workspace_size(workspace_size_) {} \ + \ + public: \ + ~Descriptor(); \ + size_t workspaceSize() const { return _workspace_size; } \ + static infiniStatus_t create( \ + infiniopHandle_t handle, \ + Descriptor **desc_ptr, \ + infiniopTensorDescriptor_t output_desc, \ + infiniopTensorDescriptor_t input_desc, \ + infiniopTensorDescriptor_t index_desc, \ + size_t dim); \ + infiniStatus_t calculate( \ + void *workspace, \ + size_t workspace_size, \ + void *output, \ + const void *input, \ + const void *index, \ + void *stream) const; \ + }; \ + } + +#endif \ No newline at end of file diff --git a/test/infiniop/averagepool.py 
b/test/infiniop/averagepool.py new file mode 100644 index 000000000..55d5c37cf --- /dev/null +++ b/test/infiniop/averagepool.py @@ -0,0 +1,239 @@ +import torch +import ctypes +from ctypes import c_uint64, c_bool + +from libinfiniop import ( + LIBINFINIOP, + TestTensor, + get_test_devices, + check_error, + test_operator, + get_args, + debug, + get_tolerance, + profile_operation, + TestWorkspace, + InfiniDtype, + InfiniDtypeNames, + InfiniDeviceNames, + infiniopOperatorDescriptor_t, +) +from typing import Tuple +import math +from torch.nn import functional as F + +DEBUG = False +PROFILE = False +NUM_PRERUN = 10 +NUM_ITERATIONS = 1000 + +_TEST_CASES = [ + # ============ 1D Average Pooling Tests (converted to MaxPool format) ============ + # Basic cases + ((4, 8, 128), None, (3,), (1,), (0,), False), # kernel=3, stride=1, pad=0 + ((2, 16, 256), None, (5,), (2,), (2,), False), # kernel=5, stride=2, pad=2 + ((8, 4, 64), None, (7,), (3,), (1,), False), # kernel=7, stride=3, pad=1 + # ceil_mode variations + ((1, 3, 99), None, (4,), (3,), (1,), True), # kernel=4, stride=3, pad=1 + ((3, 2, 77), None, (6,), (4,), (0,), True), # kernel=6, stride=4, pad=0 + # ============ 2D Average Pooling Tests ============ + # Basic cases with square kernels + ((2, 3, 64, 64), None, (3, 3), (1, 1), (1, 1), False), + ((4, 16, 128, 128), None, (5, 5), (2, 2), (2, 2), False), + ((1, 8, 96, 96), None, (7, 7), (3, 3), (0, 0), False), + # Rectangular kernels + ((2, 4, 80, 120), None, (3, 5), (1, 2), (1, 2), False), + ((1, 6, 72, 48), None, (7, 3), (2, 1), (3, 1), False), + ((3, 2, 56, 84), None, (2, 4), (2, 3), (0, 2), False), + # ceil_mode variations + ((1, 1, 33, 33), None, (4, 4), (3, 3), (1, 1), True), + ((2, 5, 77, 89), None, (5, 3), (4, 2), (2, 1), True), + # ============ 3D Average Pooling Tests ============ + # Basic cubic kernels + ((1, 2, 32, 32, 32), None, (3, 3, 3), (1, 1, 1), (1, 1, 1), False), + ((2, 4, 48, 48, 48), None, (5, 5, 5), (2, 2, 2), (2, 2, 2), False), + ((1, 1, 64, 64, 64), None, (7, 7, 7), (3, 3, 3), (0, 0, 0), False), + # Non-cubic kernels + ((1, 3, 24, 36, 48), None, (2, 3, 4), (1, 2, 2), (0, 1, 2), False), + ((2, 2, 40, 32, 56), None, (5, 3, 7), (2, 1, 3), (2, 1, 3), False), + ((1, 1, 28, 44, 36), None, (3, 5, 2), (2, 3, 1), (1, 2, 1), False), + # ceil_mode variations + ((1, 1, 27, 27, 27), None, (4, 4, 4), (3, 3, 3), (1, 1, 1), True), + ((2, 2, 33, 45, 39), None, (5, 3, 4), (3, 2, 3), (2, 1, 1), True), +] + +_TENSOR_DTYPES = [InfiniDtype.F32, InfiniDtype.F16, InfiniDtype.BF16] +_TOLERANCE_MAP = { + InfiniDtype.F16: {"atol": 1e-3, "rtol": 1e-3}, + InfiniDtype.F32: {"atol": 1e-4, "rtol": 1e-4}, + InfiniDtype.BF16: {"atol": 1e-2, "rtol": 1e-2}, +} + + +def averagepool(input_tensor, kernel_size, stride, padding, ceil_mode, output_tensor): + ndim = len(input_tensor.shape) - 2 + if ndim == 1: + result = F.avg_pool1d( + input_tensor.to(torch.float32), kernel_size[0], stride[0], padding[0], ceil_mode=ceil_mode + ) + elif ndim == 2: + result = F.avg_pool2d( + input_tensor.to(torch.float32), kernel_size, stride, padding, ceil_mode=ceil_mode + ) + elif ndim == 3: + result = F.avg_pool3d( + input_tensor.to(torch.float32), kernel_size, stride, padding, ceil_mode=ceil_mode + ) + else: + raise ValueError(f"Unsupported spatial dimensions: {ndim}") + + # 将计算结果转换回原始数据类型 + output_tensor.copy_(result.to(output_tensor.dtype)) + + +def infer_output_shape(input_shape, kernel_size, stride, padding, ceil_mode): + def calc_output_size(input_size, k, s, p, ceil_mode): + return ( + math.ceil((input_size + 2 * p - k) / s 
+ 1) + if ceil_mode + else math.floor((input_size + 2 * p - k) / s + 1) + ) + + batch, channel, *spatial = input_shape + output_spatial = [ + calc_output_size(spatial[i], kernel_size[i], stride[i], padding[i], ceil_mode) + for i in range(len(spatial)) + ] + return (batch, channel) + tuple(output_spatial) + + +def tuple_to_void_p(py_tuple: Tuple): + arr = (ctypes.c_uint64 * len(py_tuple))(*py_tuple) + return ctypes.cast(arr, ctypes.c_void_p) + + +def test( + handle, + device, + input_shape, + input_stride, + kernel_size, + stride, + padding, + ceil_mode, + tensor_dtype=InfiniDtype.F16, + sync=None, +): + input_tensor = TestTensor( + input_shape, input_stride, dt=tensor_dtype, device=device, scale=1.0 + ) + output_shape = infer_output_shape( + input_shape, kernel_size, stride, padding, ceil_mode + ) + output_tensor = TestTensor(output_shape, None, dt=tensor_dtype, device=device) + + print( + f"Testing AvgPool on {InfiniDeviceNames[device]} with input_shape: {input_shape}, kernel_size: {kernel_size}, stride: {stride}, padding: {padding}, ceil_mode: {ceil_mode}, dtype: {InfiniDtypeNames[tensor_dtype]}" + ) + + averagepool( + input_tensor.torch_tensor(), + kernel_size, + stride, + padding, + ceil_mode, + output_tensor.torch_tensor(), + ) + + if sync: + sync() + + descriptor = infiniopOperatorDescriptor_t() + check_error( + LIBINFINIOP.infiniopCreateAvgPoolDescriptor( + handle, + ctypes.byref(descriptor), + output_tensor.descriptor, + input_tensor.descriptor, + tuple_to_void_p(kernel_size), + tuple_to_void_p(stride), + tuple_to_void_p(padding), + c_bool(ceil_mode), + ) + ) + + for tensor in [input_tensor, output_tensor]: + if tensor: + tensor.destroy_desc() + + workspace_size = ctypes.c_uint64(0) + check_error( + LIBINFINIOP.infiniopGetAvgPoolWorkspaceSize( + descriptor, ctypes.byref(workspace_size) + ) + ) + workspace = TestWorkspace(workspace_size.value, output_tensor.device) + + def lib_averagepool(): + check_error( + LIBINFINIOP.infiniopAvgPool( + descriptor, + workspace.data(), + workspace_size.value, + output_tensor.data(), + input_tensor.data(), + None, + ) + ) + + lib_averagepool() + + atol, rtol = get_tolerance(_TOLERANCE_MAP, tensor_dtype) + if DEBUG: + debug( + output_tensor.actual_tensor(), + output_tensor.torch_tensor(), + atol=atol, + rtol=rtol, + ) + + assert torch.allclose( + output_tensor.actual_tensor(), + output_tensor.torch_tensor(), + atol=atol, + rtol=rtol, + ), f"Mismatch for shape {input_shape}, kernel {kernel_size}" + + if PROFILE: + profile_operation( + "PyTorch", + lambda: averagepool( + input_tensor.torch_tensor(), + kernel_size, + stride, + padding, + ceil_mode, + output_tensor.torch_tensor(), + ), + device, + NUM_PRERUN, + NUM_ITERATIONS, + ) + profile_operation( + " lib", lib_averagepool, device, NUM_PRERUN, NUM_ITERATIONS + ) + + check_error(LIBINFINIOP.infiniopDestroyAvgPoolDescriptor(descriptor)) + + +if __name__ == "__main__": + args = get_args() + DEBUG = args.debug + PROFILE = args.profile + NUM_PRERUN = args.num_prerun + NUM_ITERATIONS = args.num_iterations + + for device in get_test_devices(args): + test_operator(device, test, _TEST_CASES, _TENSOR_DTYPES) + + print("\033[92mTest passed!\033[0m") diff --git a/test/infiniop/batch_norm.py b/test/infiniop/batch_norm.py new file mode 100644 index 000000000..a7b46858f --- /dev/null +++ b/test/infiniop/batch_norm.py @@ -0,0 +1,244 @@ +import torch +import ctypes +from ctypes import c_uint64 +from libinfiniop import ( + LIBINFINIOP, + TestTensor, + get_test_devices, + check_error, + test_operator, + get_args, + 
debug, + get_tolerance, + profile_operation, + TestWorkspace, + InfiniDtype, + InfiniDtypeNames, + InfiniDeviceNames, + infiniopOperatorDescriptor_t, +) +from enum import Enum, auto + +_TEST_CASES_ = [ + # shape, momentum, eps + ((13, 4, 5,), 0.1, 1e-5), + ((2, 3, 4), 0.1, 1e-4), + ((15, 16, 17,), 0.2, 1e-5), + ((50, 60, 70), 0.1, 1e-4), +] + +class Inplace(Enum): + OUT_OF_PLACE = auto() + INPLACE = auto() + + +# Inplace options applied for each test case in _TEST_CASES_ +_INPLACE = [ + Inplace.OUT_OF_PLACE, + Inplace.INPLACE, +] + +_TEST_CASES = [ + test_case + (inplace_item,) + for test_case in _TEST_CASES_ + for inplace_item in _INPLACE +] + + +# No implement for INPLACE + + +# Data types used for testing +_TENSOR_DTYPES = [InfiniDtype.F16, InfiniDtype.F32, InfiniDtype.BF16] + +# Tolerance map for different data types +_TOLERANCE_MAP = { + InfiniDtype.F16: {"atol": 1e-2, "rtol": 1e-2}, + InfiniDtype.F32: {"atol": 1e-5, "rtol": 1e-5}, + InfiniDtype.BF16: {"atol": 1e-2, "rtol": 1e-2}, +} + +DEBUG = False +PROFILE = False +NUM_PRERUN = 10 +NUM_ITERATIONS = 1000 + + +def torch_batch_norm( + output: torch.Tensor, + running_mean: torch.Tensor, + running_var: torch.Tensor, + input: torch.Tensor, + weight: torch.Tensor, + bias: torch.Tensor, + init_running_mean: torch.Tensor, + init_running_var: torch.Tensor, + momentum: float, + eps: float +): + bn = torch.nn.BatchNorm1d( + num_features=input.shape[1], + eps=eps, + momentum=momentum, + dtype=input.dtype, + ) + bn.weight.data = weight + bn.bias.data = bias + bn.running_mean.data = init_running_mean + bn.running_var.data = init_running_var + output.copy_(bn(input).detach()) + running_mean.copy_(bn.running_mean.data) + running_var.copy_(bn.running_var.data) + + +def test( + handle, + device, + shape, momentum, eps, + inplace, + dtype, + sync=None, +): + running_mean = TestTensor( + [shape[1]], + None, + dtype, + device, + ) + running_var = TestTensor( + [shape[1]], + None, + dtype, + device, + ) + + input = TestTensor( + shape, + None, + dtype, + device, + ) + if inplace == Inplace.INPLACE: + output = input + else: + output = TestTensor( + shape, + None, + dtype, + device + ) + + weight = TestTensor( + [shape[1]], + None, + dtype, + device, + ) + bias = TestTensor( + [shape[1]], + None, + dtype, + device, + ) + + + print( + f"Testing BatchNorm on {InfiniDeviceNames[device]} with shape:{shape}, inplace:{inplace}, momentum:{momentum}, eps:{eps}," + f"dtype:{InfiniDtypeNames[dtype]}" + ) + + + torch_batch_norm( + output.torch_tensor(), running_mean.torch_tensor(), running_var.torch_tensor(), + input.torch_tensor(), weight.torch_tensor(), bias.torch_tensor(), + running_mean.torch_tensor(), running_var.torch_tensor(), + momentum, eps + ) + + + if sync is not None: + sync() + + descriptor = infiniopOperatorDescriptor_t() + check_error( + LIBINFINIOP.infiniopCreateBatchNormDescriptor( + handle, + ctypes.byref(descriptor), + output.descriptor, + running_mean.descriptor, + running_var.descriptor, + input.descriptor, + weight.descriptor, + bias.descriptor, + momentum, + eps + ) + ) + + # Invalidate the shape and strides in the descriptor to prevent them from being directly used by the kernel + for tensor in [output, running_mean, running_var, input, weight, bias]: + tensor.destroy_desc() + + workspace_size = c_uint64(0) + check_error( + LIBINFINIOP.infiniopGetBatchNormWorkspaceSize( + descriptor, ctypes.byref(workspace_size) + ) + ) + workspace = TestWorkspace(workspace_size.value, output.device) + + def lib_batch_norm(): + check_error( + 
LIBINFINIOP.infiniopBatchNorm(
+                descriptor,
+                workspace.data(),
+                workspace.size(),
+                output.data(),
+                running_mean.data(),
+                running_var.data(),
+                input.data(),
+                weight.data(),
+                bias.data(),
+                None,
+            )
+        )
+
+    lib_batch_norm()
+
+    atol, rtol = get_tolerance(_TOLERANCE_MAP, dtype)
+    if DEBUG:
+        debug(output.actual_tensor(), output.torch_tensor(), atol=atol, rtol=rtol)
+        debug(running_mean.actual_tensor(), running_mean.torch_tensor(), atol=atol, rtol=rtol)
+        debug(running_var.actual_tensor(), running_var.torch_tensor(), atol=atol, rtol=rtol)
+
+
+    assert torch.allclose(output.actual_tensor(), output.torch_tensor(), atol=atol, rtol=rtol)
+    assert torch.allclose(running_mean.actual_tensor(), running_mean.torch_tensor(), atol=atol, rtol=rtol)
+    assert torch.allclose(running_var.actual_tensor(), running_var.torch_tensor(), atol=atol, rtol=rtol)
+
+
+    # Profiling workflow
+    if PROFILE:
+        # fmt: off
+        profile_operation("PyTorch", lambda: torch_batch_norm(
+            output.torch_tensor(), running_mean.torch_tensor(), running_var.torch_tensor(),
+            input.torch_tensor(), weight.torch_tensor(), bias.torch_tensor(), running_mean.torch_tensor(), running_var.torch_tensor(), momentum, eps
+        ), device, NUM_PRERUN, NUM_ITERATIONS)
+        profile_operation(" lib", lambda: lib_batch_norm(), device, NUM_PRERUN, NUM_ITERATIONS)
+        # fmt: on
+    check_error(LIBINFINIOP.infiniopDestroyBatchNormDescriptor(descriptor))
+
+
+if __name__ == "__main__":
+    args = get_args()
+
+    # Configure testing options
+    DEBUG = args.debug
+    PROFILE = args.profile
+    NUM_PRERUN = args.num_prerun
+    NUM_ITERATIONS = args.num_iterations
+
+    for device in get_test_devices(args):
+        test_operator(device, test, _TEST_CASES, _TENSOR_DTYPES)
+
+    print("\033[92mTest BatchNorm passed!\033[0m")
diff --git a/test/infiniop/cross_entropy_loss.py b/test/infiniop/cross_entropy_loss.py
new file mode 100644
index 000000000..acc5cadc4
--- /dev/null
+++ b/test/infiniop/cross_entropy_loss.py
@@ -0,0 +1,213 @@
+import torch
+import ctypes
+from ctypes import c_uint64
+import numpy as np
+
+from libinfiniop import (
+    LIBINFINIOP,
+    TestTensor,
+    get_test_devices,
+    check_error,
+    test_operator,
+    get_args,
+    debug,
+    get_tolerance,
+    profile_operation,
+    infiniopOperatorDescriptor_t,
+    InfiniDtype,
+    InfiniDtypeNames,
+    InfiniDeviceNames,
+    TestWorkspace,
+    InfiniDeviceEnum,
+)
+from torch.nn import functional as F
+
+_TEST_CASES = [
+    # Single sample classification
+    ((10,), 10),
+    ((200,), 200),
+    # 2D: (N, C) - batch classification
+    ((4, 10), 10),
+    ((8, 5), 5),
+    ((16, 100), 100),
+    ((32, 1000), 1000),
+    ((64, 21), 21),
+    ((128, 50), 50),
+    # 3D: (N, C, d1) - sequence classification
+    ((4, 10, 5), 10),
+    # 4D: (N, C, d1, d2) - image segmentation
+    ((2, 8, 8, 8), 8),
+    # 5D: (N, C, d1, d2, d3) - 3D segmentation
+    ((3, 10, 10, 20, 30), 10),
+]
+
+_TENSOR_DTYPES = [InfiniDtype.F32, InfiniDtype.F16, InfiniDtype.BF16]
+_TOLERANCE_MAP = {
+    InfiniDtype.F16: {"atol": 1e-3, "rtol": 1e-3},
+    InfiniDtype.F32: {"atol": 1e-5, "rtol": 1e-5},
+    InfiniDtype.BF16: {"atol": 1e-2, "rtol": 1e-2},
+}
+DEBUG = False
+PROFILE = False
+NUM_PRERUN = 10
+NUM_ITERATIONS = 1000
+
+
+def cross_entropy_loss_pytorch(logits, target):
+    return F.cross_entropy(logits.double(), target.long(), reduction="mean")
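+
+
+# A minimal cross-check sketch (not wired into the test flow; the helper name is
+# illustrative only): for (N, C, d1, ...) layouts, F.cross_entropy with
+# reduction="mean" averages -log softmax(logits) picked at the target class over
+# every non-class position, which is the quantity the operator is compared against.
+def _manual_mean_cross_entropy_sketch(logits, target):
+    # dim=1 is the class dimension for batched layouts; compute in float64 like
+    # the reference above to keep precision noise out of the comparison.
+    log_p = torch.log_softmax(logits.double(), dim=1)
+    picked = log_p.gather(1, target.long().unsqueeze(1)).squeeze(1)
+    return -picked.mean()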
+
+
+def test(
+    handle,
+    device,
+    input_shape,
+    num_classes,
+    tensor_dtype=InfiniDtype.F32,
+    sync=None,
+):
+    # Determine the logits and target shapes from the input shape
+    if len(input_shape) == 1:
+        # Shape (C,) - single sample classification
+        logits_shape = (num_classes,)
+        target_shape = (1,)  # use a (1,)-shaped tensor rather than a scalar
+    else:
+        # Shape (N, C, [d1], [d2], ...)
+        logits_shape = input_shape
+        target_shape = (input_shape[0],) + input_shape[2:]
+
+    print(
+        f"Testing CrossEntropyLoss on {InfiniDeviceNames[device]} with logits_shape: {logits_shape}, target_shape: {target_shape}, dtype:{InfiniDtypeNames[tensor_dtype]}"
+    )
+
+    # Create the logits tensor
+    logits = TestTensor(logits_shape, None, dt=tensor_dtype, device=device)
+
+    # Create the target tensor
+    target_torch = torch.randint(
+        0,
+        num_classes,
+        target_shape,
+        dtype=torch.long,
+        device=logits.torch_tensor().device,
+    )
+    target = TestTensor.from_torch(target_torch, dt=InfiniDtype.I64, device=device)
+
+    # Create the loss tensor
+    loss = TestTensor((1,), None, dt=tensor_dtype, device=device)
+
+    # Compute the PyTorch reference loss
+    if len(input_shape) == 1:
+        # For 1-D logits, the target must be a scalar
+        target_scalar = target.torch_tensor()[0]
+        pytorch_loss = cross_entropy_loss_pytorch(logits.torch_tensor(), target_scalar)
+    else:
+        pytorch_loss = cross_entropy_loss_pytorch(
+            logits.torch_tensor(), target.torch_tensor()
+        )
+
+    # Store the reference result in the loss tensor
+    loss.torch_tensor()[0] = pytorch_loss.to(loss.torch_tensor().dtype)
+
+    if sync:
+        sync()
+
+    # Create the operator descriptor
+    descriptor = infiniopOperatorDescriptor_t()
+    check_error(
+        LIBINFINIOP.infiniopCreateCrossEntropyLossDescriptor(
+            handle,
+            ctypes.byref(descriptor),
+            loss.descriptor,
+            logits.descriptor,
+            target.descriptor,
+        )
+    )
+
+    # Destroy the tensor descriptors to prevent the kernel from using them directly
+    for tensor in [logits, target, loss]:
+        tensor.destroy_desc()
+
+    # Query the workspace size and create the workspace
+    workspace_size = c_uint64(0)
+    check_error(
+        LIBINFINIOP.infiniopGetCrossEntropyLossWorkspaceSize(
+            descriptor, ctypes.byref(workspace_size)
+        )
+    )
+    workspace = TestWorkspace(workspace_size.value, device)
+
+    # PyTorch reference implementation
+    def torch_cross_entropy():
+        if len(input_shape) == 1:
+            target_scalar = target.torch_tensor()[0]
+            result = cross_entropy_loss_pytorch(logits.torch_tensor(), target_scalar)
+        else:
+            result = cross_entropy_loss_pytorch(
+                logits.torch_tensor(), target.torch_tensor()
+            )
+        loss.torch_tensor()[0] = result.to(loss.torch_tensor().dtype)
+
+    # InfiniOP implementation
+    def lib_cross_entropy():
+        check_error(
+            LIBINFINIOP.infiniopCrossEntropyLoss(
+                descriptor,
+                workspace.data(),
+                workspace_size.value,
+                loss.data(),
+                logits.data(),
+                target.data(),
+                None,
+            )
+        )
+
+    # Run the InfiniOP operator
+    lib_cross_entropy()
+
+    if sync:
+        sync()
+
+    # Verify the result
+    atol, rtol = get_tolerance(_TOLERANCE_MAP, tensor_dtype)
+    actual_loss = loss.actual_tensor()[0]
+    expected_loss = loss.torch_tensor()[0]
+
+    if DEBUG:
+        print(f"Expected loss: {expected_loss.item()}")
+        print(f"Actual loss: {actual_loss.item()}")
+        if target_shape:
+            print(
+                f"Target shape: {target_shape}, first few targets: {target.torch_tensor().flatten()[:5]}"
+            )
+        else:
+            print(f"Target (scalar): {target.torch_tensor()[0].item()}")
+        debug(actual_loss, expected_loss, atol=atol, rtol=rtol)
+
+    if not torch.allclose(actual_loss, expected_loss, atol=atol, rtol=rtol):
+        print("--- ERROR ANALYSIS ---")
+        print(f"Expected: {expected_loss.item()}, Actual: {actual_loss.item()}")
+        print(f"Difference: {abs(actual_loss - expected_loss).item()}")
+        print(f"Tolerance: atol={atol}, rtol={rtol}")
+
+    assert torch.allclose(actual_loss, expected_loss, atol=atol, rtol=rtol)
+
+    # Profiling workflow
+    if PROFILE:
+        # fmt: off
+        profile_operation("PyTorch", lambda: torch_cross_entropy(), device, NUM_PRERUN, NUM_ITERATIONS)
+        profile_operation(" lib", lambda: lib_cross_entropy(), device, NUM_PRERUN, NUM_ITERATIONS)
+        # fmt: on
+
+    check_error(LIBINFINIOP.infiniopDestroyCrossEntropyLossDescriptor(descriptor))
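+
+
+# Hedged aside on the (C,) single-sample cases above: the operator-side target is a
+# (1,)-shaped I64 tensor, while F.cross_entropy on 1-D logits expects a 0-dim class
+# index, which is why the test extracts target[0]. The helper below (illustrative
+# name only, never called by the harness) shows the two views give the same loss,
+# assuming a PyTorch version that accepts no-batch-dim inputs.
+def _single_sample_target_equivalence_sketch(num_classes=10, class_idx=3):
+    logits = torch.randn(num_classes, dtype=torch.float64)
+    target = torch.tensor([class_idx])  # operator-style (1,) target
+    return torch.allclose(
+        F.cross_entropy(logits, target[0]),           # scalar target, 1-D logits
+        F.cross_entropy(logits.unsqueeze(0), target),  # batch-of-one formulation
+    )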
+ + +if __name__ == "__main__": + args = get_args() + DEBUG = args.debug + PROFILE = args.profile + NUM_PRERUN = args.num_prerun + NUM_ITERATIONS = args.num_iterations + + for device in get_test_devices(args): + test_operator(device, test, _TEST_CASES, _TENSOR_DTYPES) + print("\033[92mAll CrossEntropyLoss tests passed!\033[0m") diff --git a/test/infiniop/exp.py b/test/infiniop/exp.py new file mode 100644 index 000000000..eb139af12 --- /dev/null +++ b/test/infiniop/exp.py @@ -0,0 +1,165 @@ +import torch +import ctypes +from ctypes import c_uint64 +from libinfiniop import ( + LIBINFINIOP, + TestTensor, + get_test_devices, + check_error, + test_operator, + get_args, + debug, + get_tolerance, + get_sync_func, + profile_operation, + TestWorkspace, + InfiniDtype, + InfiniDtypeNames, + InfiniDeviceNames, + infiniopOperatorDescriptor_t, +) +from enum import Enum, auto + +# ======================================================================== +# Configuration (Internal Use Only) +# ======================================================================== +_TEST_CASES_ = [ + # shape, input_stride, output_stride + ((13, 4), None, None), + ((13, 4), (10, 1), (10, 1)), + ((13, 4), (0, 1), None), + ((13, 4, 4), None, None), + ((13, 4, 4), (20, 4, 1), (20, 4, 1)), + ((13, 4, 4), (4, 0, 1), None), + ((16, 5632), None, None), + ((16, 5632), (10240, 1), (10240, 1)), + ((4, 4, 5632), None, None), + ((4, 4, 5632), (45056, 5632, 1), (45056, 5632, 1)), +] + +class Inplace(Enum): + OUT_OF_PLACE = auto() + INPLACE_INPUT = auto() + +_INPLACE = [ + Inplace.OUT_OF_PLACE, + Inplace.INPLACE_INPUT, +] + +_TEST_CASES = [ + test_case + (inplace,) + for test_case in _TEST_CASES_ + for inplace in _INPLACE +] + +_TENSOR_DTYPES = [InfiniDtype.F16, InfiniDtype.F32, InfiniDtype.BF16] + +_TOLERANCE_MAP = { + InfiniDtype.F16: {"atol": 1e-3, "rtol": 1e-3}, + InfiniDtype.F32: {"atol": 1e-6, "rtol": 1e-6}, + InfiniDtype.BF16: {"atol": 1e-2, "rtol": 1e-2}, +} + +DEBUG = False +PROFILE = False +NUM_PRERUN = 10 +NUM_ITERATIONS = 1000 + + +def exp(output, input): + output.copy_(torch.exp(input)) + +def test( + handle, + device, + shape, + input_stride=None, + output_stride=None, + inplace=Inplace.OUT_OF_PLACE, + dtype=torch.float16, + sync=None, +): + input = TestTensor(shape, input_stride, dtype, device) + if inplace == Inplace.INPLACE_INPUT: + if input_stride != output_stride: + return + output = input + else: + output = TestTensor(shape, output_stride, dtype, device, mode="ones") + + if output.is_broadcast(): + return + + print( + f"Testing Exp on {InfiniDeviceNames[device]} with shape:{shape} input_stride:{input_stride} output_stride:{output_stride} " + f"dtype:{InfiniDtypeNames[dtype]} inplace:{inplace}" + ) + + exp(output.torch_tensor(), input.torch_tensor()) + + if sync is not None: + sync() + + descriptor = infiniopOperatorDescriptor_t() + check_error( + LIBINFINIOP.infiniopCreateExpDescriptor( + handle, + ctypes.byref(descriptor), + output.descriptor, + input.descriptor, + ) + ) + + # Invalidate the shape and strides in the descriptor to prevent them from being directly used by the kernel + for tensor in [input, output]: + tensor.destroy_desc() + + workspace_size = c_uint64(0) + check_error( + LIBINFINIOP.infiniopGetExpWorkspaceSize( + descriptor, ctypes.byref(workspace_size) + ) + ) + workspace = TestWorkspace(workspace_size.value, output.device) + + def lib_exp(): + check_error( + LIBINFINIOP.infiniopExp( + descriptor, + workspace.data(), + workspace_size.value, + output.data(), + input.data(), + None, + ) + ) + + 
lib_exp() + + atol, rtol = get_tolerance(_TOLERANCE_MAP, dtype) + if DEBUG: + debug(output.actual_tensor(), output.torch_tensor(), atol=atol, rtol=rtol) + assert torch.allclose(output.actual_tensor(), output.torch_tensor(), atol=atol, rtol=rtol) + + # Profiling workflow + if PROFILE: + # fmt: off + profile_operation("PyTorch", lambda: exp(output.torch_tensor(), input.torch_tensor()), device, NUM_PRERUN, NUM_ITERATIONS) + profile_operation(" lib", lambda: lib_exp(), device, NUM_PRERUN, NUM_ITERATIONS) + # fmt: on + check_error(LIBINFINIOP.infiniopDestroyExpDescriptor(descriptor)) + + +if __name__ == "__main__": + args = get_args() + + # Configure testing options + DEBUG = args.debug + PROFILE = args.profile + NUM_PRERUN = args.num_prerun + NUM_ITERATIONS = args.num_iterations + + for device in get_test_devices(args): + test_operator(device, test, _TEST_CASES, _TENSOR_DTYPES) + + print("\033[92mTest passed!\033[0m") diff --git a/test/infiniop/gather.py b/test/infiniop/gather.py new file mode 100644 index 000000000..b5c8ea93d --- /dev/null +++ b/test/infiniop/gather.py @@ -0,0 +1,160 @@ +import torch +import ctypes +from ctypes import c_uint64 +from libinfiniop import ( + LIBINFINIOP, + TestTensor, + get_test_devices, + check_error, + test_operator, + get_args, + debug, + get_tolerance, + profile_operation, + TestWorkspace, + InfiniDtype, + InfiniDtypeNames, + InfiniDeviceNames, + infiniopOperatorDescriptor_t, +) +from enum import Enum, auto + +_TEST_CASES = [ + # input_shape, output_shape, dim, input_strides, output_strides, index_strides + ((2, 3, 7), (2, 3, 5), 2, (177, 17, 1), None, None), + ((10, 5, 4), (10, 4, 4), 1, (30, 5, 1), None, [16, 4, 1]), + ((11, 2, 2, 4), (11, 2, 2, 4), 0, None, (1007, 107, 10, 1), None), + ((11, 20, 20, 13, 37), (11, 20, 20, 13, 37), 1, None, None, None) +] + +# Data types used for testing +_TENSOR_DTYPES = [InfiniDtype.F16, InfiniDtype.F32, InfiniDtype.BF16] + +# Tolerance map for different data types +_TOLERANCE_MAP = { + InfiniDtype.F16: {"atol": 0, "rtol": 0}, + InfiniDtype.F32: {"atol": 0, "rtol": 0}, + InfiniDtype.BF16: {"atol": 0, "rtol": 0}, +} + +DEBUG = False +PROFILE = False +NUM_PRERUN = 10 +NUM_ITERATIONS = 1000 + + +def torch_gather(output, input, dim, index): + torch.gather(input, dim, index, out=output) + +def test( + handle, + device, + input_shape, output_shape, dim, input_strides, output_strides, index_strides, + dtype, + sync=None, +): + print( + f"Testing Gather on {InfiniDeviceNames[device]} with input shape:{input_shape}, dim:{dim}, output_shape:{output_shape}," + f"dtype:{InfiniDtypeNames[dtype]}" + ) + + input = TestTensor( + input_shape, + input_strides, + dtype, + device + ) + torch_index = torch.randint(low=0, high=input_shape[dim], size=output_shape, dtype=torch.int64) + if index_strides: + torch_index = torch_index.as_strided(output_shape, index_strides) + index = TestTensor( + output_shape, + torch_index.stride(), + InfiniDtype.I64, + device, + "manual", + set_tensor=torch_index + ) + output = TestTensor( + output_shape, + output_strides, + dtype, + device, + ) + + torch_gather(output.torch_tensor(), input.torch_tensor(), dim, index.torch_tensor()) + + if sync is not None: + sync() + + descriptor = infiniopOperatorDescriptor_t() + check_error( + LIBINFINIOP.infiniopCreateGatherDescriptor( + handle, + ctypes.byref(descriptor), + output.descriptor, + input.descriptor, + index.descriptor, + dim + ) + ) + + # Invalidate the shape and strides in the descriptor to prevent them from being directly used by the kernel + for tensor in 
[input, output, index]: + tensor.destroy_desc() + + workspace_size = c_uint64(0) + check_error( + LIBINFINIOP.infiniopGetGatherWorkspaceSize( + descriptor, ctypes.byref(workspace_size) + ) + ) + workspace = TestWorkspace(workspace_size.value, input.device) + + def lib_gather(): + check_error( + LIBINFINIOP.infiniopGather( + descriptor, + workspace.data(), + workspace.size(), + output.data(), + input.data(), + index.data(), + None, + ) + ) + + lib_gather() + + atol, rtol = get_tolerance(_TOLERANCE_MAP, dtype) + if DEBUG: + debug(output.actual_tensor(), output.torch_tensor(), atol=atol, rtol=rtol) + # print("x:", input.torch_tensor()) + # print("CALCULATED:\n", output.actual_tensor(), ) + # print("GT\n", output.torch_tensor()) + assert torch.allclose(output.actual_tensor(), output.torch_tensor(), atol=atol, rtol=rtol) + + # Profiling workflow + if PROFILE: + # fmt: off + profile_operation("PyTorch", lambda: torch_gather( + output.torch_tensor(), input.torch_tensor(), dim, index.torch_tensor() + ), device, NUM_PRERUN, NUM_ITERATIONS) + profile_operation(" lib", lambda: lib_gather(), device, NUM_PRERUN, NUM_ITERATIONS) + # fmt: on + check_error(LIBINFINIOP.infiniopDestroyGatherDescriptor(descriptor)) + + +if __name__ == "__main__": + args = get_args() + + # Configure testing options + DEBUG = args.debug + PROFILE = args.profile + NUM_PRERUN = args.num_prerun + NUM_ITERATIONS = args.num_iterations + + for device in get_test_devices(args): + test_operator(device, test, _TEST_CASES, _TENSOR_DTYPES) + + print("\033[92mTest my Gather passed!\033[0m") diff --git a/test/infiniop/hardswish.py b/test/infiniop/hardswish.py new file mode 100644 index 000000000..424b30567 --- /dev/null +++ b/test/infiniop/hardswish.py @@ -0,0 +1,167 @@ +import torch +import ctypes +from ctypes import c_uint64 +from libinfiniop import ( + LIBINFINIOP, + TestTensor, + get_test_devices, + check_error, + test_operator, + get_args, + debug, + get_tolerance, + get_sync_func, + profile_operation, + TestWorkspace, + InfiniDtype, + InfiniDtypeNames, + InfiniDeviceNames, + infiniopOperatorDescriptor_t, +) +from enum import Enum, auto + +# ======================================================================== +# Configuration (Internal Use Only) +# ======================================================================== +_TEST_CASES_ = [ + # shape, input_stride, output_stride + ((13, 4), None, None), + ((13, 4), (10, 1), (10, 1)), + ((13, 4), (0, 1), None), + ((13, 4, 4), None, None), + ((13, 4, 4), (20, 4, 1), (20, 4, 1)), + ((13, 4, 4), (4, 0, 1), None), + ((16, 5632), None, None), + ((16, 5632), (10240, 1), (10240, 1)), + ((4, 4, 5632), None, None), + ((4, 4, 5632), (45056, 5632, 1), (45056, 5632, 1)), +] + +class Inplace(Enum): + OUT_OF_PLACE = auto() + INPLACE_INPUT = auto() + +_INPLACE = [ + Inplace.OUT_OF_PLACE, + Inplace.INPLACE_INPUT, +] + +_TEST_CASES = [ + test_case + (inplace,) + for test_case in _TEST_CASES_ + for inplace in _INPLACE +] + +_TENSOR_DTYPES = [InfiniDtype.F16, InfiniDtype.F32, InfiniDtype.BF16] + +_TOLERANCE_MAP = { + InfiniDtype.F16: {"atol": 1e-3, "rtol": 1e-3}, + InfiniDtype.F32: {"atol": 1e-6, "rtol": 1e-6}, + InfiniDtype.BF16: {"atol": 1e-2, "rtol": 1e-2}, +} + +DEBUG = False +PROFILE = False +NUM_PRERUN = 10 +NUM_ITERATIONS = 1000 + + +def hardswish(output, input): + output.copy_(input * torch.clamp(input + 3, min=0, max=6) / 6) + + +def test( + handle, + device, + shape, + input_stride=None, + output_stride=None, + inplace=Inplace.OUT_OF_PLACE, + dtype=torch.float16, + sync=None, +): + input = 
TestTensor(shape, input_stride, dtype, device) + if inplace == Inplace.INPLACE_INPUT: + if input_stride != output_stride: + return + output = input + else: + output = TestTensor(shape, output_stride, dtype, device, mode="ones") + + if output.is_broadcast(): + return + + print( + f"Testing Hardswish on {InfiniDeviceNames[device]} with shape:{shape} input_stride:{input_stride} output_stride:{output_stride} " + f"dtype:{InfiniDtypeNames[dtype]} inplace:{inplace}" + ) + + hardswish(output.torch_tensor(), input.torch_tensor()) + + if sync is not None: + sync() + + descriptor = infiniopOperatorDescriptor_t() + check_error( + LIBINFINIOP.infiniopCreateHardswishDescriptor( + handle, + ctypes.byref(descriptor), + output.descriptor, + input.descriptor, + ) + ) + + # Invalidate the shape and strides in the descriptor to prevent them from being directly used by the kernel + for tensor in [input, output]: + tensor.destroy_desc() + + workspace_size = c_uint64(0) + check_error( + LIBINFINIOP.infiniopGetHardswishWorkspaceSize( + descriptor, ctypes.byref(workspace_size) + ) + ) + workspace = TestWorkspace(workspace_size.value, output.device) + + def lib_hardswish(): + check_error( + LIBINFINIOP.infiniopHardswish( + descriptor, + workspace.data(), + workspace_size.value, + output.data(), + input.data(), + None, + ) + ) + + lib_hardswish() + + atol, rtol = get_tolerance(_TOLERANCE_MAP, dtype) + if DEBUG: + debug(output.actual_tensor(), output.torch_tensor(), atol=atol, rtol=rtol) + + assert torch.allclose(output.actual_tensor(), output.torch_tensor(), atol=atol, rtol=rtol) + + # Profiling workflow + if PROFILE: + # fmt: off + profile_operation("PyTorch", lambda: hardswish(output.torch_tensor(), input.torch_tensor()), device, NUM_PRERUN, NUM_ITERATIONS) + profile_operation(" lib", lambda: lib_hardswish(), device, NUM_PRERUN, NUM_ITERATIONS) + # fmt: on + check_error(LIBINFINIOP.infiniopDestroyHardswishDescriptor(descriptor)) + + +if __name__ == "__main__": + args = get_args() + + # Configure testing options + DEBUG = args.debug + PROFILE = args.profile + NUM_PRERUN = args.num_prerun + NUM_ITERATIONS = args.num_iterations + + for device in get_test_devices(args): + test_operator(device, test, _TEST_CASES, _TENSOR_DTYPES) + + print("\033[92mTest passed!\033[0m") diff --git a/test/infiniop/index_copy_inplace.py b/test/infiniop/index_copy_inplace.py new file mode 100644 index 000000000..97dbd8266 --- /dev/null +++ b/test/infiniop/index_copy_inplace.py @@ -0,0 +1,180 @@ +import torch +import ctypes +from ctypes import c_uint64 +from libinfiniop import ( + LIBINFINIOP, + TestTensor, + get_test_devices, + check_error, + test_operator, + get_args, + debug, + get_tolerance, + profile_operation, + TestWorkspace, + InfiniDtype, + InfiniDtypeNames, + InfiniDeviceNames, + infiniopOperatorDescriptor_t, +) +from enum import Enum, auto +import random + + +class Inplace(Enum): + OUT_OF_PLACE = auto() + INPLACE = auto() + +_TEST_CASES = [ + # input_shape, output_shape, dim, output_strides, input_strides, + ([13, 1], [13, 4], 1, [37, 1], [37, 1], Inplace.OUT_OF_PLACE), + ([1333, 4], [1333, 4], 0, [1, 1333], [1, 2333], Inplace.INPLACE), + ([1333, 4], [1333, 4], 0, [1, 1333], [1, 2333], Inplace.OUT_OF_PLACE), + ([133, 23, 53], [133, 23, 53], 1, None, None, Inplace.OUT_OF_PLACE), + ([133, 23, 13, 53], [133, 23, 13, 53], 2, None, None, Inplace.OUT_OF_PLACE), +] + +# Data types used for testing +_TENSOR_DTYPES = [InfiniDtype.F16, InfiniDtype.F32, InfiniDtype.BF16] + +# Tolerance map for different data types +_TOLERANCE_MAP = { + 
InfiniDtype.F16: {"atol": 0, "rtol": 0}, + InfiniDtype.F32: {"atol": 0, "rtol": 0}, + InfiniDtype.BF16: {"atol": 0, "rtol": 0}, +} + + +DEBUG = False +PROFILE = False +NUM_PRERUN = 10 +NUM_ITERATIONS = 1000 + + +def torch_index_copy_inplace(output, input, index, dim): + output.index_copy_(dim, index, input.clone()) + + +def test( + handle, + device, + input_shape, output_shape, dim, output_strides, input_strides, + inplace, + dtype, + sync=None, +): + print( + f"Testing index_copy_inplace on {InfiniDeviceNames[device]} with shape:{input_shape}," + f"inplace:{inplace}," + f"dtype:{InfiniDtypeNames[dtype]}" + ) + + input = TestTensor( + input_shape, + input_strides, + dtype, + device, + ) + if inplace == Inplace.INPLACE: + assert output_shape == input_shape + output = input + else: + output = TestTensor( + output_shape, + output_strides, + dtype, + device, + "zeros", + ) + + index_list = list(range(output_shape[dim])) + + random.shuffle(index_list) + torch_index = torch.tensor(index_list[:input_shape[dim]], dtype=torch.int64) + index = TestTensor( + [input_shape[dim]], + torch_index.stride(), + InfiniDtype.I64, + device, + "manual", + set_tensor=torch_index + ) + + torch_index_copy_inplace(output.torch_tensor(), input.torch_tensor(), index.torch_tensor(), dim) + + if sync is not None: + sync() + + descriptor = infiniopOperatorDescriptor_t() + check_error( + LIBINFINIOP.infiniopCreateIndexCopyInplaceDescriptor( + handle, + ctypes.byref(descriptor), + output.descriptor, + input.descriptor, + index.descriptor, + dim, + ) + ) + + # Invalidate the shape and strides in the descriptor to prevent them from being directly used by the kernel + for tensor in [output, input, index]: + tensor.destroy_desc() + + workspace_size = c_uint64(0) + check_error( + LIBINFINIOP.infiniopGetIndexCopyInplaceWorkspaceSize( + descriptor, ctypes.byref(workspace_size) + ) + ) + workspace = TestWorkspace(workspace_size.value, output.device) + + def lib_index_copy_inplace(): + check_error( + LIBINFINIOP.infiniopIndexCopyInplace( + descriptor, + workspace.data(), + workspace.size(), + output.data(), + input.data(), + index.data(), + None, + ) + ) + + lib_index_copy_inplace() + + atol, rtol = get_tolerance(_TOLERANCE_MAP, dtype) + if DEBUG: + debug(output.actual_tensor(), output.torch_tensor(), atol=atol, rtol=rtol) + # print('input:\n', input.torch_tensor()) + # print('index:\n', index.torch_tensor()) + # print('output:\n', output.torch_tensor(), '\n', output.actual_tensor(), ) + + + assert torch.allclose(output.actual_tensor(), output.torch_tensor(), atol=atol, rtol=rtol) + + # Profiling workflow + if PROFILE: + # fmt: off + profile_operation("PyTorch", lambda: torch_index_copy_inplace( + output.torch_tensor(), input.torch_tensor(), index.torch_tensor(), dim + ), device, NUM_PRERUN, NUM_ITERATIONS) + profile_operation(" lib", lambda: lib_index_copy_inplace(), device, NUM_PRERUN, NUM_ITERATIONS) + # fmt: on + check_error(LIBINFINIOP.infiniopDestroyIndexCopyInplaceDescriptor(descriptor)) + + +if __name__ == "__main__": + args = get_args() + + # Configure testing options + DEBUG = args.debug + PROFILE = args.profile + NUM_PRERUN = args.num_prerun + NUM_ITERATIONS = args.num_iterations + + for device in get_test_devices(args): + test_operator(device, test, _TEST_CASES, _TENSOR_DTYPES) + + print("\033[92mTest my index_copy_inplace passed!\033[0m") diff --git a/test/infiniop/interpolate_nearest.py b/test/infiniop/interpolate_nearest.py new file mode 100644 index 000000000..0440fdfec --- /dev/null +++ 
b/test/infiniop/interpolate_nearest.py
@@ -0,0 +1,265 @@
+import torch
+import ctypes
+from ctypes import c_uint64
+
+from libinfiniop import (
+    LIBINFINIOP,
+    TestTensor,
+    get_test_devices,
+    check_error,
+    test_operator,
+    get_args,
+    debug,
+    get_tolerance,
+    profile_operation,
+    TestWorkspace,
+    InfiniDtype,
+    InfiniDtypeNames,
+    InfiniDeviceNames,
+    infiniopOperatorDescriptor_t,
+)
+from enum import Enum, auto
+from typing import List, Tuple
+import math
+from torch.nn import functional as F
+
+PROFILE = False
+NUM_PRERUN = 10
+NUM_ITERATIONS = 1000
+
+# Test cases: (input_shape, input_stride, output_shape, output_stride)
+_TEST_CASES = [
+    # 2D test cases - simplified to one line each
+    ((1, 1, 2, 2), None, (1, 1, 4, 4), None),  # Simple contiguous case
+    ((1, 3, 4, 4), (48, 16, 4, 1), (1, 3, 8, 8), (192, 64, 8, 1)),  # 2D upscaling 2x
+    ((1, 3, 8, 8), (192, 64, 8, 1), (1, 3, 4, 4), (48, 16, 4, 1)),  # 2D downscaling 2x
+    ((2, 4, 2, 2), (16, 4, 2, 1), (2, 4, 6, 6), (144, 36, 6, 1)),  # Batch upscaling
+    (
+        (1, 1, 3, 5),
+        (15, 15, 5, 1),
+        (1, 1, 9, 10),
+        (90, 90, 10, 1),
+    ),  # Different aspect ratio
+    (
+        (4, 64, 16, 16),
+        (16384, 256, 16, 1),
+        (4, 64, 32, 32),
+        (65536, 1024, 32, 1),
+    ),  # Large batch
+    ((1, 1, 1, 1), (1, 1, 1, 1), (1, 1, 7, 7), (49, 49, 7, 1)),  # Small to large
+    (
+        (1, 2, 3, 4),
+        (24, 1, 8, 2),
+        (1, 2, 6, 8),
+        (96, 1, 16, 2),
+    ),  # Non-contiguous layout
+    ((2, 3, 2, 2), (32, 8, 4, 1), (2, 3, 4, 4), (128, 32, 8, 1)),  # Padded strides
+    # 1D test cases
+    ((1, 3, 8), (24, 8, 1), (1, 3, 16), (48, 16, 1)),  # 1D upscaling 2x
+    ((2, 5, 10), (50, 10, 1), (2, 5, 5), (25, 5, 1)),  # 1D downscaling 2x
+    ((4, 2, 32), (64, 32, 1), (4, 2, 64), (128, 64, 1)),  # 1D larger upscaling
+    # 3D test cases
+    (
+        (1, 2, 2, 2, 2),
+        (16, 8, 4, 2, 1),
+        (1, 2, 4, 4, 4),
+        (128, 64, 16, 4, 1),
+    ),  # 3D upscaling 2x
+    (
+        (1, 1, 2, 3, 4),
+        (24, 24, 12, 4, 1),
+        (1, 1, 4, 6, 8),
+        (192, 192, 48, 8, 1),
+    ),  # 3D uniform upscaling
+    (
+        (3, 2, 5, 5, 5),
+        (250, 125, 25, 5, 1),
+        (3, 2, 3, 3, 3),
+        (54, 27, 9, 3, 1),
+    ),  # 3D non-uniform scaling
+]
+
+# Data types used for testing
+_TENSOR_DTYPES = [InfiniDtype.F32, InfiniDtype.F16, InfiniDtype.BF16, InfiniDtype.I8]
+
+# Tolerance map for different data types
+_TOLERANCE_MAP = {
+    InfiniDtype.F16: {"atol": 1e-3, "rtol": 1e-3},
+    InfiniDtype.F32: {"atol": 1e-4, "rtol": 1e-4},
+    InfiniDtype.BF16: {"atol": 1e-2, "rtol": 1e-2},
+    InfiniDtype.I8: {"atol": 0, "rtol": 0},
+}
+
+DEBUG = False
+PROFILE = False
+NUM_PRERUN = 10
+NUM_ITERATIONS = 1000
+
+
+def interpolate_nearest(input_tensor, output_shape, output_tensor):
+    """
+    Perform nearest neighbor interpolation using PyTorch as reference
+    """
+    # Extract spatial dimensions (H, W)
+    target_size = output_shape[2:]  # Skip batch and channel dimensions
+
+    # Use PyTorch's interpolate function with nearest mode
+    if input_tensor.dtype in [
+        torch.int8,
+        torch.uint8,
+        torch.int16,
+        torch.int32,
+        torch.int64,
+    ]:
+        # For integer types, cast to float32, interpolate, then cast back to the original type
+        original_dtype = input_tensor.dtype
+
+        # Interpolate in float32
+        float_input = input_tensor.float()
+        result = F.interpolate(float_input, size=target_size, mode="nearest")
+
+        # Cast back to the original dtype
+        result = result.to(original_dtype)
+    else:
+        result = F.interpolate(input_tensor, size=target_size, mode="nearest")
+
+    output_tensor.copy_(result)
+
+
+def test(
+    handle,
+    device,
+    input_shape,
+    input_stride,
+    output_shape,
+    output_stride,
+    tensor_dtype=InfiniDtype.F16,
+    sync=None,
+):
+    # Create input and output tensors
+    # For I8 type, use
appropriate randint range (-128 to 127) and don't use scale + if tensor_dtype == InfiniDtype.I8: + input_tensor = TestTensor( + input_shape, input_stride, dt=tensor_dtype, device=device, + randint_low=-128, randint_high=128 + ) + output_tensor = TestTensor( + output_shape, output_stride, dt=tensor_dtype, device=device, + randint_low=-128, randint_high=128 + ) + else: + input_tensor = TestTensor( + input_shape, input_stride, dt=tensor_dtype, device=device, scale=1.0 + ) + output_tensor = TestTensor( + output_shape, output_stride, dt=tensor_dtype, device=device + ) + + print( + f"Testing InterpolateNearest on {InfiniDeviceNames[device]} with " + f"input_shape: {input_shape}, output_shape: {output_shape}, " + f"input_stride: {input_stride}, output_stride: {output_stride}, " + f"dtype: {InfiniDtypeNames[tensor_dtype]}" + ) + + # Compute reference result using PyTorch + interpolate_nearest( + input_tensor.torch_tensor(), output_shape, output_tensor.torch_tensor() + ) + + if sync is not None: + sync() + + # Create descriptor for our interpolate_nearest operator + descriptor = infiniopOperatorDescriptor_t() + check_error( + LIBINFINIOP.infiniopCreateInterpolateNearestDescriptor( + handle, + ctypes.byref(descriptor), + output_tensor.descriptor, + input_tensor.descriptor, + ) + ) + + # Invalidate the shape and strides in the descriptor to prevent them from being directly used by the kernel + for tensor in [input_tensor, output_tensor]: + if tensor is not None: + tensor.destroy_desc() + + # Get workspace size + workspace_size = ctypes.c_uint64(0) + check_error( + LIBINFINIOP.infiniopGetInterpolateNearestWorkspaceSize( + descriptor, ctypes.byref(workspace_size) + ) + ) + workspace = TestWorkspace(workspace_size.value, output_tensor.device) + + def lib_interpolate_nearest(): + check_error( + LIBINFINIOP.infiniopInterpolateNearest( + descriptor, + workspace.data(), + workspace_size.value, + output_tensor.data(), + input_tensor.data(), + None, + ) + ) + + # Execute the operation + lib_interpolate_nearest() + + # Check results + atol, rtol = get_tolerance(_TOLERANCE_MAP, tensor_dtype) + if DEBUG: + debug( + output_tensor.actual_tensor(), + output_tensor.torch_tensor(), + atol=atol, + rtol=rtol, + ) + + assert torch.allclose( + output_tensor.actual_tensor(), + output_tensor.torch_tensor(), + atol=atol, + rtol=rtol, + ), f"Results don't match for shape {input_shape} -> {output_shape}" + + # Profiling workflow + if PROFILE: + profile_operation( + "PyTorch", + lambda: interpolate_nearest( + input_tensor.torch_tensor(), output_shape, output_tensor.torch_tensor() + ), + device, + NUM_PRERUN, + NUM_ITERATIONS, + ) + profile_operation( + " lib", + lambda: lib_interpolate_nearest(), + device, + NUM_PRERUN, + NUM_ITERATIONS, + ) + + # Clean up + check_error(LIBINFINIOP.infiniopDestroyInterpolateNearestDescriptor(descriptor)) + + +if __name__ == "__main__": + args = get_args() + + # Configure testing options + DEBUG = args.debug + PROFILE = args.profile + NUM_PRERUN = args.num_prerun + NUM_ITERATIONS = args.num_iterations + + for device in get_test_devices(args): + test_operator(device, test, _TEST_CASES, _TENSOR_DTYPES) + + print("\033[92mTest passed!\033[0m") diff --git a/test/infiniop/libinfiniop/op_register.py b/test/infiniop/libinfiniop/op_register.py index d3edfbbab..9a3be7e71 100644 --- a/test/infiniop/libinfiniop/op_register.py +++ b/test/infiniop/libinfiniop/op_register.py @@ -4,7 +4,7 @@ infiniopOperatorDescriptor_t, ) -from ctypes import c_int32, c_void_p, c_size_t, POINTER, c_float +from ctypes import 
c_int32, c_void_p, c_size_t, POINTER, c_float, c_bool class OpRegister: @@ -1146,3 +1146,322 @@ def paged_attention_prefill_(lib): lib.infiniopDestroyPagedAttentionPrefillDescriptor.argtypes = [ infiniopOperatorDescriptor_t, ] + + +@OpRegister.operator +def averagepool_(lib): + lib.infiniopCreateAvgPoolDescriptor.restype = c_int32 + lib.infiniopCreateAvgPoolDescriptor.argtypes = [ + infiniopHandle_t, + POINTER(infiniopOperatorDescriptor_t), + infiniopTensorDescriptor_t, + infiniopTensorDescriptor_t, + c_void_p, + c_void_p, + c_void_p, + c_bool, + ] + lib.infiniopGetAvgPoolWorkspaceSize.restype = c_int32 + lib.infiniopGetAvgPoolWorkspaceSize.argtypes = [ + infiniopOperatorDescriptor_t, + POINTER(c_size_t), + ] + lib.infiniopAvgPool.restype = c_int32 + lib.infiniopAvgPool.argtypes = [ + infiniopOperatorDescriptor_t, + c_void_p, + c_size_t, + c_void_p, + c_void_p, + c_void_p, + ] + lib.infiniopDestroyAvgPoolDescriptor.restype = c_int32 + lib.infiniopDestroyAvgPoolDescriptor.argtypes = [ + infiniopOperatorDescriptor_t, + ] + + +@OpRegister.operator +def batch_norm_(lib): + lib.infiniopCreateBatchNormDescriptor.restype = c_int32 + lib.infiniopCreateBatchNormDescriptor.argtypes = [ + infiniopHandle_t, + POINTER(infiniopOperatorDescriptor_t), + infiniopTensorDescriptor_t, + infiniopTensorDescriptor_t, + infiniopTensorDescriptor_t, + infiniopTensorDescriptor_t, + infiniopTensorDescriptor_t, + infiniopTensorDescriptor_t, + c_float, + c_float, + ] + lib.infiniopGetBatchNormWorkspaceSize.restype = c_int32 + lib.infiniopGetBatchNormWorkspaceSize.argtypes = [ + infiniopOperatorDescriptor_t, + POINTER(c_size_t), + ] + lib.infiniopBatchNorm.restype = c_int32 + lib.infiniopBatchNorm.argtypes = [ + infiniopOperatorDescriptor_t, + c_void_p, + c_size_t, + c_void_p, + c_void_p, + c_void_p, + c_void_p, + c_void_p, + c_void_p, + c_void_p, + ] + lib.infiniopDestroyBatchNormDescriptor.restype = c_int32 + lib.infiniopDestroyBatchNormDescriptor.argtypes = [ + infiniopOperatorDescriptor_t, + ] + + +@OpRegister.operator +def cross_entropy_loss_(lib): + lib.infiniopCreateCrossEntropyLossDescriptor.restype = c_int32 + lib.infiniopCreateCrossEntropyLossDescriptor.argtypes = [ + infiniopHandle_t, + POINTER(infiniopOperatorDescriptor_t), + infiniopTensorDescriptor_t, + infiniopTensorDescriptor_t, + infiniopTensorDescriptor_t, + ] + lib.infiniopGetCrossEntropyLossWorkspaceSize.restype = c_int32 + lib.infiniopGetCrossEntropyLossWorkspaceSize.argtypes = [ + infiniopOperatorDescriptor_t, + POINTER(c_size_t), + ] + lib.infiniopCrossEntropyLoss.restype = c_int32 + lib.infiniopCrossEntropyLoss.argtypes = [ + infiniopOperatorDescriptor_t, + c_void_p, + c_size_t, + c_void_p, + c_void_p, + c_void_p, + c_void_p, + ] + lib.infiniopDestroyCrossEntropyLossDescriptor.restype = c_int32 + lib.infiniopDestroyCrossEntropyLossDescriptor.argtypes = [ + infiniopOperatorDescriptor_t, + ] + + +@OpRegister.operator +def exp_(lib): + lib.infiniopCreateExpDescriptor.restype = c_int32 + lib.infiniopCreateExpDescriptor.argtypes = [ + infiniopHandle_t, + POINTER(infiniopOperatorDescriptor_t), + infiniopTensorDescriptor_t, + infiniopTensorDescriptor_t, + ] + lib.infiniopGetExpWorkspaceSize.restype = c_int32 + lib.infiniopGetExpWorkspaceSize.argtypes = [ + infiniopOperatorDescriptor_t, + POINTER(c_size_t), + ] + lib.infiniopExp.restype = c_int32 + lib.infiniopExp.argtypes = [ + infiniopOperatorDescriptor_t, + c_void_p, + c_size_t, + c_void_p, + c_void_p, + c_void_p, + ] + lib.infiniopDestroyExpDescriptor.restype = c_int32 + 
lib.infiniopDestroyExpDescriptor.argtypes = [ + infiniopOperatorDescriptor_t, + ] + + +@OpRegister.operator +def gather_(lib): + lib.infiniopCreateGatherDescriptor.restype = c_int32 + lib.infiniopCreateGatherDescriptor.argtypes = [ + infiniopHandle_t, + POINTER(infiniopOperatorDescriptor_t), + infiniopTensorDescriptor_t, + infiniopTensorDescriptor_t, + infiniopTensorDescriptor_t, + c_size_t, + ] + lib.infiniopGetGatherWorkspaceSize.restype = c_int32 + lib.infiniopGetGatherWorkspaceSize.argtypes = [ + infiniopOperatorDescriptor_t, + POINTER(c_size_t), + ] + lib.infiniopGather.restype = c_int32 + lib.infiniopGather.argtypes = [ + infiniopOperatorDescriptor_t, + c_void_p, + c_size_t, + c_void_p, + c_void_p, + c_void_p, + c_void_p, + ] + lib.infiniopDestroyGatherDescriptor.restype = c_int32 + lib.infiniopDestroyGatherDescriptor.argtypes = [ + infiniopOperatorDescriptor_t, + ] + + +@OpRegister.operator +def hardswish_(lib): + lib.infiniopCreateHardswishDescriptor.restype = c_int32 + lib.infiniopCreateHardswishDescriptor.argtypes = [ + infiniopHandle_t, + POINTER(infiniopOperatorDescriptor_t), + infiniopTensorDescriptor_t, + infiniopTensorDescriptor_t, + ] + lib.infiniopGetHardswishWorkspaceSize.restype = c_int32 + lib.infiniopGetHardswishWorkspaceSize.argtypes = [ + infiniopOperatorDescriptor_t, + POINTER(c_size_t), + ] + lib.infiniopHardswish.restype = c_int32 + lib.infiniopHardswish.argtypes = [ + infiniopOperatorDescriptor_t, + c_void_p, + c_size_t, + c_void_p, + c_void_p, + c_void_p, + ] + lib.infiniopDestroyHardswishDescriptor.restype = c_int32 + lib.infiniopDestroyHardswishDescriptor.argtypes = [ + infiniopOperatorDescriptor_t, + ] + + +@OpRegister.operator +def index_copy_inplace_(lib): + lib.infiniopCreateIndexCopyInplaceDescriptor.restype = c_int32 + lib.infiniopCreateIndexCopyInplaceDescriptor.argtypes = [ + infiniopHandle_t, + POINTER(infiniopOperatorDescriptor_t), + infiniopTensorDescriptor_t, + infiniopTensorDescriptor_t, + infiniopTensorDescriptor_t, + c_size_t, + ] + lib.infiniopGetIndexCopyInplaceWorkspaceSize.restype = c_int32 + lib.infiniopGetIndexCopyInplaceWorkspaceSize.argtypes = [ + infiniopOperatorDescriptor_t, + POINTER(c_size_t), + ] + lib.infiniopIndexCopyInplace.restype = c_int32 + lib.infiniopIndexCopyInplace.argtypes = [ + infiniopOperatorDescriptor_t, + c_void_p, + c_size_t, + c_void_p, + c_void_p, + c_void_p, + c_void_p, + ] + lib.infiniopDestroyIndexCopyInplaceDescriptor.restype = c_int32 + lib.infiniopDestroyIndexCopyInplaceDescriptor.argtypes = [ + infiniopOperatorDescriptor_t, + ] + + +@OpRegister.operator +def interpolate_nearest_(lib): + lib.infiniopCreateInterpolateNearestDescriptor.restype = c_int32 + lib.infiniopCreateInterpolateNearestDescriptor.argtypes = [ + infiniopHandle_t, + POINTER(infiniopOperatorDescriptor_t), + infiniopTensorDescriptor_t, + infiniopTensorDescriptor_t, + ] + lib.infiniopGetInterpolateNearestWorkspaceSize.restype = c_int32 + lib.infiniopGetInterpolateNearestWorkspaceSize.argtypes = [ + infiniopOperatorDescriptor_t, + POINTER(c_size_t), + ] + lib.infiniopInterpolateNearest.restype = c_int32 + lib.infiniopInterpolateNearest.argtypes = [ + infiniopOperatorDescriptor_t, + c_void_p, + c_size_t, + c_void_p, + c_void_p, + c_void_p, + ] + lib.infiniopDestroyInterpolateNearestDescriptor.restype = c_int32 + lib.infiniopDestroyInterpolateNearestDescriptor.argtypes = [ + infiniopOperatorDescriptor_t, + ] + + +@OpRegister.operator +def maxpool_(lib): + lib.infiniopCreateMaxPoolDescriptor.restype = c_int32 + 
lib.infiniopCreateMaxPoolDescriptor.argtypes = [ + infiniopHandle_t, + POINTER(infiniopOperatorDescriptor_t), + infiniopTensorDescriptor_t, + infiniopTensorDescriptor_t, + c_void_p, + c_void_p, + c_void_p, + c_bool, + ] + lib.infiniopGetMaxPoolWorkspaceSize.restype = c_int32 + lib.infiniopGetMaxPoolWorkspaceSize.argtypes = [ + infiniopOperatorDescriptor_t, + POINTER(c_size_t), + ] + lib.infiniopMaxPool.restype = c_int32 + lib.infiniopMaxPool.argtypes = [ + infiniopOperatorDescriptor_t, + c_void_p, + c_size_t, + c_void_p, + c_void_p, + c_void_p, + ] + lib.infiniopDestroyMaxPoolDescriptor.restype = c_int32 + lib.infiniopDestroyMaxPoolDescriptor.argtypes = [ + infiniopOperatorDescriptor_t, + ] + + +@OpRegister.operator +def scatter_(lib): + lib.infiniopCreateScatterDescriptor.restype = c_int32 + lib.infiniopCreateScatterDescriptor.argtypes = [ + infiniopHandle_t, + POINTER(infiniopOperatorDescriptor_t), + infiniopTensorDescriptor_t, + infiniopTensorDescriptor_t, + infiniopTensorDescriptor_t, + c_size_t, + ] + lib.infiniopGetScatterWorkspaceSize.restype = c_int32 + lib.infiniopGetScatterWorkspaceSize.argtypes = [ + infiniopOperatorDescriptor_t, + POINTER(c_size_t), + ] + lib.infiniopScatter.restype = c_int32 + lib.infiniopScatter.argtypes = [ + infiniopOperatorDescriptor_t, + c_void_p, + c_size_t, + c_void_p, + c_void_p, + c_void_p, + c_void_p, + ] + lib.infiniopDestroyScatterDescriptor.restype = c_int32 + lib.infiniopDestroyScatterDescriptor.argtypes = [ + infiniopOperatorDescriptor_t, + ] diff --git a/test/infiniop/maxpool.py b/test/infiniop/maxpool.py new file mode 100644 index 000000000..81ddce060 --- /dev/null +++ b/test/infiniop/maxpool.py @@ -0,0 +1,242 @@ +import torch +import ctypes +from ctypes import c_uint64, c_bool + +from libinfiniop import ( + LIBINFINIOP, + TestTensor, + get_test_devices, + check_error, + test_operator, + get_args, + debug, + get_tolerance, + profile_operation, + TestWorkspace, + InfiniDtype, + InfiniDtypeNames, + InfiniDeviceNames, + infiniopOperatorDescriptor_t, +) +from enum import Enum, auto +from typing import List, Tuple +import math +from torch.nn import functional as F + +# Configuration for profiling +DEBUG = False +PROFILE = False +NUM_PRERUN = 10 +NUM_ITERATIONS = 1000 + +# Test cases: (input_shape, input_stride, kernel_size, stride, padding, ceil_mode) +_TEST_CASES = [ + # 1D max pooling cases + ((1, 3, 8), None, (2,), (2,), (0,), False), + ((2, 4, 16), None, (3,), (2,), (1,), False), + ((3, 2, 77), None, (6,), (4,), (3,), True), + # 2D max pooling cases + ((1, 1, 4, 4), None, (2, 2), (2, 2), (0, 0), False), + ((2, 3, 8, 8), None, (3, 3), (2, 2), (1, 1), False), + ((1, 64, 32, 32), None, (2, 2), (2, 2), (0, 0), False), + ((4, 128, 16, 16), None, (3, 3), (1, 1), (1, 1), False), + # 3D max pooling cases + ((1, 1, 4, 4, 4), None, (2, 2, 2), (2, 2, 2), (0, 0, 0), False), + ((2, 2, 8, 8, 8), None, (2, 3, 3), (2, 2, 2), (0, 1, 1), False), + # Cases with ceil_mode=True + ((1, 1, 7, 7), None, (3, 3), (2, 2), (1, 1), True), + ((1, 2, 5), None, (3,), (2,), (0,), True), +] + +# Data types used for testing +_TENSOR_DTYPES = [InfiniDtype.F16, InfiniDtype.F32, InfiniDtype.BF16] + +# Tolerance map for different data types +_TOLERANCE_MAP = { + InfiniDtype.F16: {"atol": 1e-3, "rtol": 1e-3}, + InfiniDtype.F32: {"atol": 1e-4, "rtol": 1e-4}, + InfiniDtype.BF16: {"atol": 1e-2, "rtol": 1e-2}, +} + + +def max_pool(input_tensor, kernel_size, stride, padding, ceil_mode): + """ + Perform max pooling using PyTorch as reference + """ + ndim = len(input_tensor.shape) - 2 # 
Spatial dimensions
+
+    if ndim == 1:
+        result = F.max_pool1d(
+            input_tensor,
+            kernel_size=kernel_size[0],
+            stride=stride[0],
+            padding=padding[0],
+            ceil_mode=ceil_mode,
+        )
+    elif ndim == 2:
+        result = F.max_pool2d(
+            input_tensor,
+            kernel_size=kernel_size,
+            stride=stride,
+            padding=padding,
+            ceil_mode=ceil_mode,
+        )
+    elif ndim == 3:
+        result = F.max_pool3d(
+            input_tensor,
+            kernel_size=kernel_size,
+            stride=stride,
+            padding=padding,
+            ceil_mode=ceil_mode,
+        )
+    else:
+        raise ValueError(f"Unsupported spatial dimensions: {ndim}")
+
+    return result
+
+
+def tuple_to_void_p(py_tuple: Tuple):
+    """Convert a python tuple to a ctype void pointer"""
+    array = ctypes.c_uint64 * len(py_tuple)
+    data_array = array(*py_tuple)
+    return ctypes.cast(data_array, ctypes.c_void_p)
+
+
+def test(
+    handle,
+    device,
+    input_shape,
+    input_stride,
+    kernel_size,
+    stride,
+    padding,
+    ceil_mode,
+    tensor_dtype=InfiniDtype.F16,
+    sync=None,
+):
+    # Create input tensor
+    input_tensor = TestTensor(
+        input_shape, input_stride, dt=tensor_dtype, device=device, scale=1.0
+    )
+
+    # Compute reference result using PyTorch
+    torch_ref_output = max_pool(
+        input_tensor.torch_tensor(),
+        kernel_size,
+        stride,
+        padding,
+        ceil_mode,
+    )
+
+    # Use the PyTorch output shape to initialize output_tensor
+    output_tensor = TestTensor(
+        torch_ref_output.shape, None, dt=tensor_dtype, device=device
+    )
+
+    print(
+        f"Testing MaxPool on {InfiniDeviceNames[device]} with "
+        f"input_shape: {input_shape}, kernel_size: {kernel_size}, "
+        f"stride: {stride}, padding: {padding}, ceil_mode: {ceil_mode}, "
+        f"dtype: {InfiniDtypeNames[tensor_dtype]}"
+    )
+
+    if sync is not None:
+        sync()
+
+    # Create descriptor for our max pool operator
+    descriptor = infiniopOperatorDescriptor_t()
+    check_error(
+        LIBINFINIOP.infiniopCreateMaxPoolDescriptor(
+            handle,
+            ctypes.byref(descriptor),
+            output_tensor.descriptor,
+            input_tensor.descriptor,
+            tuple_to_void_p(kernel_size),
+            tuple_to_void_p(stride),
+            tuple_to_void_p(padding),
+            c_bool(ceil_mode),
+        )
+    )
+
+    # Invalidate the shape and strides in the descriptor to prevent them from being directly used by the kernel
+    for tensor in [input_tensor, output_tensor]:
+        if tensor is not None:
+            tensor.destroy_desc()
+
+    # Get workspace size
+    workspace_size = ctypes.c_uint64(0)
+    check_error(
+        LIBINFINIOP.infiniopGetMaxPoolWorkspaceSize(
+            descriptor, ctypes.byref(workspace_size)
+        )
+    )
+    workspace = TestWorkspace(workspace_size.value, output_tensor.device)
+
+    def lib_max_pool():
+        check_error(
+            LIBINFINIOP.infiniopMaxPool(
+                descriptor,
+                workspace.data(),
+                workspace_size.value,
+                output_tensor.data(),
+                input_tensor.data(),
+                None,
+            )
+        )
+
+    # Execute the operation
+    lib_max_pool()
+
+    # Check results
+    atol, rtol = get_tolerance(_TOLERANCE_MAP, tensor_dtype)
+    if DEBUG:
+        debug(
+            output_tensor.actual_tensor(),
+            torch_ref_output,
+            atol=atol,
+            rtol=rtol,
+        )
+
+    assert torch.allclose(
+        output_tensor.actual_tensor(),
+        torch_ref_output,
+        atol=atol,
+        rtol=rtol,
+    ), f"Results don't match for input_shape {input_shape}, kernel_size {kernel_size}"
+
+    # Profiling workflow
+    if PROFILE:
+        profile_operation(
+            "PyTorch",
+            lambda: max_pool(
+                input_tensor.torch_tensor(),
+                kernel_size,
+                stride,
+                padding,
+                ceil_mode,
+            ),
+            device,
+            NUM_PRERUN,
+            NUM_ITERATIONS,
+        )
+        profile_operation(
+            " lib", lambda: lib_max_pool(), device, NUM_PRERUN, NUM_ITERATIONS
+        )
+
+    # Clean up
+    check_error(LIBINFINIOP.infiniopDestroyMaxPoolDescriptor(descriptor))
+
+
+if __name__ == "__main__":
+    args = get_args()
+
+    #
Configure testing options + DEBUG = args.debug + PROFILE = args.profile + NUM_PRERUN = args.num_prerun + NUM_ITERATIONS = args.num_iterations + + for device in get_test_devices(args): + test_operator(device, test, _TEST_CASES, _TENSOR_DTYPES) + + print("\033[92mTest passed!\033[0m") diff --git a/test/infiniop/scatter.py b/test/infiniop/scatter.py new file mode 100644 index 000000000..86ccdcdeb --- /dev/null +++ b/test/infiniop/scatter.py @@ -0,0 +1,196 @@ +import torch +import ctypes +from ctypes import c_uint64 +from libinfiniop import ( + LIBINFINIOP, + TestTensor, + get_test_devices, + check_error, + test_operator, + get_args, + debug, + get_tolerance, + profile_operation, + TestWorkspace, + InfiniDtype, + InfiniDtypeNames, + InfiniDeviceNames, + infiniopOperatorDescriptor_t, +) +from enum import Enum, auto +import random + +_TEST_CASES = [ + # input_shape, index_shape, output_shape, dim, input_strides, output_strides, index_strides + ((6, 7), (6, 7), (6, 7), 1, (7, 1), (1, 7), None), + ((2, 3, 7), (2, 3, 5), (2, 3, 5), 2, (1, 2, 6), None, None), + ((10, 5, 4), (10, 4, 4), (10, 4, 4), 1, None, None, [16, 4, 1]), + ((11, 2, 2, 4), (11, 2, 2, 4), (11, 2, 2, 4), 0, None, [16, 8, 4, 1], None), +] + + +_TENSOR_DTYPES = [InfiniDtype.F16, InfiniDtype.F32, InfiniDtype.BF16] + +# Tolerance map for different data types +_TOLERANCE_MAP = { + InfiniDtype.F16: {"atol": 0, "rtol": 0}, + InfiniDtype.F32: {"atol": 0, "rtol": 0}, + InfiniDtype.BF16: {"atol": 0, "rtol": 0}, +} + +DEBUG = False +PROFILE = False +NUM_PRERUN = 10 +NUM_ITERATIONS = 1000 + + +def torch_scatter(output: torch.Tensor, input, index, dim): + output.scatter_(dim, index, src=input) + + +def test( + handle, + device, + input_shape, index_shape, output_shape, dim, input_strides, output_strides, index_strides, + dtype, + sync=None, +): + print( + f"Testing scatter on {InfiniDeviceNames[device]} with input_shape:{input_shape}, index_shape:{index_shape}, output_shape:{output_shape}, dim:{dim}," + f"dtype:{InfiniDtypeNames[dtype]}" + ) + + output = TestTensor( + output_shape, + output_strides, + dtype, + device, + "zeros", + ) + + input = TestTensor( + input_shape, + input_strides, + dtype, + device, + ) + + def get_test_index_tensor(input_shape, index_shape, output_shape, scatter_dim): + index = torch.empty(index_shape, dtype=torch.int64) + ndim = len(input_shape) + if ndim == 2 and scatter_dim == 1: + for i in range(input.shape[0]): + row = list(range(output_shape[dim])) + random.shuffle(row) + index[i, :] = torch.tensor(row[:index_shape[dim]]).type(torch.float64) + elif ndim == 3 and scatter_dim == 2: + for i in range(input.shape[0]): + for j in range(input.shape[1]): + row = list(range(output_shape[dim])) + random.shuffle(row) + index[i, j, :] = torch.tensor(row[:index_shape[dim]]).type(torch.float64) + elif ndim == 3 and scatter_dim == 1: + for i in range(input.shape[0]): + for j in range(input.shape[2]): + row = list(range(output_shape[dim])) + random.shuffle(row) + index[i, :, j] = torch.tensor(row[:index_shape[dim]]).type(torch.float64) + elif ndim == 4 and scatter_dim == 0: + for i in range(input.shape[1]): + for j in range(input.shape[2]): + for k in range(input.shape[3]): + row = list(range(output_shape[dim])) + random.shuffle(row) + index[:, i, j, k] = torch.tensor(row[:index_shape[dim]]).type(torch.float64) + return index + + torch_index = get_test_index_tensor(input_shape, index_shape, output_shape, dim).type(torch.int64) + if index_strides: + torch_index = torch_index.as_strided(index_shape, index_strides) + index = 
TestTensor( + index_shape, + torch_index.stride(), + InfiniDtype.I64, + device, + "manual", + set_tensor=torch_index + ) + + torch_scatter(output.torch_tensor(), input.torch_tensor(), index.torch_tensor(), dim) + + if sync is not None: + sync() + + descriptor = infiniopOperatorDescriptor_t() + check_error( + LIBINFINIOP.infiniopCreateScatterDescriptor( + handle, + ctypes.byref(descriptor), + output.descriptor, + input.descriptor, + index.descriptor, + dim, + ) + ) + + # Invalidate the shape and strides in the descriptor to prevent them from being directly used by the kernel + for tensor in [output, input, index]: + tensor.destroy_desc() + + workspace_size = c_uint64(0) + check_error( + LIBINFINIOP.infiniopGetScatterWorkspaceSize( + descriptor, ctypes.byref(workspace_size) + ) + ) + workspace = TestWorkspace(workspace_size.value, output.device) + + def lib_scatter(): + check_error( + LIBINFINIOP.infiniopScatter( + descriptor, + workspace.data(), + workspace.size(), + output.data(), + input.data(), + index.data(), + None, + ) + ) + + lib_scatter() + + atol, rtol = get_tolerance(_TOLERANCE_MAP, dtype) + if DEBUG: + debug(output.actual_tensor(), output.torch_tensor(), atol=atol, rtol=rtol) + # print('input:\n', input.torch_tensor()) + # print('index:\n', index.torch_tensor()) + # print('output:\n', output.torch_tensor(), '\n', output.actual_tensor(), ) + + + assert torch.allclose(output.actual_tensor(), output.torch_tensor(), atol=atol, rtol=rtol) + + # Profiling workflow + if PROFILE: + # fmt: off + profile_operation("PyTorch", lambda: torch_scatter( + output.torch_tensor(), input.torch_tensor(), index.torch_tensor(), dim + ), device, NUM_PRERUN, NUM_ITERATIONS) + profile_operation(" lib", lambda: lib_scatter(), device, NUM_PRERUN, NUM_ITERATIONS) + # fmt: on + check_error(LIBINFINIOP.infiniopDestroyScatterDescriptor(descriptor)) + + +if __name__ == "__main__": + args = get_args() + + # Configure testing options + DEBUG = args.debug + PROFILE = args.profile + NUM_PRERUN = args.num_prerun + NUM_ITERATIONS = args.num_iterations + + for device in get_test_devices(args): + test_operator(device, test, _TEST_CASES, _TENSOR_DTYPES) + + print("\033[92mTest my scatter passed!\033[0m")
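+
+
+# Appendix sketch (illustrative only, never called by the harness): the per-slice
+# shuffling in get_test_index_tensor guarantees the indices are unique along `dim`,
+# so scatter_ is deterministic and the exact (atol=0, rtol=0) comparison above is
+# fair. Assuming tuple-shaped arguments, an equivalent and more compact construction
+# uses argsort of random noise:
+def _permutation_index_sketch(index_shape, output_dim_size, dim):
+    noise_shape = index_shape[:dim] + (output_dim_size,) + index_shape[dim + 1:]
+    perm = torch.rand(noise_shape).argsort(dim=dim)  # unique 0..output_dim_size-1 per slice
+    return perm.narrow(dim, 0, index_shape[dim]).contiguous()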