/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#pragma once

#include <executorch/backends/aoti/utils.h>
#include <executorch/backends/apple/metal/runtime/shims/types.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <cstdint>
#include <vector> // needed for the std::vector declarations below
namespace executorch {
namespace backends {
namespace metal {

// Supported data types in the ET Metal backend. Values match PyTorch's
// dtype (c10::ScalarType) codes; commented-out entries are defined by PyTorch
// but not currently supported here.
enum class SupportedDTypes : int32_t {
  // UINT8 = 0, // PyTorch's uint8 dtype code
  // INT8 = 1, // PyTorch's int8 dtype code
  // INT16 = 2, // PyTorch's int16 dtype code
  // INT32 = 3, // PyTorch's int32 dtype code
  INT64 = 4, // PyTorch's int64 dtype code
  // FLOAT16 = 5, // PyTorch's float16 dtype code
  FLOAT32 = 6, // PyTorch's float32 dtype code
  // FLOAT64 = 7, // PyTorch's float64 dtype code
  // BOOL = 11, // PyTorch's bool dtype code
  BFLOAT16 = 15 // PyTorch's bfloat16 dtype code
};
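
// Example (illustrative): since the enumerators carry PyTorch's dtype codes,
// a raw dtype code arriving from the AOTI layer can be compared with a cast:
//   bool is_f32 = (dtype == static_cast<int32_t>(SupportedDTypes::FLOAT32));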

extern "C" {

// Helper function to check whether a dtype code is supported in the Metal
// backend (i.e. listed in SupportedDTypes above)
bool is_dtype_supported_in_et_metal(int32_t dtype);

// Metal-specific dtype validation utility function
AOTITorchError validate_dtype(int32_t dtype);

} // extern "C"
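
// Illustrative usage sketch (hypothetical call site; the exact error handling
// depends on how AOTITorchError is defined in the AOTI shim layer):
//
//   if (!is_dtype_supported_in_et_metal(dtype)) {
//     // reject the tensor or fall back before any Metal allocation
//   }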

// Utility function to convert a sizes pointer to a vector
std::vector<executorch::aten::SizesType> convert_sizes_to_vector(
    int64_t ndim,
    const int64_t* sizes_ptr);

// Utility function to convert a strides pointer to a vector; if strides_ptr
// is null, contiguous (row-major) strides are calculated from sizes
std::vector<executorch::aten::StridesType> convert_strides_to_vector(
    int64_t ndim,
    const int64_t* sizes_ptr,
    const int64_t* strides_ptr);
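
// Example (illustrative): for ndim = 3 and sizes [2, 3, 4] with a null
// strides_ptr, the contiguous strides derived from sizes are
// [3*4, 4, 1] = [12, 4, 1].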

// Check if a tensor is in contiguous memory format (NCHW for 4D tensors).
// Contiguous format means strides decrease from left to right, ending at 1;
// for NCHW: strides = [C*H*W, H*W, W, 1]. `sizes` and `strides` must have the
// same length. Note that this check is stricter than PyTorch's
// is_contiguous(), which ignores the strides of size-1 dimensions.
inline bool is_contiguous_tensor(
    const std::vector<executorch::aten::SizesType>& sizes,
    const std::vector<executorch::aten::StridesType>& strides) {
  const int64_t ndim = static_cast<int64_t>(strides.size());
  int64_t expected_stride = 1;
  // Walk from the innermost dimension outward, accumulating the stride a
  // contiguous layout would have and comparing it against the actual stride.
  for (int64_t i = ndim - 1; i >= 0; i--) {
    if (strides[i] != expected_stride) {
      return false;
    }
    expected_stride *= sizes[i];
  }
  return true;
}
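
// Examples (illustrative):
//   is_contiguous_tensor({2, 3, 4}, {12, 4, 1});  // true: row-major layout
//   is_contiguous_tensor({2, 3, 4}, {1, 2, 6});   // false: column-major layout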

} // namespace metal
} // namespace backends
} // namespace executorch