Skip to content

Commit 3816c91

Browse files
dbort authored and facebook-github-bot committed
Remove inline Tensor decl from tensor_util.h (#4577)
Summary: Pull Request resolved: #4577. Header files must not use `using` declarations to create aliases, especially just to reduce typing. https://google.github.io/styleguide/cppguide.html#Aliases In this case it added extra confusion because it made `exec_aten::Tensor` available as `torch::executor::Tensor`; but `torch::executor::Tensor` is the "lean" implementation of tensor, and `exec_aten::Tensor` is an alias for `torch::executor::Tensor` in lean mode, but not in ATen mode. Differential Revision: D60869599 fbshipit-source-id: aaa8df51e3eac4ee33d8b6c863d08e7cf6a3eeb8
1 parent 3a5426d commit 3816c91

File tree

3 files changed

+20
-22
lines changed

3 files changed

+20
-22
lines changed

examples/models/llama2/runner/runner.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -130,7 +130,7 @@ int32_t Runner::logitsToToken(const exec_aten::Tensor& logits_tensor) {
130130
}
131131
}
132132

133-
Result<torch::executor::Tensor> Runner::prefill(
133+
Result<exec_aten::Tensor> Runner::prefill(
134134
const std::vector<uint64_t>& tokens,
135135
ManagedTensor& managed_tokens,
136136
ManagedTensor& managed_start_pos,
@@ -202,7 +202,7 @@ Result<torch::executor::Tensor> Runner::prefill(
202202
auto logits_tensor = managed_tokens.get_aliasing_tensor();
203203
while (pos < num_tokens) {
204204
// Run the model
205-
Result<torch::executor::Tensor> logits_res = run_model_step(
205+
Result<exec_aten::Tensor> logits_res = run_model_step(
206206
cur_token, managed_tokens, managed_start_pos, num_tokens);
207207

208208
ET_CHECK_OK_OR_RETURN_ERROR(logits_res.error());
@@ -243,7 +243,7 @@ Result<torch::executor::Tensor> Runner::prefill(
243243

244244
// Given an input token. Set up the inputs for the model and execute a single
245245
// step. Returning the logits tensor.
246-
Result<torch::executor::Tensor> Runner::run_model_step(
246+
Result<exec_aten::Tensor> Runner::run_model_step(
247247
int64_t input_token,
248248
ManagedTensor& managed_tokens,
249249
ManagedTensor& managed_start_pos,
@@ -411,7 +411,7 @@ Error Runner::generate(
411411
// Generate our tokens
412412
while (pos < seq_len - 1) {
413413
// Run the model
414-
Result<torch::executor::Tensor> logits_res =
414+
Result<exec_aten::Tensor> logits_res =
415415
run_model_step(cur_token, tokens_managed, start_pos_managed, seq_len);
416416

417417
ET_CHECK_OK_OR_RETURN_ERROR(logits_res.error());

examples/models/llama2/runner/runner.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -45,12 +45,12 @@ class Runner {
4545

4646
private:
4747
int32_t logitsToToken(const exec_aten::Tensor& logits_tensor);
48-
Result<torch::executor::Tensor> prefill(
48+
Result<exec_aten::Tensor> prefill(
4949
const std::vector<uint64_t>& tokens,
5050
ManagedTensor& managed_tokens,
5151
ManagedTensor& managed_start_pos,
5252
std::function<void(const std::string&)> token_callback);
53-
Result<torch::executor::Tensor> run_model_step(
53+
Result<exec_aten::Tensor> run_model_step(
5454
int64_t input_token,
5555
ManagedTensor& tokens,
5656
ManagedTensor& start_pos,

runtime/core/exec_aten/util/tensor_util.h

Lines changed: 14 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -400,10 +400,6 @@
400400
namespace torch {
401401
namespace executor {
402402

403-
using Tensor = exec_aten::Tensor;
404-
using Scalar = exec_aten::Scalar;
405-
using ScalarType = exec_aten::ScalarType;
406-
407403
//
408404
// Utility functions for checking tensor attributes
409405
//
@@ -432,7 +428,7 @@ inline bool dim_is_valid(int64_t dim, int64_t upper_bound) {
432428
* the zero dimensional tensors in some kernels, that treat them as 1D tensors
433429
* with a single element.
434430
*/
435-
inline ssize_t nonzero_dim(const Tensor& tensor) {
431+
inline ssize_t nonzero_dim(const exec_aten::Tensor& tensor) {
436432
return tensor.dim() == 0 ? 1 : tensor.dim();
437433
}
438434

@@ -442,7 +438,7 @@ inline ssize_t nonzero_dim(const Tensor& tensor) {
442438
* the zero dimensional tensors in some kernels, that treat them as 1D tensors
443439
* with a single element.
444440
*/
445-
inline ssize_t nonempty_size(const Tensor& tensor, ssize_t dim) {
441+
inline ssize_t nonempty_size(const exec_aten::Tensor& tensor, ssize_t dim) {
446442
return tensor.dim() == 0 ? 1 : tensor.size(dim);
447443
}
448444

@@ -861,7 +857,7 @@ inline bool tensor_is_scalar(exec_aten::Tensor t) {
861857
constexpr size_t kTensorDimensionLimit = 16;
862858

863859
/// Returns the product of dim[0:dim), not including dim.
864-
inline size_t getLeadingDims(const Tensor& tensor, int64_t dim) {
860+
inline size_t getLeadingDims(const exec_aten::Tensor& tensor, int64_t dim) {
865861
ET_CHECK_MSG(
866862
dim >= 0 && dim <= tensor.dim(),
867863
"Ending dimension %" PRId64
@@ -876,7 +872,7 @@ inline size_t getLeadingDims(const Tensor& tensor, int64_t dim) {
876872
}
877873

878874
/// Returns the product of dim[dim+1:].
879-
inline size_t getTrailingDims(const Tensor& tensor, int64_t dim) {
875+
inline size_t getTrailingDims(const exec_aten::Tensor& tensor, int64_t dim) {
880876
ET_CHECK_MSG(
881877
dim >= -1 && dim < tensor.dim(),
882878
"Starting dimension %" PRId64
@@ -901,7 +897,7 @@ inline size_t getTrailingDims(const Tensor& tensor, int64_t dim) {
901897
* the tensor.
902898
*/
903899
inline size_t coordinateToIndex(
904-
const Tensor& tensor,
900+
const exec_aten::Tensor& tensor,
905901
const size_t* const coordinate) {
906902
size_t index = 0;
907903
for (int d = 0; d < tensor.dim(); ++d) {
@@ -921,8 +917,10 @@ inline size_t coordinateToIndex(
921917
* index. It is assumed that the array has kTensorDimensionLimit elements.
922918
* @returns void
923919
*/
924-
inline void
925-
indexToCoordinate(const Tensor& tensor, size_t index, size_t* coordinate) {
920+
inline void indexToCoordinate(
921+
const exec_aten::Tensor& tensor,
922+
size_t index,
923+
size_t* coordinate) {
926924
ET_CHECK(index < tensor.numel());
927925
for (auto i = 0; i < tensor.dim(); ++i) {
928926
auto dim = tensor.dim() - 1 - i;
@@ -947,12 +945,12 @@ template <
947945
typename std::enable_if<
948946
std::is_integral<INT_T>::value && !std::is_same<INT_T, bool>::value,
949947
bool>::type = true>
950-
bool extract_scalar_tensor(Tensor tensor, INT_T* out_val) {
948+
bool extract_scalar_tensor(exec_aten::Tensor tensor, INT_T* out_val) {
951949
if (tensor.numel() != 1) {
952950
return false;
953951
}
954952
#define CASE_INT_DTYPE(TENSOR_CTYPE, TENSOR_DTYPE) \
955-
case ScalarType::TENSOR_DTYPE: { \
953+
case exec_aten::ScalarType::TENSOR_DTYPE: { \
956954
const TENSOR_CTYPE val = tensor.const_data_ptr<TENSOR_CTYPE>()[0]; \
957955
if (val < std::numeric_limits<INT_T>::lowest() || \
958956
val > std::numeric_limits<INT_T>::max()) { \
@@ -984,12 +982,12 @@ template <
984982
typename FLOAT_T,
985983
typename std::enable_if<std::is_floating_point<FLOAT_T>::value, bool>::
986984
type = true>
987-
bool extract_scalar_tensor(Tensor tensor, FLOAT_T* out_val) {
985+
bool extract_scalar_tensor(exec_aten::Tensor tensor, FLOAT_T* out_val) {
988986
if (tensor.numel() != 1) {
989987
return false;
990988
}
991989
#define CASE_REAL_DTYPE(TENSOR_CTYPE, TENSOR_DTYPE) \
992-
case ScalarType::TENSOR_DTYPE: { \
990+
case exec_aten::ScalarType::TENSOR_DTYPE: { \
993991
/* ET_FORALL_REAL_TYPES guarantees TENSOR_CTYPE is a real type. */ \
994992
double val = \
995993
static_cast<double>(tensor.const_data_ptr<TENSOR_CTYPE>()[0]); \
@@ -1022,7 +1020,7 @@ template <
10221020
typename BOOL_T,
10231021
typename std::enable_if<std::is_same<BOOL_T, bool>::value, bool>::type =
10241022
true>
1025-
bool extract_scalar_tensor(Tensor tensor, BOOL_T* out_val) {
1023+
bool extract_scalar_tensor(exec_aten::Tensor tensor, BOOL_T* out_val) {
10261024
if (tensor.scalar_type() != exec_aten::ScalarType::Bool) {
10271025
return false;
10281026
}

0 commit comments

Comments (0)