@@ -15,13 +15,13 @@ namespace api {
1515
1616std::vector<int64_t > calculate_dim_order (
1717 const size_t ndim,
18- const int32_t packed_dim_whcn_idx ) {
18+ const int32_t packed_dim ) {
1919 // Special case for zero dim tensors
2020 if (ndim == 0 ) {
2121 return {0 };
2222 }
2323 std::vector<int64_t > dim_order (ndim);
24- int64_t last_dim = ndim - 1 - packed_dim_whcn_idx ;
24+ int64_t last_dim = ndim - 1 - packed_dim ;
2525
2626 int64_t cur_dim = 0 ;
2727 for (int d = 0 ; d < ndim; ++d) {
@@ -131,7 +131,7 @@ std::vector<int64_t> unsqueeze_strides(
131131
132132std::vector<int64_t > calculate_padded_sizes (
133133 const std::vector<int64_t >& sizes,
134- const int32_t packed_dim_whcn_idx ) {
134+ const int32_t packed_dim ) {
135135 int64_t ndim = sizes.size ();
136136 if (ndim == 0 ) {
137137 ndim = 1 ;
@@ -145,7 +145,7 @@ std::vector<int64_t> calculate_padded_sizes(
145145 }
146146
147147 // Pad the packed dim to the next multiple of 4.
148- const int64_t dim_offset = packed_dim_whcn_idx + 1 ;
148+ const int64_t dim_offset = packed_dim + 1 ;
149149 const int64_t padded_dim_size = utils::val_at (-dim_offset, sizes);
150150 padded_sizes.at (ndim_up4 - dim_offset) = utils::align_up_4 (padded_dim_size);
151151
@@ -155,7 +155,7 @@ std::vector<int64_t> calculate_padded_sizes(
155155utils::uvec3 calculate_image_extents (
156156 const std::vector<int64_t >& padded_sizes,
157157 const std::vector<int64_t >& axis_map,
158- const int32_t packed_dim_whcn_idx ) {
158+ const int32_t packed_dim ) {
159159 VK_CHECK_COND (padded_sizes.size () == 4 );
160160 VK_CHECK_COND (axis_map.size () == 4 );
161161
@@ -176,8 +176,8 @@ utils::uvec3 calculate_image_extents(
176176 // Multiply the extents of the batch axis by the batch size.
177177 extents[batch_axis] *= padded_sizes.at (0 );
178178
179- VK_CHECK_COND (extents[axis_map.at (packed_dim_whcn_idx )] % 4 == 0 );
180- extents[axis_map.at (packed_dim_whcn_idx )] /= 4 ;
179+ VK_CHECK_COND (extents[axis_map.at (packed_dim )] % 4 == 0 );
180+ extents[axis_map.at (packed_dim )] /= 4 ;
181181 return extents;
182182}
183183
@@ -254,14 +254,14 @@ vTensorStorage::vTensorStorage(
254254 Context* const context,
255255 const utils::StorageType storage_type,
256256 const std::vector<int64_t >& axis_map,
257- const int32_t packed_dim_whcn_idx ,
257+ const int32_t packed_dim ,
258258 const std::vector<int64_t >& padded_sizes,
259259 const vkapi::ScalarType dtype,
260260 const bool allocate_memory)
261261 : context_(context),
262262 storage_type_{storage_type},
263263 image_extents_ (
264- calculate_image_extents (padded_sizes, axis_map, packed_dim_whcn_idx )),
264+ calculate_image_extents (padded_sizes, axis_map, packed_dim )),
265265 buffer_length_{utils::multiply_integers (padded_sizes)},
266266 buffer_offset_{0 },
267267 image_ (allocate_image(
@@ -378,13 +378,12 @@ vTensor::vTensor(
378378 : dtype_(dtype),
379379 // Calculate tensor metadata
380380 sizes_(sizes.begin(), sizes.end()),
381- packed_dim_whcn_idx_(
382- utils::to_packed_dim_whcn_idx<int32_t >(memory_layout)),
383- dim_order_(calculate_dim_order(sizes_.size(), packed_dim_whcn_idx_)),
381+ packed_dim_(utils::to_packed_dim<int32_t >(memory_layout)),
382+ dim_order_(calculate_dim_order(sizes_.size(), packed_dim_)),
384383 axis_map_(default_axis_map()),
385384 strides_(calculate_strides(sizes, dim_order_)),
386385 numel_(utils::multiply_integers(sizes_)),
387- padded_sizes_{calculate_padded_sizes (sizes, packed_dim_whcn_idx_ )},
386+ padded_sizes_{calculate_padded_sizes (sizes, packed_dim_ )},
388387 unsqueezed_strides_{unsqueeze_strides (strides_, numel_)},
389388 padded_numel_ (utils::multiply_integers(padded_sizes_)),
390389 logical_limits_{{0 , 0 , 0 }},
@@ -399,7 +398,7 @@ vTensor::vTensor(
399398 context,
400399 storage_type,
401400 axis_map_,
402- packed_dim_whcn_idx_ ,
401+ packed_dim_ ,
403402 padded_sizes_,
404403 dtype_,
405404 allocate_memory) {
@@ -422,7 +421,7 @@ vTensor::vTensor(const vTensor& other)
422421 : dtype_(other.dtype_),
423422 // Copy tensor size metadata
424423 sizes_(other.sizes_.begin(), other.sizes_.end()),
425- packed_dim_whcn_idx_ {other.packed_dim_whcn_idx_ },
424+ packed_dim_ {other.packed_dim_ },
426425 dim_order_ (other.dim_order_.begin(), other.dim_order_.end()),
427426 axis_map_(other.axis_map_.begin(), other.axis_map_.end()),
428427 strides_(other.strides_.begin(), other.strides_.end()),
@@ -450,12 +449,12 @@ vTensor::vTensor(
450449 : dtype_(other.dtype_),
451450 // Copy tensor size metadata
452451 sizes_(sizes.begin(), sizes.end()),
453- packed_dim_whcn_idx_ (other.packed_dim_whcn_idx_ ),
452+ packed_dim_ (other.packed_dim_ ),
454453 dim_order_(dim_order.begin(), dim_order.end()),
455454 axis_map_(default_axis_map()),
456455 strides_(calculate_strides(sizes_, dim_order_)),
457456 numel_(utils::multiply_integers(sizes_)),
458- padded_sizes_{calculate_padded_sizes (sizes, packed_dim_whcn_idx_ )},
457+ padded_sizes_{calculate_padded_sizes (sizes, packed_dim_ )},
459458 unsqueezed_strides_{unsqueeze_strides (strides_, numel_)},
460459 padded_numel_ (utils::multiply_integers(padded_sizes_)),
461460 logical_limits_(other.logical_limits_),
@@ -512,7 +511,7 @@ void vTensor::set_logical_limits(const utils::uvec3& image_extents) {
512511}
513512
514513utils::GPUMemoryLayout vTensor::estimate_memory_layout () const {
515- switch (packed_dim_whcn_idx_ ) {
514+ switch (packed_dim_ ) {
516515 case WHCN::kWidthDim :
517516 return utils::kWidthPacked ;
518517 case WHCN::kHeightDim :
@@ -602,14 +601,14 @@ void vTensor::update_metadata() {
602601 strides_ = calculate_strides (sizes_, dim_order_);
603602 numel_ = utils::multiply_integers (sizes_);
604603
605- padded_sizes_ = calculate_padded_sizes (sizes_, packed_dim_whcn_idx_ );
604+ padded_sizes_ = calculate_padded_sizes (sizes_, packed_dim_ );
606605 unsqueezed_strides_ = unsqueeze_strides (strides_, numel_);
607606 padded_numel_ = utils::multiply_integers (padded_sizes_);
608607
609608 // Calculate the image extents that would have been used to allocate a texture
 610609 // with the current sizes, and use that to set the logical limits.
611610 set_logical_limits (
612- calculate_image_extents (padded_sizes_, axis_map_, packed_dim_whcn_idx_ ));
611+ calculate_image_extents (padded_sizes_, axis_map_, packed_dim_ ));
613612
614613 if (sizes_uniform_.buffer ()) {
615614 sizes_uniform_.update (utils::make_whcn_ivec4 (sizes_));
@@ -633,7 +632,7 @@ void vTensor::check_sizes(const std::vector<int64_t>& sizes) const {
633632 // For texture storage check that the current texture is large enough for
634633 // the new sizes of the tensor.
635634 utils::uvec3 virtual_extents =
636- calculate_image_extents (padded_sizes_, axis_map_, packed_dim_whcn_idx_ );
635+ calculate_image_extents (padded_sizes_, axis_map_, packed_dim_ );
637636
638637 bool valid_resize = virtual_extents[0 ] <= storage_.image_extents_ [0 ];
639638 valid_resize =
@@ -705,11 +704,11 @@ void vTensor::virtual_transpose(const int64_t dim0, const int64_t dim1) {
705704
706705 const int dim0_whcn = sizes_.size () - 1 - dim0;
707706 const int dim1_whcn = sizes_.size () - 1 - dim1;
708- if (packed_dim_whcn_idx_ == dim0_whcn) {
709- packed_dim_whcn_idx_ = dim1_whcn;
707+ if (packed_dim_ == dim0_whcn) {
708+ packed_dim_ = dim1_whcn;
710709 }
711- if (packed_dim_whcn_idx_ == dim1_whcn) {
712- packed_dim_whcn_idx_ = dim0_whcn;
710+ if (packed_dim_ == dim1_whcn) {
711+ packed_dim_ = dim0_whcn;
713712 }
714713
715714 if (storage_type () == utils::kBuffer ) {
0 commit comments