13 | 13 | namespace vkcompute { |
14 | 14 | namespace api { |
15 | 15 |
| 16 | +std::vector<int64_t> calculate_sizes( |
| 17 | + const vkapi::VulkanImage& image, |
| 18 | + const utils::GPUMemoryLayout memory_layout) { |
| 19 | + auto sizes = std::vector<int64_t>{ |
| 20 | + image.extents().width, image.extents().height, image.extents().depth}; |
| 21 | + const auto packed_dim = utils::to_packed_dim<int32_t>(memory_layout); |
| 22 | + sizes.at(packed_dim) *= 4; |
| 23 | + return sizes; |
| 24 | +} |
| 25 | + |
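The new helper recovers logical tensor sizes from the image extents: each texel holds four elements along the packed dimension, so that extent is scaled by 4. A minimal sketch of the expected result, assuming an 8x16x4 VulkanImage named image and that utils::kWidthPacked maps to packed dim 0 as elsewhere in the codebase:

// Hypothetical usage; image is assumed to wrap an 8x16x4 image.
const std::vector<int64_t> sizes = calculate_sizes(image, utils::kWidthPacked);
// The width extent is the packed dim, so it is scaled by 4:
// sizes == {8 * 4, 16, 4} == {32, 16, 4}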
16 | 26 | std::vector<int64_t> calculate_dim_order( |
17 | 27 | const size_t ndim, |
18 | 28 | const int32_t packed_dim) { |
@@ -186,6 +196,18 @@ utils::uvec3 calculate_image_extents( |
186 | 196 | // vTensorStorage |
187 | 197 | // |
188 | 198 |
| 199 | +utils::StorageType storage_type(const vkapi::VulkanImage& image) { |
| 200 | + const auto type = image.type(); |
| 201 | + switch (type) { |
| 202 | + case VK_IMAGE_TYPE_3D: |
| 203 | + return utils::kTexture3D; |
| 204 | + case VK_IMAGE_TYPE_2D: |
| 205 | + return utils::kTexture2D; |
| 206 | + default: |
| 207 | + VK_THROW("Unsupported image type", type); |
| 208 | + } |
| 209 | +} |
| 210 | + |
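storage_type() derives the storage kind directly from the wrapped image, so a 3D image yields a kTexture3D tensor and a 2D image yields kTexture2D, while any other image type is rejected. A rough illustration, assuming image wraps a VkImage created with VK_IMAGE_TYPE_3D:

// Hypothetical check; image is assumed to be a 3D VulkanImage.
const utils::StorageType stype = storage_type(image);
// stype == utils::kTexture3D; a VK_IMAGE_TYPE_1D image would hit VK_THROW.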
189 | 211 | vkapi::VulkanImage allocate_image( |
190 | 212 | Context* const context_ptr, |
191 | 213 | utils::uvec3& image_extents, |
@@ -281,6 +303,21 @@ vTensorStorage::vTensorStorage( |
281 | 303 | last_access_{}, |
282 | 304 | has_copies_{false} {} |
283 | 305 |
| 306 | +vTensorStorage::vTensorStorage( |
| 307 | + Context* const context, |
| 308 | + const vkapi::VulkanImage& image) |
| 309 | + : context_(context), |
| 310 | + storage_type_{storage_type(image)}, |
| 311 | + image_extents_( |
| 312 | + {image.extents().width, |
| 313 | + image.extents().height, |
| 314 | + image.extents().depth}), |
| 315 | + buffer_length_{0}, |
| 316 | + buffer_offset_{0}, |
| 317 | + image_(image), |
| 318 | + buffer_(vkapi::VulkanBuffer()), |
| 319 | + last_access_{} {} |
| 320 | + |
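Unlike the allocating constructor above, this overload adopts an image that already exists: the storage copies the image handle and its extents and leaves the buffer members zero-initialized, so no new device memory is allocated or bound for the tensor.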
284 | 321 | vTensorStorage::vTensorStorage( |
285 | 322 | vTensorStorage& other, |
286 | 323 | const int64_t buffer_offset) |
@@ -446,6 +483,33 @@ vTensor::vTensor( |
446 | 483 | } |
447 | 484 |
448 | 485 | // NOLINTNEXTLINE |
| 486 | +vTensor::vTensor( |
| 487 | + Context* context, |
| 488 | + const vkapi::VulkanImage& image, |
| 489 | + const utils::GPUMemoryLayout memory_layout) |
| 490 | + : dtype_(vkapi::element_scalartype(image.format())), |
| 491 | + // Calculate tensor metadata |
| 492 | + sizes_(calculate_sizes(image, memory_layout)), |
| 493 | + packed_dim_(utils::to_packed_dim<int32_t>(memory_layout)), |
| 494 | + dim_order_(), |
| 495 | + axis_map_(default_axis_map()), |
| 496 | + strides_(), |
| 497 | + numel_(utils::multiply_integers(sizes_)), |
| 498 | + padded_sizes_(calculate_padded_sizes(sizes_, packed_dim_)), |
| 499 | + unsqueezed_strides_(), |
| 500 | + padded_numel_(utils::multiply_integers(padded_sizes_)), |
| 501 | + logical_limits_(), |
| 502 | + // Utility Uniform Buffers that can be passed to shaders as arguments |
| 503 | + sizes_uniform_(), |
| 504 | + strides_uniform_(), |
| 505 | + numel_uniform_(), |
| 506 | + axis_map_uniform_(), |
| 507 | + logical_limits_uniform_(), |
| 508 | + // Construct Tensor storage |
| 509 | + storage_(context, image) { |
| 510 | + set_logical_limits(storage_.image_extents_); |
| 511 | +} |
| 512 | + |
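Taken together, these additions let a vTensor be built around an externally created image: the dtype is inferred from the image format, the sizes from the extents and the packed dim, and the storage simply adopts the image handle. A minimal usage sketch, assuming a valid Context pointer and an external VulkanImage are available; the names below are illustrative, not part of this change:

// Assumed inputs: context and external_image come from the surrounding runtime.
vTensor wrap_image(Context* context, const vkapi::VulkanImage& external_image) {
  // kWidthPacked is chosen only for illustration; any GPUMemoryLayout is valid.
  return vTensor(context, external_image, utils::kWidthPacked);
}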
449 | 513 | vTensor::vTensor(vTensor& other) |
450 | 514 | : dtype_(other.dtype_), |
451 | 515 | // Copy tensor size metadata |