@@ -51,6 +51,22 @@ IGPUQueue::SSubmitInfo IUtilities::updateImageViaStagingBuffer(
51
51
srcFormat = dstImage->getCreationParameters ().format ;
52
52
}
53
53
54
+ // Validate Copies from srcBuffer to dstImage with these regions
55
+ // if the initial regions are valid then ImageRegionIterator will do its job correctly breaking it down ;)
56
+ // note to future self: couldn't use dstImage->validateCopies because it doesn't consider that cpubuffer will be promoted and hence it will get a validation error about size of the buffer being smaller than max accessible offset.
57
+ bool regionsValid = true ;
58
+ for (const auto region : regions)
59
+ {
60
+ auto subresourceSize = dstImage->getMipSize (region.imageSubresource .mipLevel );
61
+ if (!dstImage->validateCopyOffsetAndExtent (region.imageExtent , region.imageOffset , subresourceSize, minImageTransferGranularity))
62
+ regionsValid = false ;
63
+ }
64
+ if (!regionsValid)
65
+ {
66
+ assert (false );
67
+ return intendedNextSubmit;
68
+ }
69
+
54
70
ImageRegionIterator regionIterator = ImageRegionIterator (regions, queueFamProps, srcBuffer, srcFormat, dstImage);
55
71
56
72
// Assuming each thread can handle minImageTransferGranularitySize of texelBlocks:
@@ -231,7 +247,7 @@ ImageRegionIterator::ImageRegionIterator(
231
247
size_t ImageRegionIterator::getMemoryNeededForRemainingRegions () const
232
248
{
233
249
asset::TexelBlockInfo dstImageTexelBlockInfo (dstImageFormat);
234
- assert (dstImageTexelBlockInfo.getBlockByteSize ()> 0u );
250
+ assert (dstImageTexelBlockInfo.getBlockByteSize () > 0u );
235
251
auto texelBlockDim = dstImageTexelBlockInfo.getDimension ();
236
252
uint32_t memoryNeededForRemainingRegions = 0ull ;
237
253
@@ -246,44 +262,6 @@ size_t ImageRegionIterator::getMemoryNeededForRemainingRegions() const
246
262
{
247
263
const asset::IImage::SBufferCopy & region = regions[i];
248
264
249
- auto subresourceSize = dstImage->getMipSize (region.imageSubresource .mipLevel );
250
-
251
- // Validate Region, TODO: move these to IGPUImage::validateCopies and call them on every region at the beginning
252
-
253
- assert (static_cast <uint32_t >(region.imageSubresource .aspectMask ) != 0u );
254
- assert (core::isPoT (static_cast <uint32_t >(region.imageSubresource .aspectMask )) && " region.aspectMask should only have a single bit set." );
255
-
256
- // canTransferMipLevelsPartially = !(minImageTransferGranularity.width == 0 && minImageTransferGranularity.height == 0 && minImageTransferGranularity.depth == 0);
257
- if (canTransferMipLevelsPartially)
258
- {
259
- // region.imageOffset.{xyz} should be multiple of minImageTransferGranularity.{xyz} scaled up by block size
260
- bool isImageOffsetAlignmentValid =
261
- (region.imageOffset .x % (minImageTransferGranularity.width * texelBlockDim.x ) == 0 ) &&
262
- (region.imageOffset .y % (minImageTransferGranularity.height * texelBlockDim.y ) == 0 ) &&
263
- (region.imageOffset .z % (minImageTransferGranularity.depth * texelBlockDim.z ) == 0 );
264
- assert (isImageOffsetAlignmentValid);
265
-
266
- // region.imageExtent.{xyz} should be multiple of minImageTransferGranularity.{xyz} scaled up by block size,
267
- // OR ELSE (region.imageOffset.{x/y/z} + region.imageExtent.{width/height/depth}) MUST be equal to subresource{Width,Height,Depth}
268
- bool isImageExtentAlignmentValid =
269
- (region.imageExtent .width % (minImageTransferGranularity.width * texelBlockDim.x ) == 0 || (region.imageOffset .x + region.imageExtent .width == subresourceSize.x )) &&
270
- (region.imageExtent .height % (minImageTransferGranularity.height * texelBlockDim.y ) == 0 || (region.imageOffset .y + region.imageExtent .height == subresourceSize.y )) &&
271
- (region.imageExtent .depth % (minImageTransferGranularity.depth * texelBlockDim.z ) == 0 || (region.imageOffset .z + region.imageExtent .depth == subresourceSize.z ));
272
- assert (isImageExtentAlignmentValid);
273
-
274
- bool isImageExtentAndOffsetValid =
275
- (region.imageExtent .width + region.imageOffset .x <= subresourceSize.x ) &&
276
- (region.imageExtent .height + region.imageOffset .y <= subresourceSize.y ) &&
277
- (region.imageExtent .depth + region.imageOffset .z <= subresourceSize.z );
278
- assert (isImageExtentAndOffsetValid);
279
- }
280
- else
281
- {
282
- assert (region.imageOffset .x == 0 && region.imageOffset .y == 0 && region.imageOffset .z == 0 );
283
- assert (region.imageExtent .width == subresourceSize.x && region.imageExtent .height == subresourceSize.y && region.imageExtent .depth == subresourceSize.z );
284
- }
285
-
286
-
287
265
auto imageExtent = core::vector3du32_SIMD (region.imageExtent .width , region.imageExtent .height , region.imageExtent .depth );
288
266
auto imageExtentInBlocks = dstImageTexelBlockInfo.convertTexelsToBlocks (imageExtent);
289
267
auto imageExtentBlockStridesInBytes = dstImageTexelBlockInfo.convert3DBlockStridesTo1DByteStrides (imageExtentInBlocks);
@@ -681,7 +659,6 @@ bool ImageRegionIterator::advanceAndCopyToStagingBuffer(asset::IImage::SBufferCo
681
659
}
682
660
else if (currentBlockInRow == 0 && currentRowInSlice == 0 && canTransferMipLevelsPartially && uploadableSlices > 0 )
683
661
{
684
- // tryFillLayer();
685
662
uint32_t slicesToUploadMemorySize = eachSliceNeededMemory * uploadableSlices;
686
663
687
664
regionToCopyNext.bufferOffset = stagingBufferOffset;
@@ -720,7 +697,6 @@ bool ImageRegionIterator::advanceAndCopyToStagingBuffer(asset::IImage::SBufferCo
720
697
}
721
698
else if (currentBlockInRow == 0 && canTransferMipLevelsPartially && uploadableRows > 0 )
722
699
{
723
- // tryFillSlice();
724
700
uint32_t rowsToUploadMemorySize = eachRowNeededMemory * uploadableRows;
725
701
726
702
regionToCopyNext.bufferOffset = stagingBufferOffset;
0 commit comments