@@ -1301,8 +1301,8 @@ bool UI::render(IGPUCommandBuffer* const commandBuffer, ISemaphore::SWaitInfo wa
1301
1301
memcpy (streamingPtr+geo.indexByteOffset ,list->IdxBuffer .Data ,list->IdxBuffer .size_in_bytes ());
1302
1302
memcpy (streamingPtr+geo.vertexByteOffset ,list->VtxBuffer .Data ,list->VtxBuffer .size_in_bytes ());
1303
1303
// not writing past the end
1304
- assert (streamingPtr+geo.indexByteOffset < endByte);
1305
- assert (streamingPtr+geo.vertexByteOffset < endByte);
1304
+ assert (streamingPtr+geo.indexByteOffset +list-> IdxBuffer . size_in_bytes ()<= endByte);
1305
+ assert (streamingPtr+geo.vertexByteOffset +list-> VtxBuffer . size_in_bytes ()<= endByte);
1306
1306
}
1307
1307
}
1308
1308
// the offsets were enough and allocation should not overlap
@@ -1334,7 +1334,7 @@ bool UI::render(IGPUCommandBuffer* const commandBuffer, ISemaphore::SWaitInfo wa
1334
1334
{
1335
1335
auto mdiBinding = binding;
1336
1336
mdiBinding.offset = offsets.drawIndirectByteOffset ;
1337
- commandBuffer->drawIndexedIndirect (binding ,drawID,sizeof (VkDrawIndexedIndirectCommand));
1337
+ commandBuffer->drawIndexedIndirect (mdiBinding ,drawID,sizeof (VkDrawIndexedIndirectCommand));
1338
1338
}
1339
1339
}
1340
1340
@@ -1349,6 +1349,7 @@ bool UI::render(IGPUCommandBuffer* const commandBuffer, ISemaphore::SWaitInfo wa
1349
1349
// trim the leftover actually used block
1350
1350
metaAlloc.memBlockSize = offsets.totalSize ;
1351
1351
}
1352
+
1352
1353
// latch our used chunk free
1353
1354
streaming->multi_deallocate (1 ,&metaAlloc.memBlockOffset ,&metaAlloc.memBlockSize ,waitInfo);
1354
1355
// reset to initial state
0 commit comments