@@ -52,7 +52,6 @@ bool CAssetConverter::patch_impl_t<ICPUShader>::valid(const ILogicalDevice* devi
case IGPUShader::E_SHADER_STAGE::ESS_FRAGMENT:
case IGPUShader::E_SHADER_STAGE::ESS_COMPUTE:
return true;
- break;
case IGPUShader::E_SHADER_STAGE::ESS_TESSELLATION_CONTROL:
case IGPUShader::E_SHADER_STAGE::ESS_TESSELLATION_EVALUATION:
if (features.tessellationShader)
@@ -653,7 +652,7 @@ class AssetVisitor : public CRTP
case IDescriptor::EC_IMAGE:
{
auto imageView = static_cast<const ICPUImageView*>(untypedDesc);
- IGPUImage::E_USAGE_FLAGS usage;
+ IGPUImage::E_USAGE_FLAGS usage = IGPUImage::E_USAGE_FLAGS::EUF_NONE; // silence a warning
switch (type)
{
case IDescriptor::E_TYPE::ET_COMBINED_IMAGE_SAMPLER:
@@ -1593,26 +1592,26 @@ void CAssetConverter::CHashCache::eraseStale(const IPatchOverride* patchOverride
);
};
// to make the process more efficient we start ejecting from "lowest level" assets
- rehash.operator()<ICPUSampler>();
- rehash.operator()<ICPUDescriptorSetLayout>();
- rehash.operator()<ICPUPipelineLayout>();
+ rehash.template operator()<ICPUSampler>();
+ rehash.template operator()<ICPUDescriptorSetLayout>();
+ rehash.template operator()<ICPUPipelineLayout>();
// shaders and images depend on buffers for data sourcing
- rehash.operator()<ICPUBuffer>();
- rehash.operator()<ICPUBufferView>();
- rehash.operator()<ICPUImage>();
- rehash.operator()<ICPUImageView>();
- rehash.operator()<ICPUBottomLevelAccelerationStructure>();
- rehash.operator()<ICPUTopLevelAccelerationStructure>();
+ rehash.template operator()<ICPUBuffer>();
+ rehash.template operator()<ICPUBufferView>();
+ rehash.template operator()<ICPUImage>();
+ rehash.template operator()<ICPUImageView>();
+ rehash.template operator()<ICPUBottomLevelAccelerationStructure>();
+ rehash.template operator()<ICPUTopLevelAccelerationStructure>();
// only once all the descriptor types have been hashed, we can hash sets
- rehash.operator()<ICPUDescriptorSet>();
+ rehash.template operator()<ICPUDescriptorSet>();
// naturally any pipeline depends on shaders and pipeline cache
- rehash.operator()<ICPUShader>();
- rehash.operator()<ICPUPipelineCache>();
- rehash.operator()<ICPUComputePipeline>();
+ rehash.template operator()<ICPUShader>();
+ rehash.template operator()<ICPUPipelineCache>();
+ rehash.template operator()<ICPUComputePipeline>();
// graphics pipeline needs a renderpass
- rehash.operator()<ICPURenderpass>();
- rehash.operator()<ICPUGraphicsPipeline>();
- // rehash.operator()<ICPUFramebuffer>();
+ rehash.template operator()<ICPURenderpass>();
+ rehash.template operator()<ICPUGraphicsPipeline>();
+ // rehash.template operator()<ICPUFramebuffer>();
}
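Aside (illustrative sketch, not part of the diff or of the Nabla API): the `.template` keyword being added throughout is the standard disambiguator required when an explicit template-argument list is supplied to a member template of an object whose type depends on a template parameter, such as a generic lambda's operator() invoked from inside another template. A minimal, self-contained C++20 example with made-up names:

    // toy illustration of the `.template` disambiguation the diff applies
    #include <cstdio>

    template<typename F>
    void eraseStaleLike(F&& rehash)
    {
        // `rehash` has a dependent type inside this function template, so an explicit
        // template-argument list on its call operator must be introduced with `template`
        rehash.template operator()<float>();
        // plain `rehash.operator()<float>();` parses the `<` as less-than here and is
        // rejected by conforming compilers, which is the kind of error the change avoids
    }

    int main()
    {
        // a C++20 lambda whose operator() is itself a member template
        auto rehash = []<typename AssetType>()
        {
            std::printf("rehashing assets of size %zu\n", sizeof(AssetType));
        };
        eraseStaleLike(rehash);
    }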
@@ -2230,7 +2229,7 @@ class MetaDeviceMemoryAllocator final
.image = std::get<asset_cached_t<ICPUImage>*>(toBind)->get(),
.binding = binding
};
- bindSuccess = device->bindImageMemory(1,&info);
+ bindSuccess = device->bindImageMemory(std::span(&info,1));
}
break;
default:
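Aside (illustrative sketch with hypothetical names; only the diff itself shows the real bindImageMemory overloads): the change replaces a count-plus-pointer argument pair with a std::span, which C++20 class template argument deduction can build directly from `&info, 1`. A toy version of the same call shape:

    // toy illustration of passing a single struct through std::span (C++20)
    #include <cstdio>
    #include <span>

    struct BindInfo { int binding; };

    // an API taking a contiguous range of infos instead of (count, pointer)
    bool bindAll(std::span<const BindInfo> infos)
    {
        for (const auto& info : infos)
            std::printf("binding %d\n", info.binding);
        return !infos.empty();
    }

    int main()
    {
        BindInfo info{ .binding = 7 };
        // CTAD deduces std::span<BindInfo>, which converts to std::span<const BindInfo>
        return bindAll(std::span(&info, 1)) ? 0 : 1;
    }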
@@ -2386,28 +2385,28 @@ auto CAssetConverter::reserve(const SInputs& inputs) -> SReserveResult
switch (entry.instance.asset->getAssetType())
{
case ICPUDescriptorSetLayout::AssetType:
- visit.operator()<ICPUDescriptorSetLayout>(entry);
+ visit.template operator()<ICPUDescriptorSetLayout>(entry);
break;
case ICPUPipelineLayout::AssetType:
- visit.operator()<ICPUPipelineLayout>(entry);
+ visit.template operator()<ICPUPipelineLayout>(entry);
break;
case ICPUComputePipeline::AssetType:
- visit.operator()<ICPUComputePipeline>(entry);
+ visit.template operator()<ICPUComputePipeline>(entry);
break;
case ICPUGraphicsPipeline::AssetType:
- visit.operator()<ICPUGraphicsPipeline>(entry);
+ visit.template operator()<ICPUGraphicsPipeline>(entry);
break;
case ICPUDescriptorSet::AssetType:
- visit.operator()<ICPUDescriptorSet>(entry);
+ visit.template operator()<ICPUDescriptorSet>(entry);
break;
case ICPUBufferView::AssetType:
- visit.operator()<ICPUBufferView>(entry);
+ visit.template operator()<ICPUBufferView>(entry);
break;
case ICPUImageView::AssetType:
- visit.operator()<ICPUImageView>(entry);
+ visit.template operator()<ICPUImageView>(entry);
break;
case ICPUTopLevelAccelerationStructure::AssetType:
- visit.operator()<ICPUTopLevelAccelerationStructure>(entry);
+ visit.template operator()<ICPUTopLevelAccelerationStructure>(entry);
break;
// these assets have no dependants, should have never been pushed on the stack
default:
@@ -2687,7 +2686,7 @@ auto CAssetConverter::reserve(const SInputs& inputs) -> SReserveResult
{
for (auto& entry : conversionRequests)
for (auto i=0ull; i<entry.second.copyCount; i++)
- assign.operator()<true>(entry.first,entry.second.firstCopyIx,i,device->createSampler(entry.second.canonicalAsset->getParams()));
+ assign.template operator()<true>(entry.first,entry.second.firstCopyIx,i,device->createSampler(entry.second.canonicalAsset->getParams()));
}
if constexpr (std::is_same_v<AssetType,ICPUBuffer>)
{
@@ -3135,7 +3134,7 @@ auto CAssetConverter::reserve(const SInputs& inputs) -> SReserveResult
{
// since we don't have dependants we don't care about our group ID
// we create threadsafe pipeline caches, because we have no idea how they may be used
- assign.operator()<true>(entry.first,entry.second.firstCopyIx,i,device->createPipelineCache(asset,false));
+ assign.template operator()<true>(entry.first,entry.second.firstCopyIx,i,device->createPipelineCache(asset,false));
}
}
}
@@ -3180,7 +3179,7 @@ auto CAssetConverter::reserve(const SInputs& inputs) -> SReserveResult
{
// since we don't have dependants we don't care about our group ID
// we create threadsafe pipeline caches, because we have no idea how they may be used
- assign.operator()<true>(entry.first,entry.second.firstCopyIx,i,device->createRenderpass(asset->getCreationParameters()));
+ assign.template operator()<true>(entry.first,entry.second.firstCopyIx,i,device->createRenderpass(asset->getCreationParameters()));
}
}
}
@@ -3331,7 +3330,7 @@ auto CAssetConverter::reserve(const SInputs& inputs) -> SReserveResult
gpuObj.get()->setObjectDebugName(debugName.str().c_str());
}
// insert into staging cache
- stagingCache.emplace(gpuObj.get(),CCache<AssetType>::key_t(contentHash,uniqueCopyGroupID));
+ stagingCache.emplace(gpuObj.get(),typename CCache<AssetType>::key_t(contentHash,uniqueCopyGroupID));
// propagate back to dfsCache
created.gpuObj = std::move(gpuObj);
// record if a device memory allocation will be needed
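Aside (illustrative sketch with toy types, not Nabla's actual CCache): `CCache<AssetType>::key_t` is a dependent qualified name inside the enclosing template, and a dependent name is not assumed to denote a type unless preceded by `typename`; the same reasoning covers the `asset_traits<AssetType>::video_t` casts further down. A small self-contained example of the rule:

    // toy illustration of `typename` on a dependent member type
    #include <cstddef>
    #include <cstdint>
    #include <unordered_map>
    #include <utility>

    template<typename AssetType>
    struct CacheLike               // stand-in for CCache<AssetType>, hypothetical
    {
        using key_t = std::pair<std::size_t, std::uint32_t>;
    };

    template<typename AssetType>
    void insertLike(std::unordered_map<int, typename CacheLike<AssetType>::key_t>& staging,
                    int gpuObj, std::size_t contentHash, std::uint32_t copyGroup)
    {
        // key_t depends on AssetType, so `typename` is needed to treat it as a type here
        staging.emplace(gpuObj, typename CacheLike<AssetType>::key_t(contentHash, copyGroup));
    }

    int main()
    {
        std::unordered_map<int, CacheLike<float>::key_t> staging; // not dependent here, no typename needed
        insertLike<float>(staging, 1, 0xabcd, 0);
        return staging.size() == 1 ? 0 : 1;
    }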
@@ -3359,10 +3358,10 @@ auto CAssetConverter::reserve(const SInputs& inputs) -> SReserveResult
// The order of these calls is super important to go BOTTOM UP in terms of hashing and conversion dependants.
// Both so we can hash in O(Depth) and not O(Depth^2) but also so we have all the possible dependants ready.
// If two Asset chains are independent then we order them from most catastrophic failure to least.
- dedupCreateProp.operator()<ICPUBuffer>();
- dedupCreateProp.operator()<ICPUBottomLevelAccelerationStructure>();
- dedupCreateProp.operator()<ICPUTopLevelAccelerationStructure>();
- dedupCreateProp.operator()<ICPUImage>();
+ dedupCreateProp.template operator()<ICPUBuffer>();
+ dedupCreateProp.template operator()<ICPUBottomLevelAccelerationStructure>();
+ dedupCreateProp.template operator()<ICPUTopLevelAccelerationStructure>();
+ dedupCreateProp.template operator()<ICPUImage>();
// now allocate the memory for buffers and images
deferredAllocator.finalize();
@@ -3441,18 +3440,18 @@ auto CAssetConverter::reserve(const SInputs& inputs) -> SReserveResult
retval.m_queueFlags |= IQueue::FAMILY_FLAGS::COMPUTE_BIT;
}

- dedupCreateProp.operator()<ICPUBufferView>();
- dedupCreateProp.operator()<ICPUImageView>();
- dedupCreateProp.operator()<ICPUShader>();
- dedupCreateProp.operator()<ICPUSampler>();
- dedupCreateProp.operator()<ICPUDescriptorSetLayout>();
- dedupCreateProp.operator()<ICPUPipelineLayout>();
- dedupCreateProp.operator()<ICPUPipelineCache>();
- dedupCreateProp.operator()<ICPUComputePipeline>();
- dedupCreateProp.operator()<ICPURenderpass>();
- dedupCreateProp.operator()<ICPUGraphicsPipeline>();
- dedupCreateProp.operator()<ICPUDescriptorSet>();
- // dedupCreateProp.operator()<ICPUFramebuffer>();
+ dedupCreateProp.template operator()<ICPUBufferView>();
+ dedupCreateProp.template operator()<ICPUImageView>();
+ dedupCreateProp.template operator()<ICPUShader>();
+ dedupCreateProp.template operator()<ICPUSampler>();
+ dedupCreateProp.template operator()<ICPUDescriptorSetLayout>();
+ dedupCreateProp.template operator()<ICPUPipelineLayout>();
+ dedupCreateProp.template operator()<ICPUPipelineCache>();
+ dedupCreateProp.template operator()<ICPUComputePipeline>();
+ dedupCreateProp.template operator()<ICPURenderpass>();
+ dedupCreateProp.template operator()<ICPUGraphicsPipeline>();
+ dedupCreateProp.template operator()<ICPUDescriptorSet>();
+ // dedupCreateProp.template operator()<ICPUFramebuffer>();
}

// write out results
@@ -3482,7 +3481,7 @@ auto CAssetConverter::reserve(const SInputs& inputs) -> SReserveResult
// if something with this content hash is in the stagingCache, then it must match the `found->gpuObj`
if (auto finalCacheIt=stagingCache.find(gpuObj.get()); finalCacheIt!=stagingCache.end())
{
- const bool matches = finalCacheIt->second==CCache<AssetType>::key_t(found.contentHash,uniqueCopyGroupID);
+ const bool matches = finalCacheIt->second==typename CCache<AssetType>::key_t(found.contentHash,uniqueCopyGroupID);
assert(matches);
}
}
@@ -3665,7 +3664,7 @@ ISemaphore::future_t<IQueue::RESULT> CAssetConverter::convert_impl(SReserveResul
auto findInStaging = [&reservations]<Asset AssetType>(const typename asset_traits<AssetType>::video_t* gpuObj)->core::blake3_hash_t*
{
auto& stagingCache = std::get<SReserveResult::staging_cache_t<AssetType>>(reservations.m_stagingCaches);
- const auto found = stagingCache.find(const_cast<asset_traits<AssetType>::video_t*>(gpuObj));
+ const auto found = stagingCache.find(const_cast<typename asset_traits<AssetType>::video_t*>(gpuObj));
assert(found!=stagingCache.end());
return const_cast<core::blake3_hash_t*>(&found->second.value);
};
@@ -3678,7 +3677,7 @@ ISemaphore::future_t<IQueue::RESULT> CAssetConverter::convert_impl(SReserveResul
outputReverseMap[gpuObj.value.get()] = i++;
}
);
- auto markFailureInStaging = [&reservations,&outputReverseMap,logger]<Asset AssetType>(const char* message, smart_refctd_ptr<const AssetType>& canonical, const asset_traits<AssetType>::video_t* gpuObj, core::blake3_hash_t* hash)->void
+ auto markFailureInStaging = [&reservations,&outputReverseMap,logger]<Asset AssetType>(const char* message, smart_refctd_ptr<const AssetType>& canonical, const typename asset_traits<AssetType>::video_t* gpuObj, core::blake3_hash_t* hash)->void
{
// wipe the smart pointer to the canonical, make sure we release that memory ASAP if no other user is around
canonical = nullptr;
@@ -3766,7 +3765,7 @@ ISemaphore::future_t<IQueue::RESULT> CAssetConverter::convert_impl(SReserveResul
.size = item.gpuObj->getCreationParams().size,
.buffer = core::smart_refctd_ptr<IGPUBuffer>(buffer)
};
- auto pFoundHash = findInStaging.operator()<ICPUBuffer>(buffer);
+ auto pFoundHash = findInStaging.template operator()<ICPUBuffer>(buffer);
//
const auto ownerQueueFamily = checkOwnership(buffer,params.getFinalOwnerQueueFamily(buffer,*pFoundHash),transferFamily);
if (ownerQueueFamily==QueueFamilyInvalid)
@@ -3946,7 +3945,7 @@ ISemaphore::future_t<IQueue::RESULT> CAssetConverter::convert_impl(SReserveResul
// basiscs
const auto* cpuImg = item.canonical.get();
auto* image = item.gpuObj;
- auto pFoundHash = findInStaging.operator()<ICPUImage>(image);
+ auto pFoundHash = findInStaging.template operator()<ICPUImage>(image);
// get params
const auto& creationParams = image->getCreationParameters();
const auto format = creationParams.format;
@@ -4429,7 +4428,7 @@ ISemaphore::future_t<IQueue::RESULT> CAssetConverter::convert_impl(SReserveResul
if (!success)
for (const auto& info : buildInfos)
{
- const auto pFoundHash = findInStaging.operator()<ICPUTopLevelAccelerationStructure>(info.dstAS);
+ const auto pFoundHash = findInStaging.template operator()<ICPUTopLevelAccelerationStructure>(info.dstAS);
smart_refctd_ptr<const ICPUTopLevelAccelerationStructure> dummy; // already null at this point
markFailureInStaging("AS Build Command Recording",dummy,info.dstAS,pFoundHash);
}
@@ -4477,7 +4476,7 @@ ISemaphore::future_t<IQueue::RESULT> CAssetConverter::convert_impl(SReserveResul
for (auto& item : blasToBuild)
{
auto* as = item.gpuObj;
- auto pFoundHash = findInStaging.operator()<ICPUBottomLevelAccelerationStructure>(as);
+ auto pFoundHash = findInStaging.template operator()<ICPUBottomLevelAccelerationStructure>(as);
if (item.asBuildParams.host)
{
auto dOp = device->createDeferredOperation();
@@ -4580,7 +4579,7 @@ ISemaphore::future_t<IQueue::RESULT> CAssetConverter::convert_impl(SReserveResul
{
dedupBLASesUsed.clear();
const auto as = tlasToBuild.gpuObj;
- const auto pFoundHash = findInStaging.operator()<ICPUTopLevelAccelerationStructure>(as);
+ const auto pFoundHash = findInStaging.template operator()<ICPUTopLevelAccelerationStructure>(as);
const auto& backingRange = as->getCreationParams().bufferRange;
// checking ownership for the future on old buffer, but compacted will be made with same sharing creation parameters
const auto finalOwnerQueueFamily = checkOwnership(backingRange.buffer.get(),params.getFinalOwnerQueueFamily(as,*pFoundHash),computeFamily);
@@ -4938,7 +4937,7 @@ if (worstSize>minScratchSize)
auto missingDependent = [&reservations]<Asset AssetType>(const typename asset_traits<AssetType>::video_t* dep)->bool
{
auto& stagingCache = std::get<SReserveResult::staging_cache_t<AssetType>>(reservations.m_stagingCaches);
- auto found = stagingCache.find(const_cast<asset_traits<AssetType>::video_t*>(dep));
+ auto found = stagingCache.find(const_cast<typename asset_traits<AssetType>::video_t*>(dep));
// this only checks if whether we had to convert and failed
if (found!=stagingCache.end() && found->second.value==CHashCache::NoContentHash)
return true;
@@ -4965,9 +4964,9 @@ if (worstSize>minScratchSize)
}

if constexpr (std::is_same_v<AssetType,ICPUBufferView>)
- depsMissing = missingDependent.operator()<ICPUBuffer>(item.first->getUnderlyingBuffer());
+ depsMissing = missingDependent.template operator()<ICPUBuffer>(item.first->getUnderlyingBuffer());
if constexpr (std::is_same_v<AssetType,ICPUImageView>)
- depsMissing = missingDependent.operator()<ICPUImage>(item.first->getCreationParameters().image.get());
+ depsMissing = missingDependent.template operator()<ICPUImage>(item.first->getCreationParameters().image.get());
if constexpr (std::is_same_v<AssetType,ICPUDescriptorSet>)
{
const IGPUDescriptorSetLayout* layout = item.first->getLayout();
@@ -4977,7 +4976,7 @@ if (worstSize>minScratchSize)
const auto* samplers = item.first->getAllMutableCombinedSamplers();
for (auto i=0u; !depsMissing && i<count; i++)
if (samplers[i])
- depsMissing = missingDependent.operator()<ICPUSampler>(samplers[i].get());
+ depsMissing = missingDependent.template operator()<ICPUSampler>(samplers[i].get());
}
for (auto i=0u; !depsMissing && i<static_cast<uint32_t>(asset::IDescriptor::E_TYPE::ET_COUNT); i++)
{
@@ -4993,16 +4992,16 @@ if (worstSize>minScratchSize)
switch (asset::IDescriptor::GetTypeCategory(type))
{
case asset::IDescriptor::EC_BUFFER:
- depsMissing = missingDependent.operator()<ICPUBuffer>(static_cast<const IGPUBuffer*>(untypedDesc));
+ depsMissing = missingDependent.template operator()<ICPUBuffer>(static_cast<const IGPUBuffer*>(untypedDesc));
break;
case asset::IDescriptor::EC_SAMPLER:
- depsMissing = missingDependent.operator()<ICPUSampler>(static_cast<const IGPUSampler*>(untypedDesc));
+ depsMissing = missingDependent.template operator()<ICPUSampler>(static_cast<const IGPUSampler*>(untypedDesc));
break;
case asset::IDescriptor::EC_IMAGE:
- depsMissing = missingDependent.operator()<ICPUImageView>(static_cast<const IGPUImageView*>(untypedDesc));
+ depsMissing = missingDependent.template operator()<ICPUImageView>(static_cast<const IGPUImageView*>(untypedDesc));
break;
case asset::IDescriptor::EC_BUFFER_VIEW:
- depsMissing = missingDependent.operator()<ICPUBufferView>(static_cast<const IGPUBufferView*>(untypedDesc));
+ depsMissing = missingDependent.template operator()<ICPUBufferView>(static_cast<const IGPUBufferView*>(untypedDesc));
break;
case asset::IDescriptor::EC_ACCELERATION_STRUCTURE:
{
@@ -5059,20 +5058,20 @@ if (worstSize>minScratchSize)
}
};
// again, need to go bottom up so we can check dependencies being successes
- mergeCache.operator()<ICPUBuffer>();
- mergeCache.operator()<ICPUImage>();
- mergeCache.operator()<ICPUBottomLevelAccelerationStructure>();
- mergeCache.operator()<ICPUTopLevelAccelerationStructure>();
- mergeCache.operator()<ICPUBufferView>();
- mergeCache.operator()<ICPUImageView>();
- mergeCache.operator()<ICPUShader>();
- mergeCache.operator()<ICPUSampler>();
- mergeCache.operator()<ICPUDescriptorSetLayout>();
- mergeCache.operator()<ICPUPipelineLayout>();
- mergeCache.operator()<ICPUPipelineCache>();
- mergeCache.operator()<ICPUComputePipeline>();
- mergeCache.operator()<ICPURenderpass>();
- mergeCache.operator()<ICPUGraphicsPipeline>();
+ mergeCache.template operator()<ICPUBuffer>();
+ mergeCache.template operator()<ICPUImage>();
+ mergeCache.template operator()<ICPUBottomLevelAccelerationStructure>();
+ mergeCache.template operator()<ICPUTopLevelAccelerationStructure>();
+ mergeCache.template operator()<ICPUBufferView>();
+ mergeCache.template operator()<ICPUImageView>();
+ mergeCache.template operator()<ICPUShader>();
+ mergeCache.template operator()<ICPUSampler>();
+ mergeCache.template operator()<ICPUDescriptorSetLayout>();
+ mergeCache.template operator()<ICPUPipelineLayout>();
+ mergeCache.template operator()<ICPUPipelineCache>();
+ mergeCache.template operator()<ICPUComputePipeline>();
+ mergeCache.template operator()<ICPURenderpass>();
+ mergeCache.template operator()<ICPUGraphicsPipeline>();
// write the TLASes into Descriptor Set finally
if (auto& tlasWriteMap=reservations.m_deferredTLASDescriptorWrites; !tlasWriteMap.empty())
{
@@ -5085,7 +5084,7 @@ if (worstSize>minScratchSize)
// I know what I'm doing, this member has no influence on the set key hash
auto& tlas = const_cast<smart_refctd_ptr<IGPUTopLevelAccelerationStructure>&>(inWrite.tlas);
assert(tlas);
- if (missingDependent.operator()<ICPUTopLevelAccelerationStructure>(tlas.get()))
+ if (missingDependent.template operator()<ICPUTopLevelAccelerationStructure>(tlas.get()))
{
tlas = nullptr;
continue;
@@ -5108,10 +5107,10 @@ if (worstSize>minScratchSize)
for (auto& inWrite : tlasWriteMap)
const_cast<smart_refctd_ptr<IGPUTopLevelAccelerationStructure>&>(inWrite.tlas) = nullptr;
}
- mergeCache.operator()<ICPUDescriptorSet>();
+ mergeCache.template operator()<ICPUDescriptorSet>();
// needed for the IGPUDescriptorSets to check if TLAS exists/was written, can be released now
reservations.m_deferredTLASDescriptorWrites.clear();
- // mergeCache.operator()<ICPUFramebuffer>();
+ // mergeCache.template operator()<ICPUFramebuffer>();

// no submit was necessary, so should signal the extra semaphores from the host
if (!retval.blocking())