 using namespace GPUCA_NAMESPACE::gpu;
 using namespace o2::tpc;
 
+bool GPUChainTracking::NeedTPCClustersOnGPU()
+{
+  return (mRec->GetRecoStepsGPU() & GPUDataTypes::RecoStep::TPCConversion) || (mRec->GetRecoStepsGPU() & GPUDataTypes::RecoStep::TPCSliceTracking) || (mRec->GetRecoStepsGPU() & GPUDataTypes::RecoStep::TPCMerging) || (mRec->GetRecoStepsGPU() & GPUDataTypes::RecoStep::TPCCompression);
+}
+
 int32_t GPUChainTracking::ConvertNativeToClusterData()
 {
 #ifdef GPUCA_HAVE_O2HEADERS
@@ -42,19 +47,17 @@ int32_t GPUChainTracking::ConvertNativeToClusterData()
   GPUTPCConvert& convertShadow = doGPU ? processorsShadow()->tpcConverter : convert;
 
   bool transferClusters = false;
-  if (doGPU) {
-    if (!(mRec->GetRecoStepsGPU() & GPUDataTypes::RecoStep::TPCClusterFinding)) {
-      mInputsHost->mNClusterNative = mInputsShadow->mNClusterNative = mIOPtrs.clustersNative->nClustersTotal;
-      AllocateRegisteredMemory(mInputsHost->mResourceClusterNativeBuffer);
-      processorsShadow()->ioPtrs.clustersNative = mInputsShadow->mPclusterNativeAccess;
-      WriteToConstantMemory(RecoStep::TPCConversion, (char*)&processors()->ioPtrs - (char*)processors(), &processorsShadow()->ioPtrs, sizeof(processorsShadow()->ioPtrs), 0);
-      *mInputsHost->mPclusterNativeAccess = *mIOPtrs.clustersNative;
-      mInputsHost->mPclusterNativeAccess->clustersLinear = mInputsShadow->mPclusterNativeBuffer;
-      mInputsHost->mPclusterNativeAccess->setOffsetPtrs();
-      GPUMemCpy(RecoStep::TPCConversion, mInputsShadow->mPclusterNativeBuffer, mIOPtrs.clustersNative->clustersLinear, sizeof(mIOPtrs.clustersNative->clustersLinear[0]) * mIOPtrs.clustersNative->nClustersTotal, 0, true);
-      TransferMemoryResourceLinkToGPU(RecoStep::TPCConversion, mInputsHost->mResourceClusterNativeAccess, 0);
-      transferClusters = true;
-    }
+  if (mRec->IsGPU() && !(mRec->GetRecoStepsGPU() & GPUDataTypes::RecoStep::TPCClusterFinding) && NeedTPCClustersOnGPU()) {
+    mInputsHost->mNClusterNative = mInputsShadow->mNClusterNative = mIOPtrs.clustersNative->nClustersTotal;
+    AllocateRegisteredMemory(mInputsHost->mResourceClusterNativeBuffer);
+    processorsShadow()->ioPtrs.clustersNative = mInputsShadow->mPclusterNativeAccess;
+    WriteToConstantMemory(RecoStep::TPCConversion, (char*)&processors()->ioPtrs - (char*)processors(), &processorsShadow()->ioPtrs, sizeof(processorsShadow()->ioPtrs), 0);
+    *mInputsHost->mPclusterNativeAccess = *mIOPtrs.clustersNative;
+    mInputsHost->mPclusterNativeAccess->clustersLinear = mInputsShadow->mPclusterNativeBuffer;
+    mInputsHost->mPclusterNativeAccess->setOffsetPtrs();
+    GPUMemCpy(RecoStep::TPCConversion, mInputsShadow->mPclusterNativeBuffer, mIOPtrs.clustersNative->clustersLinear, sizeof(mIOPtrs.clustersNative->clustersLinear[0]) * mIOPtrs.clustersNative->nClustersTotal, 0, true);
+    TransferMemoryResourceLinkToGPU(RecoStep::TPCConversion, mInputsHost->mResourceClusterNativeAccess, 0);
+    transferClusters = true;
   }
   if (!param().par.earlyTpcTransform) {
     if (GetProcessingSettings().debugLevel >= 3) {
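
For illustration only (not part of the commit): the new NeedTPCClustersOnGPU() helper ORs the reco-step checks for every GPU step that consumes the cluster-native buffer, and the rewritten condition gates the host-to-GPU upload on it. Below is a minimal standalone sketch of the same pattern; the step bits, function names, and main() driver are simplified placeholders, not the real GPUDataTypes or GPUChainTracking API.

// Sketch only: placeholder RecoStep bits, not the real GPUDataTypes values.
#include <cstdint>
#include <iostream>

enum RecoStep : uint32_t {
  TPCClusterFinding = 1u << 0,
  TPCConversion = 1u << 1,
  TPCSliceTracking = 1u << 2,
  TPCMerging = 1u << 3,
  TPCCompression = 1u << 4,
};

// Same shape as the new helper: clusters must be resident on the GPU whenever
// any GPU step that consumes them is enabled.
bool needTPCClustersOnGPU(uint32_t gpuSteps)
{
  return gpuSteps & (TPCConversion | TPCSliceTracking | TPCMerging | TPCCompression);
}

// Same shape as the rewritten condition: upload only when running on a GPU,
// the clusters were not already produced there by GPU cluster finding,
// and at least one GPU step actually needs them.
bool shouldUploadClusters(bool isGPU, uint32_t gpuSteps)
{
  return isGPU && !(gpuSteps & TPCClusterFinding) && needTPCClustersOnGPU(gpuSteps);
}

int main()
{
  std::cout << shouldUploadClusters(true, TPCSliceTracking | TPCMerging) << '\n';  // 1: upload needed
  std::cout << shouldUploadClusters(true, TPCClusterFinding | TPCMerging) << '\n'; // 0: clusters already on GPU
  std::cout << shouldUploadClusters(false, TPCMerging) << '\n';                    // 0: CPU-only run
}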