diff --git a/.github/intel-llvm-mirror-base-commit b/.github/intel-llvm-mirror-base-commit
index 0eed1c8ea3..2b187e4d5e 100644
--- a/.github/intel-llvm-mirror-base-commit
+++ b/.github/intel-llvm-mirror-base-commit
@@ -1 +1 @@
-f119abe1b69f99602067517a64300a659471134b
+f2ac846551b0f7fb97974a87c74f7a48bc4f43f8
diff --git a/source/adapters/level_zero/context.cpp b/source/adapters/level_zero/context.cpp
index fe690f3673..930b57f354 100644
--- a/source/adapters/level_zero/context.cpp
+++ b/source/adapters/level_zero/context.cpp
@@ -407,14 +407,71 @@ ur_result_t ur_context_handle_t_::getFreeSlotInExistingOrNewPool(
     bool ProfilingEnabled, ur_device_handle_t Device,
     bool CounterBasedEventEnabled, bool UsingImmCmdList,
     bool InterruptBasedEventEnabled) {
-  // Lock while updating event pool machinery.
-  std::scoped_lock<ur_mutex> Lock(ZeEventPoolCacheMutex);
   ze_device_handle_t ZeDevice = nullptr;
-
   if (Device) {
     ZeDevice = Device->ZeDevice;
   }
+
+  if (DisableEventsCaching) {
+    // Skip all cache handling, always create a new pool
+    ze_event_pool_counter_based_exp_desc_t counterBasedExt = {
+        ZE_STRUCTURE_TYPE_COUNTER_BASED_EVENT_POOL_EXP_DESC, nullptr, 0};
+
+    ze_intel_event_sync_mode_exp_desc_t eventSyncMode = {
+        ZE_INTEL_STRUCTURE_TYPE_EVENT_SYNC_MODE_EXP_DESC, nullptr, 0};
+    eventSyncMode.syncModeFlags =
+        ZE_INTEL_EVENT_SYNC_MODE_EXP_FLAG_LOW_POWER_WAIT |
+        ZE_INTEL_EVENT_SYNC_MODE_EXP_FLAG_SIGNAL_INTERRUPT;
+
+    ZeStruct<ze_event_pool_desc_t> ZeEventPoolDesc;
+    ZeEventPoolDesc.count = MaxNumEventsPerPool;
+    ZeEventPoolDesc.flags = 0;
+    ZeEventPoolDesc.pNext = nullptr;
+    if (HostVisible)
+      ZeEventPoolDesc.flags |= ZE_EVENT_POOL_FLAG_HOST_VISIBLE;
+    if (ProfilingEnabled)
+      ZeEventPoolDesc.flags |= ZE_EVENT_POOL_FLAG_KERNEL_TIMESTAMP;
+    UR_LOG(DEBUG, "ze_event_pool_desc_t flags set to: {}",
+           ZeEventPoolDesc.flags);
+    if (CounterBasedEventEnabled) {
+      if (UsingImmCmdList) {
+        counterBasedExt.flags = ZE_EVENT_POOL_COUNTER_BASED_EXP_FLAG_IMMEDIATE;
+      } else {
+        counterBasedExt.flags =
+            ZE_EVENT_POOL_COUNTER_BASED_EXP_FLAG_NON_IMMEDIATE;
+      }
+      UR_LOG(DEBUG, "ze_event_pool_desc_t counter based flags set to: {}",
+             counterBasedExt.flags);
+      if (InterruptBasedEventEnabled) {
+        counterBasedExt.pNext = &eventSyncMode;
+      }
+      ZeEventPoolDesc.pNext = &counterBasedExt;
+    } else if (InterruptBasedEventEnabled) {
+      ZeEventPoolDesc.pNext = &eventSyncMode;
+    }
+
+    std::vector<ze_device_handle_t> ZeDevices;
+    if (ZeDevice) {
+      ZeDevices.push_back(ZeDevice);
+    } else {
+      std::for_each(Devices.begin(), Devices.end(),
+                    [&](const ur_device_handle_t &D) {
+                      ZeDevices.push_back(D->ZeDevice);
+                    });
+    }
+
+    ZE2UR_CALL(zeEventPoolCreate, (ZeContext, &ZeEventPoolDesc,
+                                   ZeDevices.size(), &ZeDevices[0], &Pool));
+    Index = 0;
+    NumEventsAvailableInEventPool[Pool] = MaxNumEventsPerPool - 1;
+    NumEventsUnreleasedInEventPool[Pool] = 1;
+    return UR_RESULT_SUCCESS;
+  }
+
+  // --- Normal cache-based logic below ---
+  std::scoped_lock<ur_mutex> Lock(ZeEventPoolCacheMutex);
+
   std::list<ze_event_pool_handle_t> *ZePoolCache = getZeEventPoolCache(
       HostVisible, ProfilingEnabled, CounterBasedEventEnabled, UsingImmCmdList,
       InterruptBasedEventEnabled, ZeDevice);
@@ -423,6 +480,7 @@ ur_result_t ur_context_handle_t_::getFreeSlotInExistingOrNewPool(
   if (NumEventsAvailableInEventPool[ZePoolCache->front()] == 0) {
     if (DisableEventsCaching) {
       // Remove full pool from the cache if events caching is disabled.
+      ZE_CALL_NOCHECK(zeEventPoolDestroy, (ZePoolCache->front()));
       ZePoolCache->erase(ZePoolCache->begin());
     } else {
      // If event caching is enabled then we don't destroy events so there is