@@ -406,15 +406,89 @@ ur_result_t ur_context_handle_t_::getFreeSlotInExistingOrNewPool(
     ze_event_pool_handle_t &Pool, size_t &Index, bool HostVisible,
     bool ProfilingEnabled, ur_device_handle_t Device,
     bool CounterBasedEventEnabled, bool UsingImmCmdList,
-    bool InterruptBasedEventEnabled) {
-  // Lock while updating event pool machinery.
-  std::scoped_lock<ur_mutex> Lock(ZeEventPoolCacheMutex);
+    bool InterruptBasedEventEnabled, ur_queue_handle_t Queue, bool IsInternal) {
 
   ze_device_handle_t ZeDevice = nullptr;
-
   if (Device) {
     ZeDevice = Device->ZeDevice;
   }
+
+  if (DisableEventsCaching) {
+    // Skip all cache handling, always create a new pool
+    ze_event_pool_counter_based_exp_desc_t counterBasedExt = {
+        ZE_STRUCTURE_TYPE_COUNTER_BASED_EVENT_POOL_EXP_DESC, nullptr, 0};
+
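+    // Sync-mode descriptor: requests low-power waits and interrupt-driven
+    // signaling; it is chained in below only when interrupt-based events
+    // are enabled.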
+    ze_intel_event_sync_mode_exp_desc_t eventSyncMode = {
+        ZE_INTEL_STRUCTURE_TYPE_EVENT_SYNC_MODE_EXP_DESC, nullptr, 0};
+    eventSyncMode.syncModeFlags =
+        ZE_INTEL_EVENT_SYNC_MODE_EXP_FLAG_LOW_POWER_WAIT |
+        ZE_INTEL_EVENT_SYNC_MODE_EXP_FLAG_SIGNAL_INTERRUPT;
+
+    ZeStruct<ze_event_pool_desc_t> ZeEventPoolDesc;
+    ZeEventPoolDesc.count = MaxNumEventsPerPool;
+    ZeEventPoolDesc.flags = 0;
+    ZeEventPoolDesc.pNext = nullptr;
+    if (HostVisible)
+      ZeEventPoolDesc.flags |= ZE_EVENT_POOL_FLAG_HOST_VISIBLE;
+    if (ProfilingEnabled)
+      ZeEventPoolDesc.flags |= ZE_EVENT_POOL_FLAG_KERNEL_TIMESTAMP;
+    UR_LOG(DEBUG, "ze_event_pool_desc_t flags set to: {}",
+           ZeEventPoolDesc.flags);
+    if (CounterBasedEventEnabled) {
+      if (UsingImmCmdList) {
+        counterBasedExt.flags = ZE_EVENT_POOL_COUNTER_BASED_EXP_FLAG_IMMEDIATE;
+      } else {
+        counterBasedExt.flags =
+            ZE_EVENT_POOL_COUNTER_BASED_EXP_FLAG_NON_IMMEDIATE;
+      }
+      UR_LOG(DEBUG, "ze_event_pool_desc_t counter based flags set to: {}",
+             counterBasedExt.flags);
+      if (InterruptBasedEventEnabled) {
+        counterBasedExt.pNext = &eventSyncMode;
+      }
+      ZeEventPoolDesc.pNext = &counterBasedExt;
+    } else if (InterruptBasedEventEnabled) {
+      ZeEventPoolDesc.pNext = &eventSyncMode;
+    }
+
+    std::vector<ze_device_handle_t> ZeDevices;
+    if (ZeDevice) {
+      ZeDevices.push_back(ZeDevice);
+    } else {
+      std::for_each(Devices.begin(), Devices.end(),
+                    [&](const ur_device_handle_t &D) {
+                      ZeDevices.push_back(D->ZeDevice);
+                    });
+    }
+
+    ze_result_t Result = ZE_CALL_NOCHECK(
+        zeEventPoolCreate,
+        (ZeContext, &ZeEventPoolDesc, ZeDevices.size(), &ZeDevices[0], &Pool));
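+    // Out-of-resources recovery for internal events: reclaim completed
+    // events from the queue and retry the pool creation once.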
+    if (IsInternal && ze2urResult(Result) == UR_RESULT_ERROR_OUT_OF_RESOURCES &&
+        Queue) {
+      if (!Queue->isInOrderQueue()) {
+        if (Queue->UsingImmCmdLists) {
+          UR_CALL(CleanupEventsInImmCmdLists(Queue, true /*QueueLocked*/,
+                                             false /*QueueSynced*/,
+                                             nullptr /*CompletedEvent*/));
+        } else {
+          UR_CALL(resetCommandLists(Queue));
+        }
+        ZE2UR_CALL(zeEventPoolCreate, (ZeContext, &ZeEventPoolDesc,
+                                       ZeDevices.size(), &ZeDevices[0], &Pool));
+      }
+    } else if (ze2urResult(Result) != UR_RESULT_SUCCESS) {
+      return ze2urResult(Result);
+    }
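+    // Hand out slot 0 of the fresh pool and record its remaining capacity.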
+    Index = 0;
+    NumEventsAvailableInEventPool[Pool] = MaxNumEventsPerPool - 1;
+    NumEventsUnreleasedInEventPool[Pool] = 1;
+    return UR_RESULT_SUCCESS;
+  }
+
+  // --- Normal cache-based logic below ---
+  std::scoped_lock<ur_mutex> Lock(ZeEventPoolCacheMutex);
+
   std::list<ze_event_pool_handle_t> *ZePoolCache = getZeEventPoolCache(
       HostVisible, ProfilingEnabled, CounterBasedEventEnabled, UsingImmCmdList,
       InterruptBasedEventEnabled, ZeDevice);
@@ -423,6 +497,7 @@ ur_result_t ur_context_handle_t_::getFreeSlotInExistingOrNewPool(
     if (NumEventsAvailableInEventPool[ZePoolCache->front()] == 0) {
       if (DisableEventsCaching) {
         // Remove full pool from the cache if events caching is disabled.
+        ZE_CALL_NOCHECK(zeEventPoolDestroy, (ZePoolCache->front()));
         ZePoolCache->erase(ZePoolCache->begin());
       } else {
         // If event caching is enabled then we don't destroy events so there is
@@ -488,8 +563,26 @@ ur_result_t ur_context_handle_t_::getFreeSlotInExistingOrNewPool(
                     });
     }
 
-    ZE2UR_CALL(zeEventPoolCreate, (ZeContext, &ZeEventPoolDesc,
-                                   ZeDevices.size(), &ZeDevices[0], ZePool));
+    ze_result_t Result = ZE_CALL_NOCHECK(
+        zeEventPoolCreate,
+        (ZeContext, &ZeEventPoolDesc, ZeDevices.size(), &ZeDevices[0], ZePool));
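+    // Same out-of-resources recovery as in the caching-disabled path above:
+    // reclaim completed events from the queue and retry once.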
+    if (IsInternal && ze2urResult(Result) == UR_RESULT_ERROR_OUT_OF_RESOURCES &&
+        Queue) {
+      if (!Queue->isInOrderQueue()) {
+        if (Queue->UsingImmCmdLists) {
+          UR_CALL(CleanupEventsInImmCmdLists(Queue, true /*QueueLocked*/,
+                                             false /*QueueSynced*/,
+                                             nullptr /*CompletedEvent*/));
+        } else {
+          UR_CALL(resetCommandLists(Queue));
+        }
+        ZE2UR_CALL(zeEventPoolCreate,
+                   (ZeContext, &ZeEventPoolDesc, ZeDevices.size(),
+                    &ZeDevices[0], ZePool));
+      }
+    } else if (ze2urResult(Result) != UR_RESULT_SUCCESS) {
+      return ze2urResult(Result);
+    }
     NumEventsAvailableInEventPool[*ZePool] = MaxNumEventsPerPool - 1;
     NumEventsUnreleasedInEventPool[*ZePool] = 1;
   } else {