@@ -139,8 +139,9 @@ TEST_P(urEnqueueKernelLaunchTest, InvalidKernelArgs) {
                                    nullptr));
 
     if (backend == UR_PLATFORM_BACKEND_CUDA ||
-        backend == UR_PLATFORM_BACKEND_HIP) {
-        GTEST_FAIL() << "AMD and Nvidia can't check kernel arguments.";
+        backend == UR_PLATFORM_BACKEND_HIP ||
+        backend == UR_PLATFORM_BACKEND_LEVEL_ZERO) {
+        GTEST_FAIL() << "AMD, L0 and Nvidia can't check kernel arguments.";
     }
 
     // Enqueue kernel without setting any args
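On backends that can validate kernel arguments, the code following this hunk presumably expects the arg-less launch to be rejected. A minimal sketch of that expectation, assuming the fixture's queue, kernel, global_offset and global_size members and the CTS ASSERT_EQ_RESULT helper:

    // Sketch only: launching with no arguments set should be reported as
    // UR_RESULT_ERROR_INVALID_KERNEL_ARGS by adapters that can validate.
    ASSERT_EQ_RESULT(UR_RESULT_ERROR_INVALID_KERNEL_ARGS,
                     urEnqueueKernelLaunch(queue, kernel, 1, &global_offset,
                                           &global_size, nullptr, 0, nullptr,
                                           nullptr));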
@@ -561,16 +562,16 @@ TEST_P(urEnqueueKernelLaunchUSMLinkedList, Success) {
     }
 
     // Build linked list with USM allocations
-    ASSERT_SUCCESS(urUSMSharedAlloc(context, device, nullptr, pool,
-                                    sizeof(Node),
+    ur_usm_desc_t desc{UR_STRUCTURE_TYPE_USM_DESC, nullptr, 0, alignof(Node)};
+    ASSERT_SUCCESS(urUSMSharedAlloc(context, device, &desc, pool, sizeof(Node),
                                     reinterpret_cast<void **>(&list_head)));
     ASSERT_NE(list_head, nullptr);
     Node *list_cur = list_head;
     for (int i = 0; i < num_nodes; i++) {
         list_cur->num = i * 2;
         if (i < num_nodes - 1) {
             ASSERT_SUCCESS(
-                urUSMSharedAlloc(context, device, nullptr, pool, sizeof(Node),
+                urUSMSharedAlloc(context, device, &desc, pool, sizeof(Node),
                                  reinterpret_cast<void **>(&list_cur->next)));
             ASSERT_NE(list_cur->next, nullptr);
         } else {
@@ -579,6 +580,11 @@ TEST_P(urEnqueueKernelLaunchUSMLinkedList, Success) {
         list_cur = list_cur->next;
     }
 
+    ur_bool_t indirect = true;
+    ASSERT_SUCCESS(urKernelSetExecInfo(kernel,
+                                       UR_KERNEL_EXEC_INFO_USM_INDIRECT_ACCESS,
+                                       sizeof(indirect), nullptr, &indirect));
+
     // Run kernel which will iterate the list and modify the values
     ASSERT_SUCCESS(urKernelSetArgPointer(kernel, 0, nullptr, list_head));
     ASSERT_SUCCESS(urEnqueueKernelLaunch(queue, kernel, 1, &global_offset,
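Taken together, the two hunks in urEnqueueKernelLaunchUSMLinkedList pair aligned shared allocations with an indirect-access hint: only the list head is passed as a kernel argument, and every other node is reached by chasing next pointers, so the adapter has to be told the kernel may touch USM it was never handed explicitly. A condensed host-side sketch of that pattern, with assumed handle names (context, device, pool, kernel) and an illustrative Node layout:

    struct Node {
        int num;     // payload written by the kernel
        Node *next;  // device-visible pointer to the next node
    };

    // Request Node-aligned shared memory so pointer chasing stays valid.
    ur_usm_desc_t desc{UR_STRUCTURE_TYPE_USM_DESC, nullptr, 0, alignof(Node)};
    Node *head = nullptr;
    ASSERT_SUCCESS(urUSMSharedAlloc(context, device, &desc, pool, sizeof(Node),
                                    reinterpret_cast<void **>(&head)));

    // Declare that the kernel may dereference USM pointers it did not receive
    // as explicit arguments (the ->next chain), then pass only the head.
    ur_bool_t indirect = true;
    ASSERT_SUCCESS(urKernelSetExecInfo(kernel,
                                       UR_KERNEL_EXEC_INFO_USM_INDIRECT_ACCESS,
                                       sizeof(indirect), nullptr, &indirect));
    ASSERT_SUCCESS(urKernelSetArgPointer(kernel, 0, nullptr, head));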