@@ -139,8 +139,9 @@ TEST_P(urEnqueueKernelLaunchTest, InvalidKernelArgs) {
                                          nullptr));
 
     if (backend == UR_PLATFORM_BACKEND_CUDA ||
-        backend == UR_PLATFORM_BACKEND_HIP) {
-        GTEST_FAIL() << "AMD and Nvidia can't check kernel arguments.";
+        backend == UR_PLATFORM_BACKEND_HIP ||
+        backend == UR_PLATFORM_BACKEND_LEVEL_ZERO) {
+        GTEST_FAIL() << "AMD, L0 and Nvidia can't check kernel arguments.";
     }
 
     // Enqueue kernel without setting any args
@@ -561,16 +562,17 @@ TEST_P(urEnqueueKernelLaunchUSMLinkedList, Success) {
     }
 
     // Build linked list with USM allocations
-    ASSERT_SUCCESS(urUSMSharedAlloc(context, device, nullptr, pool,
-                                    sizeof(Node),
+    ur_usm_desc_t desc{UR_STRUCTURE_TYPE_USM_DESC, nullptr, 0};
+    desc.align = alignof(Node);
+    ASSERT_SUCCESS(urUSMSharedAlloc(context, device, &desc, pool, sizeof(Node),
                                     reinterpret_cast<void **>(&list_head)));
     ASSERT_NE(list_head, nullptr);
     Node *list_cur = list_head;
     for (int i = 0; i < num_nodes; i++) {
         list_cur->num = i * 2;
         if (i < num_nodes - 1) {
             ASSERT_SUCCESS(
-                urUSMSharedAlloc(context, device, nullptr, pool, sizeof(Node),
+                urUSMSharedAlloc(context, device, &desc, pool, sizeof(Node),
                                  reinterpret_cast<void **>(&list_cur->next)));
             ASSERT_NE(list_cur->next, nullptr);
         } else {
@@ -579,6 +581,11 @@ TEST_P(urEnqueueKernelLaunchUSMLinkedList, Success) {
         list_cur = list_cur->next;
     }
 
+    ur_bool_t indirect = true;
+    ASSERT_SUCCESS(urKernelSetExecInfo(kernel,
+                                       UR_KERNEL_EXEC_INFO_USM_INDIRECT_ACCESS,
+                                       sizeof(indirect), nullptr, &indirect));
+
     // Run kernel which will iterate the list and modify the values
     ASSERT_SUCCESS(urKernelSetArgPointer(kernel, 0, nullptr, list_head));
     ASSERT_SUCCESS(urEnqueueKernelLaunch(queue, kernel, 1, &global_offset,
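
For reference, a minimal standalone sketch of the pattern the last two hunks adopt: describe the shared-USM allocation with a ur_usm_desc_t that requests alignof(Node) alignment, then opt the kernel into indirect USM access before launch so it may follow the next pointers stored inside the allocations. The helper name and the surrounding handles (context, device, pool, kernel) are illustrative assumptions, not part of the test suite.

// Minimal sketch (not part of the PR): aligned shared-USM allocation plus the
// indirect-access opt-in. All handles are assumed to be created and released
// by the caller; the helper name is illustrative.
#include <ur_api.h>

struct Node {
    int num;
    Node *next;
};

ur_result_t alloc_head_with_indirect_access(ur_context_handle_t context,
                                            ur_device_handle_t device,
                                            ur_usm_pool_handle_t pool,
                                            ur_kernel_handle_t kernel,
                                            Node **out_head) {
    // Describe the allocation instead of passing a null descriptor, so the
    // adapter returns memory aligned for Node.
    ur_usm_desc_t desc{UR_STRUCTURE_TYPE_USM_DESC, nullptr, 0};
    desc.align = alignof(Node);

    ur_result_t result =
        urUSMSharedAlloc(context, device, &desc, pool, sizeof(Node),
                         reinterpret_cast<void **>(out_head));
    if (result != UR_RESULT_SUCCESS) {
        return result;
    }

    // The kernel reaches the rest of the list through pointers stored in USM
    // memory rather than through kernel arguments, so declare that it needs
    // indirect USM access.
    ur_bool_t indirect = true;
    return urKernelSetExecInfo(kernel, UR_KERNEL_EXEC_INFO_USM_INDIRECT_ACCESS,
                               sizeof(indirect), nullptr, &indirect);
}

Without the exec-info call, an adapter is generally free to assume the kernel only touches allocations that were bound explicitly via the urKernelSetArg* entry points, which is why the linked-list test needs the flag.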