// This test checks whether memory accesses to contiguous virtual memory
// ranges are performed correctly.

// RUN: %{build} -o %t.out
// RUN: %{run} %t.out

#include <cassert>
#include <iostream>
#include <vector>

#include "helpers.hpp"

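// Helper aggregates that keep track of each reserved virtual address range and
// its corresponding physical memory mapping so that both can be released at
// the end of the test.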
struct VirtualAddressRange {
  VirtualAddressRange(uintptr_t Ptr, size_t Size) : MPtr{Ptr}, MSize{Size} {}

  uintptr_t MPtr;
  size_t MSize;
};

struct PhysicalMemoryMapping {
  PhysicalMemoryMapping(syclext::physical_mem &&PhysicalMem, void *MappingPtr)
      : MPhysicalMem(std::move(PhysicalMem)), MMappingPtr(MappingPtr) {}

  syclext::physical_mem MPhysicalMem;
  void *MMappingPtr;
};

int main() {
  int Failed = 0;
  sycl::queue Q;
  sycl::context Context = Q.get_context();
  sycl::device Device = Q.get_device();

  constexpr size_t NumberOfVirtualMemoryRanges = 5;
  constexpr size_t ElementsInRange = 100;
  constexpr int ValueSetInKernel = 999;

  size_t BytesRequiredPerRange = ElementsInRange * sizeof(int);

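  // helpers.hpp supplies the granularity and alignment utilities; the aligned
  // byte size computed here is reused for every range below.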
  size_t UsedGranularity = GetLCMGranularity(Device, Context);

  size_t AlignedByteSizePerRange =
      GetAlignedByteSize(BytesRequiredPerRange, UsedGranularity);

  std::vector<VirtualAddressRange> VirtualMemoryRanges;
  std::vector<PhysicalMemoryMapping> PhysicalMemoryMappings;

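  // Reserve a virtual address range for each iteration and map newly allocated
  // physical memory into it with read-write access.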
  for (size_t Index = 0; Index < NumberOfVirtualMemoryRanges; ++Index) {
    uintptr_t VirtualMemoryPtr =
        syclext::reserve_virtual_mem(AlignedByteSizePerRange, Context);
    syclext::physical_mem PhysicalMem{Device, Context,
                                      AlignedByteSizePerRange};
    void *MappedPtr =
        PhysicalMem.map(VirtualMemoryPtr, AlignedByteSizePerRange,
                        syclext::address_access_mode::read_write);

    VirtualMemoryRanges.emplace_back(VirtualMemoryPtr, AlignedByteSizePerRange);
    PhysicalMemoryMappings.emplace_back(std::move(PhysicalMem), MappedPtr);
  }

  std::vector<int> ResultHostData(ElementsInRange);

  for (size_t Index = 0; Index < NumberOfVirtualMemoryRanges; ++Index) {
    int *DataRangePtr =
        reinterpret_cast<int *>(PhysicalMemoryMappings[Index].MMappingPtr);

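    // Fill the current range on the device through its mapped virtual address.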
    Q.parallel_for(ElementsInRange, [=](sycl::id<1> Idx) {
      DataRangePtr[Idx] = ValueSetInKernel;
    }).wait_and_throw();

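    // Copy the data back through a buffer; it is written to ResultHostData
    // when the buffer goes out of scope.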
    {
      sycl::buffer<int> ResultBuffer(ResultHostData);

      Q.submit([&](sycl::handler &Handle) {
        sycl::accessor A(ResultBuffer, Handle, sycl::write_only);
        Handle.parallel_for(ElementsInRange,
                            [=](sycl::id<1> Idx) { A[Idx] = DataRangePtr[Idx]; });
      });
    }

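    // Verify on the host that every element of this range was written by the
    // kernel.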
    for (size_t i = 0; i < ElementsInRange; i++) {
      if (ResultHostData[i] != ValueSetInKernel) {
        std::cout << "Comparison failed with virtual range " << Index + 1
                  << " at index " << i << ": " << ResultHostData[i]
                  << " != " << ValueSetInKernel << std::endl;
        ++Failed;
      }
    }
  }

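  // Unmap every physical memory mapping, then release the reserved virtual
  // address ranges.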
  for (const auto &PhysMemMap : PhysicalMemoryMappings) {
    syclext::unmap(PhysMemMap.MMappingPtr, PhysMemMap.MPhysicalMem.size(),
                   Context);
  }
  for (const auto &VirtualMemRange : VirtualMemoryRanges) {
    syclext::free_virtual_mem(VirtualMemRange.MPtr, VirtualMemRange.MSize,
                              Context);
  }

  return Failed;
}