|
1 | | -// This test checks whether certain operations in virtual memory extension work as expectd. |
| 1 | +// This test checks whether certain operations in the virtual memory extension
| 2 | +// work as expected.
2 | 3 |
|
3 | 4 | // RUN: %{build} -o %t.out |
4 | 5 | // RUN: %{run} %t.out |
|
8 | 9 | int main() { |
9 | 10 |
|
10 | 11 | constexpr size_t NumberOfIterations = 3; |
11 | | - std::array<size_t, NumberOfIterations> NumberOfElementsPerIteration{10,100,1000}; |
12 | | - |
| 12 | + std::array<size_t, NumberOfIterations> NumberOfElementsPerIteration{10, 100, |
| 13 | + 1000}; |
| 14 | + |
13 | 15 | sycl::queue Q; |
14 | 16 | sycl::context Context = Q.get_context(); |
15 | | - sycl::device Device = Q.get_device(); |
16 | | - |
17 | | - //A check should be performed that we can successfully perform and immediately release a valid reservation. |
18 | | - for(const size_t RequiredNumElements: NumberOfElementsPerIteration){ |
| 17 | + sycl::device Device = Q.get_device(); |
| 18 | + |
| 19 | +  // Check that we can successfully make a valid reservation and immediately
| 20 | +  // release it again.
| 21 | + for (const size_t RequiredNumElements : NumberOfElementsPerIteration) { |
19 | 22 | size_t BytesRequired = RequiredNumElements * sizeof(int); |
20 | 23 | size_t UsedGranularity = GetLCMGranularity(Device, Context); |
21 | 24 | size_t AlignedByteSize = GetAlignedByteSize(BytesRequired, UsedGranularity); |
22 | | - uintptr_t VirtualMemoryPtr = syclext::reserve_virtual_mem(0, AlignedByteSize, Context); |
| 25 | + uintptr_t VirtualMemoryPtr = |
| 26 | + syclext::reserve_virtual_mem(0, AlignedByteSize, Context); |
23 | 27 | syclext::free_virtual_mem(VirtualMemoryPtr, AlignedByteSize, Context); |
24 | 28 | } |
25 | 29 |
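Reviewer note: GetLCMGranularity and GetAlignedByteSize come from a shared test header that is not part of this diff. A minimal sketch of what they are assumed to compute, based on how the test uses them; the granularity query is the one the virtual memory extension provides, but the exact bodies below are an assumption, not the actual helpers:

    #include <numeric> // std::lcm

    // Assumption: the granularity used for sizing is the least common multiple
    // of the minimum physical-memory granularity for the device/context pair
    // and the minimum virtual-range granularity for the context.
    size_t GetLCMGranularity(const sycl::device &Dev, const sycl::context &Ctx) {
      size_t PhysGran = syclext::get_mem_granularity(
          Dev, Ctx, syclext::granularity_mode::minimum);
      size_t VirtGran =
          syclext::get_mem_granularity(Ctx, syclext::granularity_mode::minimum);
      return std::lcm(PhysGran, VirtGran);
    }

    // Assumption: rounds Bytes up to the nearest multiple of Granularity so
    // that reservations and physical allocations are validly sized.
    size_t GetAlignedByteSize(size_t Bytes, size_t Granularity) {
      return ((Bytes + Granularity - 1) / Granularity) * Granularity;
    }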
|
26 | | - |
27 | | - // A check should be performed that we can successfully map and immediately unmap a virtual memory range. |
28 | | - for(const size_t RequiredNumElements: NumberOfElementsPerIteration){ |
| 30 | +  // Check that we can successfully map a virtual memory range and
| 31 | +  // immediately unmap it again.
| 32 | + for (const size_t RequiredNumElements : NumberOfElementsPerIteration) { |
29 | 33 | size_t BytesRequired = RequiredNumElements * sizeof(int); |
30 | 34 | size_t UsedGranularity = GetLCMGranularity(Device, Context); |
31 | 35 | size_t AlignedByteSize = GetAlignedByteSize(BytesRequired, UsedGranularity); |
32 | | - uintptr_t VirtualMemoryPtr = syclext::reserve_virtual_mem(0, AlignedByteSize, Context); |
| 36 | + uintptr_t VirtualMemoryPtr = |
| 37 | + syclext::reserve_virtual_mem(0, AlignedByteSize, Context); |
33 | 38 | syclext::physical_mem PhysicalMem{Device, Context, AlignedByteSize}; |
34 | | - void* MappedPtr = |
35 | | - PhysicalMem.map(VirtualMemoryPtr, AlignedByteSize, |
36 | | - syclext::address_access_mode::read_write); |
| 39 | + void *MappedPtr = PhysicalMem.map(VirtualMemoryPtr, AlignedByteSize, |
| 40 | + syclext::address_access_mode::read_write); |
37 | 41 | syclext::unmap(MappedPtr, AlignedByteSize, Context); |
38 | 42 | syclext::free_virtual_mem(VirtualMemoryPtr, AlignedByteSize, Context); |
39 | 43 | } |
40 | 44 |
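Reviewer note: the loop above only checks that map() and unmap() succeed. Once mapped with address_access_mode::read_write, the range should be usable like ordinary device memory; a hypothetical use, which would sit inside the loop between map() and unmap() (not part of this test):

    // Write through the mapped range from a kernel. RequiredNumElements *
    // sizeof(int) never exceeds AlignedByteSize, so accesses stay in bounds.
    int *Data = reinterpret_cast<int *>(MappedPtr);
    Q.parallel_for(sycl::range<1>{RequiredNumElements}, [=](sycl::id<1> Idx) {
       Data[Idx] = static_cast<int>(Idx[0]);
     }).wait();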
|
41 | 45 | { |
42 | | - // Check should be performed that methods get_context(), get_device() and size() return correct values (i.e. ones which were passed to physical_mem constructor). |
43 | | - size_t BytesRequired = NumberOfElementsPerIteration[2] * sizeof(int); |
44 | | - size_t UsedGranularity = GetLCMGranularity(Device, Context); |
45 | | - size_t AlignedByteSize = GetAlignedByteSize(BytesRequired, UsedGranularity); |
46 | | - |
47 | | - syclext::physical_mem PhysicalMem{Device, Context, AlignedByteSize}; |
| 46 | +    // Check that the get_context(), get_device() and size() methods return
| 47 | +    // the correct values, i.e. the ones that were passed to the
| 48 | +    // physical_mem constructor.
| 49 | + size_t BytesRequired = NumberOfElementsPerIteration[2] * sizeof(int); |
| 50 | + size_t UsedGranularity = GetLCMGranularity(Device, Context); |
| 51 | + size_t AlignedByteSize = GetAlignedByteSize(BytesRequired, UsedGranularity); |
| 52 | + |
| 53 | + syclext::physical_mem PhysicalMem{Device, Context, AlignedByteSize}; |
48 | 54 |
|
49 | | - PhysicalMem.get_context(); |
50 | | - PhysicalMem.get_device(); |
51 | | - PhysicalMem.size(); |
| 55 | + PhysicalMem.get_context(); |
| 56 | + PhysicalMem.get_device(); |
| 57 | + PhysicalMem.size(); |
52 | 58 |
|
53 | | - assert(PhysicalMem.get_device() == Device && |
54 | | - "device passed to physical_mem must be the same as returned from get_device()"); |
| 59 | + assert(PhysicalMem.get_device() == Device && |
| 60 | + "device passed to physical_mem must be the same as returned from " |
| 61 | + "get_device()"); |
55 | 62 |
|
56 | | - assert(PhysicalMem.get_context() == Context && |
57 | | - "context passed to physical_mem must be the same as returned from get_context()"); |
| 63 | + assert(PhysicalMem.get_context() == Context && |
| 64 | + "context passed to physical_mem must be the same as returned from " |
| 65 | + "get_context()"); |
58 | 66 |
|
59 | | - assert(PhysicalMem.size() == AlignedByteSize && |
60 | | - "size in bytes passed to physical_mem must be the same as returned from size()"); |
| 67 | + assert(PhysicalMem.size() == AlignedByteSize && |
| 68 | + "size in bytes passed to physical_mem must be the same as returned " |
| 69 | + "from size()"); |
61 | 70 | } |
62 | 71 |
|
63 | 72 | { |
64 | | - // Check to see if value returned from a valid call to map() is the same as reinterpret_cast<void *>(ptr). |
65 | | - size_t BytesRequired = NumberOfElementsPerIteration[2] * sizeof(int); |
66 | | - size_t UsedGranularity = GetLCMGranularity(Device, Context); |
67 | | - size_t AlignedByteSize = GetAlignedByteSize(BytesRequired, UsedGranularity); |
| 73 | +    // Check that the value returned from a valid call to map() is the same
| 74 | +    // as reinterpret_cast<void *>(ptr), where ptr is the address passed in.
| 75 | + size_t BytesRequired = NumberOfElementsPerIteration[2] * sizeof(int); |
| 76 | + size_t UsedGranularity = GetLCMGranularity(Device, Context); |
| 77 | + size_t AlignedByteSize = GetAlignedByteSize(BytesRequired, UsedGranularity); |
68 | 78 |
|
69 | | - uintptr_t VirtualMemoryPtr = |
70 | | - syclext::reserve_virtual_mem(0, AlignedByteSize, Context); |
| 79 | + uintptr_t VirtualMemoryPtr = |
| 80 | + syclext::reserve_virtual_mem(0, AlignedByteSize, Context); |
71 | 81 |
|
72 | | - syclext::physical_mem PhysicalMem{Device, Context, AlignedByteSize}; |
| 82 | + syclext::physical_mem PhysicalMem{Device, Context, AlignedByteSize}; |
73 | 83 |
|
74 | | - void *MappedPtr = |
75 | | - PhysicalMem.map(VirtualMemoryPtr, AlignedByteSize, |
76 | | - syclext::address_access_mode::read_write); |
| 84 | + void *MappedPtr = PhysicalMem.map(VirtualMemoryPtr, AlignedByteSize, |
| 85 | + syclext::address_access_mode::read_write); |
77 | 86 |
|
78 | | - assert(MappedPtr == reinterpret_cast<void*>(VirtualMemoryPtr) && |
79 | | - "ptrs arent equal"); |
| 87 | +    assert(MappedPtr == reinterpret_cast<void *>(VirtualMemoryPtr) &&
| 88 | +           "pointer returned by map() must equal the mapped virtual address");
80 | 89 |
|
81 | | - syclext::unmap(MappedPtr, AlignedByteSize, Context); |
82 | | - syclext::free_virtual_mem(VirtualMemoryPtr, AlignedByteSize, Context); |
| 90 | + syclext::unmap(MappedPtr, AlignedByteSize, Context); |
| 91 | + syclext::free_virtual_mem(VirtualMemoryPtr, AlignedByteSize, Context); |
83 | 92 | } |
84 | | - |
85 | | - // Check to see if can change access mode of a virtual memory range and immediately see it changed. |
86 | | - for(const size_t RequiredNumElements: NumberOfElementsPerIteration){ |
| 93 | + |
| 94 | +  // Check that we can change the access mode of a virtual memory range and
| 95 | +  // immediately observe the change.
| 96 | + for (const size_t RequiredNumElements : NumberOfElementsPerIteration) { |
87 | 97 | size_t BytesRequired = RequiredNumElements * sizeof(int); |
88 | 98 | size_t UsedGranularity = GetLCMGranularity(Device, Context); |
89 | 99 | size_t AlignedByteSize = GetAlignedByteSize(BytesRequired, UsedGranularity); |
90 | | - uintptr_t VirtualMemoryPtr = syclext::reserve_virtual_mem(0, AlignedByteSize, Context); |
| 100 | + uintptr_t VirtualMemoryPtr = |
| 101 | + syclext::reserve_virtual_mem(0, AlignedByteSize, Context); |
91 | 102 | syclext::physical_mem PhysicalMem{Device, Context, AlignedByteSize}; |
92 | | - void* MappedPtr = |
93 | | - PhysicalMem.map(VirtualMemoryPtr, AlignedByteSize, |
94 | | - syclext::address_access_mode::read_write); |
95 | | - |
96 | | - |
| 103 | + void *MappedPtr = PhysicalMem.map(VirtualMemoryPtr, AlignedByteSize, |
| 104 | + syclext::address_access_mode::read_write); |
| 105 | + |
97 | 106 | syclext::set_access_mode(MappedPtr, AlignedByteSize, |
98 | | - syclext::address_access_mode::read, Context); |
99 | | - |
100 | | - syclext::address_access_mode CurrentAccessMode = syclext::get_access_mode(MappedPtr, AlignedByteSize, Context); |
| 107 | + syclext::address_access_mode::read, Context); |
| 108 | + |
| 109 | + syclext::address_access_mode CurrentAccessMode = |
| 110 | + syclext::get_access_mode(MappedPtr, AlignedByteSize, Context); |
101 | 111 |
|
102 | 112 | assert(CurrentAccessMode == syclext::address_access_mode::read && |
103 | | - "access mode must be address_access_mode::read after change with set_access_mode()"); |
| 113 | + "access mode must be address_access_mode::read after change with " |
| 114 | + "set_access_mode()"); |
104 | 115 |
|
105 | 116 | syclext::unmap(MappedPtr, AlignedByteSize, Context); |
106 | 117 | syclext::free_virtual_mem(VirtualMemoryPtr, AlignedByteSize, Context); |
107 | 118 | } |
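Reviewer note: after set_access_mode() flips the range to address_access_mode::read, reads should remain valid while writes through the mapping are no longer permitted. A hypothetical read-only access, which would sit inside the loop before unmap(); a real check would first populate the memory while the mode is still read_write:

    const int *ReadOnlyData = reinterpret_cast<const int *>(MappedPtr);
    int FirstElem = 0;
    {
      sycl::buffer<int, 1> OutBuf{&FirstElem, 1};
      Q.submit([&](sycl::handler &CGH) {
        sycl::accessor OutAcc{OutBuf, CGH, sycl::write_only};
        // Reading is allowed in read mode; writing through ReadOnlyData is not.
        CGH.single_task([=] { OutAcc[0] = ReadOnlyData[0]; });
      });
    } // Buffer destruction copies the result back to FirstElem.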
108 | | - |
| 119 | + |
109 | 120 | return 0; |
110 | 121 | } |