@@ -112,6 +112,8 @@ FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous, 0, 0);
 FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_2mb, SZ_2M, MAP_HUGETLB | MAP_HUGE_2MB);
 FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_1gb, SZ_1G, MAP_HUGETLB | MAP_HUGE_1GB);
 
+#undef FIXTURE_VARIANT_ADD_IOMMU_MODE
+
 FIXTURE_SETUP(vfio_dma_mapping_test)
 {
 	self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode);
@@ -195,6 +197,94 @@ TEST_F(vfio_dma_mapping_test, dma_map_unmap)
 	ASSERT_TRUE(!munmap(region.vaddr, size));
 }
 
+FIXTURE(vfio_dma_map_limit_test) {
+	struct vfio_pci_device *device;
+	struct vfio_dma_region region;
+	size_t mmap_size;
+};
+
+FIXTURE_VARIANT(vfio_dma_map_limit_test) {
+	const char *iommu_mode;
+};
+
+#define FIXTURE_VARIANT_ADD_IOMMU_MODE(_iommu_mode)		\
+FIXTURE_VARIANT_ADD(vfio_dma_map_limit_test, _iommu_mode) {	\
+	.iommu_mode = #_iommu_mode,				\
+}
+
+FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES();
+
+#undef FIXTURE_VARIANT_ADD_IOMMU_MODE
+
+FIXTURE_SETUP(vfio_dma_map_limit_test)
+{
+	struct vfio_dma_region *region = &self->region;
+	u64 region_size = getpagesize();
+
+	/*
+	 * Over-allocate mmap by double the size to provide enough backing vaddr
+	 * for overflow tests
+	 */
+	self->mmap_size = 2 * region_size;
+
+	self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode);
+	region->vaddr = mmap(NULL, self->mmap_size, PROT_READ | PROT_WRITE,
+			     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+	ASSERT_NE(region->vaddr, MAP_FAILED);
+
+	/* One page prior to the end of address space */
+	region->iova = ~(iova_t)0 & ~(region_size - 1);
+	region->size = region_size;
+}
+
+FIXTURE_TEARDOWN(vfio_dma_map_limit_test)
+{
+	vfio_pci_device_cleanup(self->device);
+	ASSERT_EQ(munmap(self->region.vaddr, self->mmap_size), 0);
+}
+
+TEST_F(vfio_dma_map_limit_test, unmap_range)
+{
+	struct vfio_dma_region *region = &self->region;
+	u64 unmapped;
+	int rc;
+
+	vfio_pci_dma_map(self->device, region);
+	ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr));
+
+	rc = __vfio_pci_dma_unmap(self->device, region, &unmapped);
+	ASSERT_EQ(rc, 0);
+	ASSERT_EQ(unmapped, region->size);
+}
+
+TEST_F(vfio_dma_map_limit_test, unmap_all)
+{
+	struct vfio_dma_region *region = &self->region;
+	u64 unmapped;
+	int rc;
+
+	vfio_pci_dma_map(self->device, region);
+	ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr));
+
+	rc = __vfio_pci_dma_unmap_all(self->device, &unmapped);
+	ASSERT_EQ(rc, 0);
+	ASSERT_EQ(unmapped, region->size);
+}
+
+TEST_F(vfio_dma_map_limit_test, overflow)
+{
+	struct vfio_dma_region *region = &self->region;
+	int rc;
+
+	region->size = self->mmap_size;
+
+	rc = __vfio_pci_dma_map(self->device, region);
+	ASSERT_EQ(rc, -EOVERFLOW);
+
+	rc = __vfio_pci_dma_unmap(self->device, region, NULL);
+	ASSERT_EQ(rc, -EOVERFLOW);
+}
+
 int main(int argc, char *argv[])
 {
 	device_bdf = vfio_selftests_get_bdf(&argc, argv);
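
For reference, a minimal standalone sketch (not part of the patch; illustrative only) of the IOVA arithmetic the fixture relies on: region->iova is placed at the last page-aligned address, so a one-page mapping ends exactly at the top of the 64-bit IOVA space, while the two-page mapping attempted by the overflow test runs past it and is expected to fail with -EOVERFLOW.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t page_size = (uint64_t)sysconf(_SC_PAGESIZE);

	/* Last page-aligned IOVA, mirroring ~(iova_t)0 & ~(region_size - 1). */
	uint64_t iova = ~(uint64_t)0 & ~(page_size - 1);

	/* One page fits exactly: the end address wraps to 0 (i.e. 2^64). */
	printf("iova           = 0x%016" PRIx64 "\n", iova);
	printf("iova + 1 page  = 0x%016" PRIx64 "\n", iova + page_size);

	/* Two pages (the overflow test's region->size) run past the limit. */
	printf("iova + 2 pages = 0x%016" PRIx64 " (wrapped)\n", iova + 2 * page_size);
	return 0;
}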