/*
 * Mock device emulated by the iommufd selftest driver.
 */
struct mock_dev {
	struct device dev;
	unsigned long flags;	/* MOCK_FLAGS_DEVICE_* bits set at creation */
	int id;			/* allocated from mock_dev_ida */
	/*
	 * Emulated per-device IOTLB/cache tags, indexed by cache_id.
	 * Initialized to IOMMU_TEST_DEV_CACHE_DEFAULT on device creation
	 * and zeroed by mock_viommu_cache_invalidate().
	 */
	u32 cache[MOCK_DEV_CACHE_NUM];
};
static inline struct mock_dev * to_mock_dev (struct device * dev )
@@ -609,9 +610,80 @@ mock_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
609
610
return & mock_nested -> domain ;
610
611
}
611
612
613
/*
 * Handle a user-space cache invalidation request against the mock vIOMMU.
 *
 * @viommu: the virtual IOMMU whose vdevs xarray maps vdev_id to a device
 * @array:  user-provided array of iommu_viommu_invalidate_selftest entries
 *
 * Each entry names a virtual device (vdev_id) and either one cache slot
 * (cache_id) or, with IOMMU_TEST_INVALIDATE_FLAG_ALL, every slot; matched
 * slots are cleared to 0.
 *
 * Returns 0 on success or a negative errno. In both cases array->entry_num
 * is rewritten to the number of entries actually processed, so user space
 * can tell how far a partially-failed request got.
 */
static int mock_viommu_cache_invalidate(struct iommufd_viommu *viommu,
					struct iommu_user_data_array *array)
{
	struct iommu_viommu_invalidate_selftest *cmds;
	struct iommu_viommu_invalidate_selftest *cur;
	struct iommu_viommu_invalidate_selftest *end;
	int rc;

	/* A zero-length array is allowed to validate the array type */
	if (array->entry_num == 0 &&
	    array->type == IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST) {
		array->entry_num = 0;
		return 0;
	}

	cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL);
	if (!cmds)
		return -ENOMEM;
	cur = cmds;
	end = cmds + array->entry_num;

	/* The copy helper below assumes a fixed, packed 12-byte entry */
	static_assert(sizeof(*cmds) == 3 * sizeof(u32));
	rc = iommu_copy_struct_from_full_user_array(
		cmds, sizeof(*cmds), array,
		IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST);
	if (rc)
		goto out;

	while (cur != end) {
		struct mock_dev *mdev;
		struct device *dev;
		int i;

		/* Reject any flag bits this mock implementation doesn't know */
		if (cur->flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) {
			rc = -EOPNOTSUPP;
			goto out;
		}

		if (cur->cache_id > MOCK_DEV_CACHE_ID_MAX) {
			rc = -EINVAL;
			goto out;
		}

		/*
		 * NOTE(review): holding the vdevs xarray lock across the
		 * lookup and the cache writes appears intended to keep the
		 * found device stable while it is modified — confirm that
		 * iommufd_viommu_find_dev() requires this lock to be held.
		 */
		xa_lock(&viommu->vdevs);
		dev = iommufd_viommu_find_dev(viommu,
					      (unsigned long)cur->vdev_id);
		if (!dev) {
			xa_unlock(&viommu->vdevs);
			rc = -EINVAL;
			goto out;
		}
		mdev = container_of(dev, struct mock_dev, dev);

		if (cur->flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) {
			/* Invalidate all cache entries and ignore cache_id */
			for (i = 0; i < MOCK_DEV_CACHE_NUM; i++)
				mdev->cache[i] = 0;
		} else {
			mdev->cache[cur->cache_id] = 0;
		}
		xa_unlock(&viommu->vdevs);

		cur++;
	}
out:
	/* Report how many entries were consumed, on error paths too */
	array->entry_num = cur - cmds;
	kfree(cmds);
	return rc;
}
612
683
/* vIOMMU callbacks implemented by the selftest mock driver */
static struct iommufd_viommu_ops mock_viommu_ops = {
	.destroy = mock_viommu_destroy,
	.alloc_domain_nested = mock_viommu_alloc_domain_nested,
	.cache_invalidate = mock_viommu_cache_invalidate,
};
616
688
617
689
static struct iommufd_viommu * mock_viommu_alloc (struct device * dev ,
@@ -782,7 +854,7 @@ static void mock_dev_release(struct device *dev)
782
854
static struct mock_dev * mock_dev_create (unsigned long dev_flags )
783
855
{
784
856
struct mock_dev * mdev ;
785
- int rc ;
857
+ int rc , i ;
786
858
787
859
if (dev_flags &
788
860
~(MOCK_FLAGS_DEVICE_NO_DIRTY | MOCK_FLAGS_DEVICE_HUGE_IOVA ))
@@ -796,6 +868,8 @@ static struct mock_dev *mock_dev_create(unsigned long dev_flags)
796
868
mdev -> flags = dev_flags ;
797
869
mdev -> dev .release = mock_dev_release ;
798
870
mdev -> dev .bus = & iommufd_mock_bus_type .bus ;
871
+ for (i = 0 ; i < MOCK_DEV_CACHE_NUM ; i ++ )
872
+ mdev -> cache [i ] = IOMMU_TEST_DEV_CACHE_DEFAULT ;
799
873
800
874
rc = ida_alloc (& mock_dev_ida , GFP_KERNEL );
801
875
if (rc < 0 )
0 commit comments