@@ -134,6 +134,7 @@ to_mock_domain(struct iommu_domain *domain)
134
134
135
135
/*
 * A stage-1 (nested) translation domain for the iommufd selftest driver.
 * Always sits on top of a stage-2 mock_iommu_domain (@parent).
 */
struct mock_iommu_domain_nested {
	struct iommu_domain domain;
	/* vIOMMU this nested domain was allocated under, if any */
	struct mock_viommu *mock_viommu;
	/* stage-2 domain providing the underlying translation */
	struct mock_iommu_domain *parent;
	/* per-domain IOTLB tag slots used by the selftests */
	u32 iotlb[MOCK_NESTED_DOMAIN_IOTLB_NUM];
};
@@ -144,6 +145,16 @@ to_mock_nested(struct iommu_domain *domain)
144
145
return container_of (domain , struct mock_iommu_domain_nested , domain );
145
146
}
146
147
148
/*
 * Selftest vIOMMU object. @core must stay the first member so the
 * container_of() in to_mock_viommu() and the iommufd_viommu_alloc()
 * embedding both work.
 */
struct mock_viommu {
	struct iommufd_viommu core;
	/* stage-2 domain that nested domains of this vIOMMU hang off */
	struct mock_iommu_domain *s2_parent;
};

/* Convert a core iommufd_viommu back to the driver's wrapper struct. */
static inline struct mock_viommu *to_mock_viommu(struct iommufd_viommu *viommu)
{
	return container_of(viommu, struct mock_viommu, core);
}
157
+
147
158
/* Object kinds tracked by the selftest driver (only idev so far). */
enum selftest_obj_type {
	TYPE_IDEV,
};
@@ -569,6 +580,61 @@ static int mock_dev_disable_feat(struct device *dev, enum iommu_dev_features fea
569
580
return 0 ;
570
581
}
571
582
583
/*
 * ->destroy hook: drop the reference on the backing mock IOMMU device that
 * mock_viommu_alloc() took. The last dropper signals @complete so the
 * device teardown path can finish waiting.
 */
static void mock_viommu_destroy(struct iommufd_viommu *viommu)
{
	struct mock_iommu_device *mock_iommu = container_of(
		viommu->iommu_dev, struct mock_iommu_device, iommu_dev);

	if (refcount_dec_and_test(&mock_iommu->users))
		complete(&mock_iommu->complete);

	/* iommufd core frees mock_viommu and viommu */
}
593
+
594
/*
 * ->alloc_domain_nested hook: build a stage-1 domain under this vIOMMU.
 *
 * Only IOMMU_HWPT_FAULT_ID_VALID is accepted in @flags; anything else is
 * rejected with -EOPNOTSUPP. The actual allocation is shared with the
 * non-vIOMMU path via __mock_domain_alloc_nested(); here we additionally
 * link the new domain back to the vIOMMU and its stage-2 parent.
 */
static struct iommu_domain *
mock_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
				const struct iommu_user_data *user_data)
{
	struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
	struct mock_iommu_domain_nested *mock_nested;

	if (flags & ~IOMMU_HWPT_FAULT_ID_VALID)
		return ERR_PTR(-EOPNOTSUPP);

	mock_nested = __mock_domain_alloc_nested(user_data);
	if (IS_ERR(mock_nested))
		return ERR_CAST(mock_nested);
	mock_nested->mock_viommu = mock_viommu;
	mock_nested->parent = mock_viommu->s2_parent;
	return &mock_nested->domain;
}
611
+
612
+ static struct iommufd_viommu_ops mock_viommu_ops = {
613
+ .destroy = mock_viommu_destroy ,
614
+ .alloc_domain_nested = mock_viommu_alloc_domain_nested ,
615
+ };
616
+
617
+ static struct iommufd_viommu * mock_viommu_alloc (struct device * dev ,
618
+ struct iommu_domain * domain ,
619
+ struct iommufd_ctx * ictx ,
620
+ unsigned int viommu_type )
621
+ {
622
+ struct mock_iommu_device * mock_iommu =
623
+ iommu_get_iommu_dev (dev , struct mock_iommu_device , iommu_dev );
624
+ struct mock_viommu * mock_viommu ;
625
+
626
+ if (viommu_type != IOMMU_VIOMMU_TYPE_SELFTEST )
627
+ return ERR_PTR (- EOPNOTSUPP );
628
+
629
+ mock_viommu = iommufd_viommu_alloc (ictx , struct mock_viommu , core ,
630
+ & mock_viommu_ops );
631
+ if (IS_ERR (mock_viommu ))
632
+ return ERR_CAST (mock_viommu );
633
+
634
+ refcount_inc (& mock_iommu -> users );
635
+ return & mock_viommu -> core ;
636
+ }
637
+
572
638
static const struct iommu_ops mock_ops = {
573
639
/*
574
640
* IOMMU_DOMAIN_BLOCKED cannot be returned from def_domain_type()
@@ -588,6 +654,7 @@ static const struct iommu_ops mock_ops = {
588
654
.dev_enable_feat = mock_dev_enable_feat ,
589
655
.dev_disable_feat = mock_dev_disable_feat ,
590
656
.user_pasid_table = true,
657
+ .viommu_alloc = mock_viommu_alloc ,
591
658
.default_domain_ops =
592
659
& (struct iommu_domain_ops ){
593
660
.free = mock_domain_free ,
0 commit comments