@@ -41,7 +41,6 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de
41
41
}
42
42
iommu_mm -> pasid = pasid ;
43
43
INIT_LIST_HEAD (& iommu_mm -> sva_domains );
44
- INIT_LIST_HEAD (& iommu_mm -> sva_handles );
45
44
/*
46
45
* Make sure the write to mm->iommu_mm is not reordered in front of
47
46
* initialization to iommu_mm fields. If it does, readers may see a
@@ -69,11 +68,16 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de
69
68
*/
70
69
struct iommu_sva * iommu_sva_bind_device (struct device * dev , struct mm_struct * mm )
71
70
{
71
+ struct iommu_group * group = dev -> iommu_group ;
72
+ struct iommu_attach_handle * attach_handle ;
72
73
struct iommu_mm_data * iommu_mm ;
73
74
struct iommu_domain * domain ;
74
75
struct iommu_sva * handle ;
75
76
int ret ;
76
77
78
+ if (!group )
79
+ return ERR_PTR (- ENODEV );
80
+
77
81
mutex_lock (& iommu_sva_lock );
78
82
79
83
/* Allocate mm->pasid if necessary. */
@@ -83,12 +87,22 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
83
87
goto out_unlock ;
84
88
}
85
89
86
- list_for_each_entry (handle , & mm -> iommu_mm -> sva_handles , handle_item ) {
87
- if (handle -> dev == dev ) {
88
- refcount_inc (& handle -> users );
89
- mutex_unlock (& iommu_sva_lock );
90
- return handle ;
90
+ /* A bond already exists, just take a reference. */
91
+ attach_handle = iommu_attach_handle_get (group , iommu_mm -> pasid , IOMMU_DOMAIN_SVA );
92
+ if (!IS_ERR (attach_handle )) {
93
+ handle = container_of (attach_handle , struct iommu_sva , handle );
94
+ if (attach_handle -> domain -> mm != mm ) {
95
+ ret = - EBUSY ;
96
+ goto out_unlock ;
91
97
}
98
+ refcount_inc (& handle -> users );
99
+ mutex_unlock (& iommu_sva_lock );
100
+ return handle ;
101
+ }
102
+
103
+ if (PTR_ERR (attach_handle ) != - ENOENT ) {
104
+ ret = PTR_ERR (attach_handle );
105
+ goto out_unlock ;
92
106
}
93
107
94
108
handle = kzalloc (sizeof (* handle ), GFP_KERNEL );
@@ -99,7 +113,6 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
99
113
100
114
/* Search for an existing domain. */
101
115
list_for_each_entry (domain , & mm -> iommu_mm -> sva_domains , next ) {
102
- handle -> handle .domain = domain ;
103
116
ret = iommu_attach_device_pasid (domain , dev , iommu_mm -> pasid ,
104
117
& handle -> handle );
105
118
if (!ret ) {
@@ -115,7 +128,6 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
115
128
goto out_free_handle ;
116
129
}
117
130
118
- handle -> handle .domain = domain ;
119
131
ret = iommu_attach_device_pasid (domain , dev , iommu_mm -> pasid ,
120
132
& handle -> handle );
121
133
if (ret )
@@ -125,7 +137,6 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
125
137
126
138
out :
127
139
refcount_set (& handle -> users , 1 );
128
- list_add (& handle -> handle_item , & mm -> iommu_mm -> sva_handles );
129
140
mutex_unlock (& iommu_sva_lock );
130
141
handle -> dev = dev ;
131
142
return handle ;
@@ -159,7 +170,6 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
159
170
mutex_unlock (& iommu_sva_lock );
160
171
return ;
161
172
}
162
- list_del (& handle -> handle_item );
163
173
164
174
iommu_detach_device_pasid (domain , dev , iommu_mm -> pasid );
165
175
if (-- domain -> users == 0 ) {
0 commit comments