@@ -168,88 +168,25 @@ void intel_svm_check(struct intel_iommu *iommu)
 		iommu->flags |= VTD_FLAG_SVM_CAPABLE;
 }
 
-static void __flush_svm_range_dev(struct intel_svm *svm,
-				  struct intel_svm_dev *sdev,
-				  unsigned long address,
-				  unsigned long pages, int ih)
-{
-	struct device_domain_info *info = dev_iommu_priv_get(sdev->dev);
-
-	if (WARN_ON(!pages))
-		return;
-
-	qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);
-	if (info->ats_enabled) {
-		qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
-					 svm->pasid, sdev->qdep, address,
-					 order_base_2(pages));
-		quirk_extra_dev_tlb_flush(info, address, order_base_2(pages),
-					  svm->pasid, sdev->qdep);
-	}
-}
-
-static void intel_flush_svm_range_dev(struct intel_svm *svm,
-				      struct intel_svm_dev *sdev,
-				      unsigned long address,
-				      unsigned long pages, int ih)
-{
-	unsigned long shift = ilog2(__roundup_pow_of_two(pages));
-	unsigned long align = (1ULL << (VTD_PAGE_SHIFT + shift));
-	unsigned long start = ALIGN_DOWN(address, align);
-	unsigned long end = ALIGN(address + (pages << VTD_PAGE_SHIFT), align);
-
-	while (start < end) {
-		__flush_svm_range_dev(svm, sdev, start, align >> VTD_PAGE_SHIFT, ih);
-		start += align;
-	}
-}
-
-static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
-				  unsigned long pages, int ih)
-{
-	struct intel_svm_dev *sdev;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(sdev, &svm->devs, list)
-		intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
-	rcu_read_unlock();
-}
-
-static void intel_flush_svm_all(struct intel_svm *svm)
-{
-	struct device_domain_info *info;
-	struct intel_svm_dev *sdev;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(sdev, &svm->devs, list) {
-		info = dev_iommu_priv_get(sdev->dev);
-
-		qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, 0, -1UL, 0);
-		if (info->ats_enabled) {
-			qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
-						 svm->pasid, sdev->qdep,
-						 0, 64 - VTD_PAGE_SHIFT);
-			quirk_extra_dev_tlb_flush(info, 0, 64 - VTD_PAGE_SHIFT,
-						  svm->pasid, sdev->qdep);
-		}
-	}
-	rcu_read_unlock();
-}
-
 /* Pages have been freed at this point */
 static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
 						 struct mm_struct *mm,
 						 unsigned long start, unsigned long end)
 {
 	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
+	struct dmar_domain *domain = svm->domain;
 
 	if (start == 0 && end == -1UL) {
-		intel_flush_svm_all(svm);
+		cache_tag_flush_all(domain);
 		return;
 	}
 
-	intel_flush_svm_range(svm, start,
-			      (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
+	/*
+	 * mm_types defines vm_end as the first byte after the end address,
+	 * whereas the IOMMU subsystem uses the last address of an address
+	 * range; hence the end - 1 below.
+	 */
+	cache_tag_flush_range(domain, start, end - 1, 0);
 }
 
 static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
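Note: the hunk above replaces the per-device RCU walk with the domain's cache
tags, which are recorded per (device, PASID) at attach time, so a flush only
needs the domain and the address range. The removed intel_flush_svm_range()
also did the power-of-two alignment of (address, pages) itself; with this
change that rounding is assumed to happen inside the helper. A sketch of the
helper prototypes as this series appears to use them; treat the exact
signatures as assumptions:

	/* Flush every cache tag attached to this domain. */
	void cache_tag_flush_all(struct dmar_domain *domain);

	/*
	 * Flush an address range, where end is inclusive (unlike the
	 * mmu_notifier convention) and ih is the invalidation hint.
	 */
	void cache_tag_flush_range(struct dmar_domain *domain,
				   unsigned long start, unsigned long end,
				   int ih);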
@@ -336,6 +273,7 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
 	INIT_LIST_HEAD_RCU(&svm->devs);
 
 	svm->notifier.ops = &intel_mmuops;
+	svm->domain = to_dmar_domain(domain);
 	ret = mmu_notifier_register(&svm->notifier, mm);
 	if (ret) {
 		kfree(svm);
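The notifier callback above dereferences svm->domain, so the backpointer must
be populated before mmu_notifier_register() makes the notifier live. For
context, a minimal sketch of the companion struct change this assumes (member
placement hypothetical):

	struct intel_svm {
		struct mmu_notifier	notifier;
		struct mm_struct	*mm;
		u32			pasid;
		/* assumed new member: domain whose cache tags get flushed */
		struct dmar_domain	*domain;
		struct list_head	devs;
	};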
@@ -747,6 +685,7 @@ struct iommu_domain *intel_svm_domain_alloc(void)
 	if (!domain)
 		return NULL;
 	domain->domain.ops = &intel_svm_domain_ops;
+	domain->use_first_level = true;
 	INIT_LIST_HEAD(&domain->cache_tags);
 	spin_lock_init(&domain->cache_lock);
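An SVA domain shares the CPU page table, which VT-d walks as a first-stage
(first-level) translation, so the domain is marked use_first_level and its
cache-tag list and lock are initialized for the flush helpers above. A
simplified sketch of the dispatch this flag is assumed to drive inside the
cache-tag code (the real code iterates domain->cache_tags; the tag fields
here are assumptions):

	if (domain->use_first_level)
		/* first-stage: PASID-granular IOTLB invalidation */
		qi_flush_piotlb(iommu, tag->domain_id, tag->pasid,
				addr, pages, ih);
	else
		/* second-stage: page-selective IOTLB invalidation */
		iommu->flush.flush_iotlb(iommu, tag->domain_id, addr,
					 mask, DMA_TLB_PSI_FLUSH);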