Commit 8744daf

Jacob Pan authored and Joerg Roedel committed
iommu/vt-d: Remove global page flush support
Global pages support is removed from VT-d spec 3.0. Since the global pages (G) flag only affects first-level paging structures, and DMA requests with PASID are only supported by VT-d spec 3.0 and onward, we can safely remove global pages support.

For kernel shared virtual address IOTLB invalidation, PASID granularity and page-selective-within-PASID granularity will be used; there is no global granularity. Without this fix, IOTLB invalidation causes an invalid descriptor error in the queued invalidation (QI) interface.

Fixes: 1c4f88b ("iommu/vt-d: Shared virtual address in scalable mode")
Reported-by: Sanjay K Kumar <[email protected]>
Signed-off-by: Jacob Pan <[email protected]>
Signed-off-by: Joerg Roedel <[email protected]>
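As context for the diff below: after this change only two extended-IOTLB invalidation granularities remain, PASID-selective (non-global within a PASID) and page-selective within PASID. The stand-alone C sketch that follows illustrates, for reading purposes only, how the two descriptor forms are composed. The PASID, IH, AM and address encodings mirror the intel-iommu.h definitions visible in this diff; the QI_EIOTLB_DID, QI_EIOTLB_GRAN and QI_EIOTLB_TYPE encodings, the addr_mask() helper, and the omission of the cap_pgsel_inv() capability fallback are assumptions made only so the sketch is self-contained and compiles.

/*
 * Hypothetical, stand-alone sketch of the two extended-IOTLB descriptor
 * forms left after this patch.  It is NOT the kernel code: the DID, GRAN
 * and TYPE encodings and addr_mask() are assumptions, and the
 * cap_pgsel_inv() capability fallback from the real code is omitted.
 */
#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT		12
#define VTD_PAGE_MASK		(~((1ULL << VTD_PAGE_SHIFT) - 1))

/* Encodings visible in the include/linux/intel-iommu.h hunks below. */
#define QI_EIOTLB_ADDR(addr)	((uint64_t)(addr) & VTD_PAGE_MASK)
#define QI_EIOTLB_IH(ih)	(((uint64_t)(ih)) << 6)
#define QI_EIOTLB_AM(am)	((uint64_t)(am))
#define QI_EIOTLB_PASID(p)	(((uint64_t)(p)) << 32)
#define QI_GRAN_NONG_PASID	2
#define QI_GRAN_PSI_PASID	3

/* Assumed encodings (not shown in this diff), only so the sketch compiles. */
#define QI_EIOTLB_DID(did)	(((uint64_t)(did)) << 16)
#define QI_EIOTLB_GRAN(g)	(((uint64_t)(g)) << 4)
#define QI_EIOTLB_TYPE		0x6ULL

struct qi_desc { uint64_t qw0, qw1; };

/* Address-mask field: log2 of the page count rounded up to a power of two,
 * standing in for ilog2(__roundup_pow_of_two(pages)) in the kernel. */
static int addr_mask(unsigned long pages)
{
	int mask = 0;

	while ((1UL << mask) < pages)
		mask++;
	return mask;
}

static struct qi_desc build_desc(uint32_t pasid, uint16_t did,
				 uint64_t address, long pages, int ih)
{
	struct qi_desc desc;

	if (pages == -1) {
		/* Flush everything mapped under this PASID; there is no
		 * global granularity any more. */
		desc.qw0 = QI_EIOTLB_PASID(pasid) | QI_EIOTLB_DID(did) |
			   QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | QI_EIOTLB_TYPE;
		desc.qw1 = 0;
	} else {
		/* Page-selective within PASID. */
		desc.qw0 = QI_EIOTLB_PASID(pasid) | QI_EIOTLB_DID(did) |
			   QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) | QI_EIOTLB_TYPE;
		desc.qw1 = QI_EIOTLB_ADDR(address) | QI_EIOTLB_IH(ih) |
			   QI_EIOTLB_AM(addr_mask(pages));
	}
	return desc;
}

int main(void)
{
	struct qi_desc full = build_desc(5, 0x42, 0, -1, 0);
	struct qi_desc psi  = build_desc(5, 0x42, 0x7f8000, 3, 1);

	printf("PASID-selective: qw0=%#llx qw1=%#llx\n",
	       (unsigned long long)full.qw0, (unsigned long long)full.qw1);
	printf("page-selective:  qw0=%#llx qw1=%#llx\n",
	       (unsigned long long)psi.qw0, (unsigned long long)psi.qw1);
	return 0;
}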
1 parent 0ce4a85 commit 8744daf

2 files changed, +15 -24 lines changed


drivers/iommu/intel-svm.c

Lines changed: 15 additions & 21 deletions
@@ -100,24 +100,19 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
 }
 
 static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
-				unsigned long address, unsigned long pages, int ih, int gl)
+				unsigned long address, unsigned long pages, int ih)
 {
 	struct qi_desc desc;
 
-	if (pages == -1) {
-		/* For global kernel pages we have to flush them in *all* PASIDs
-		 * because that's the only option the hardware gives us. Despite
-		 * the fact that they are actually only accessible through one. */
-		if (gl)
-			desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
-					QI_EIOTLB_DID(sdev->did) |
-					QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) |
-					QI_EIOTLB_TYPE;
-		else
-			desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
-					QI_EIOTLB_DID(sdev->did) |
-					QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
-					QI_EIOTLB_TYPE;
+	/*
+	 * Do PASID granu IOTLB invalidation if page selective capability is
+	 * not available.
+	 */
+	if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) {
+		desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
+				QI_EIOTLB_DID(sdev->did) |
+				QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
+				QI_EIOTLB_TYPE;
 		desc.qw1 = 0;
 	} else {
 		int mask = ilog2(__roundup_pow_of_two(pages));
@@ -127,7 +122,6 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
 				QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
 				QI_EIOTLB_TYPE;
 		desc.qw1 = QI_EIOTLB_ADDR(address) |
-				QI_EIOTLB_GL(gl) |
 				QI_EIOTLB_IH(ih) |
 				QI_EIOTLB_AM(mask);
 	}
@@ -162,13 +156,13 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
 }
 
 static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
-				unsigned long pages, int ih, int gl)
+				unsigned long pages, int ih)
 {
 	struct intel_svm_dev *sdev;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(sdev, &svm->devs, list)
-		intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl);
+		intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
 	rcu_read_unlock();
 }
 
@@ -180,7 +174,7 @@ static void intel_invalidate_range(struct mmu_notifier *mn,
 	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
 
 	intel_flush_svm_range(svm, start,
-			      (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0);
+			      (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
 }
 
 static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -203,7 +197,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 	rcu_read_lock();
 	list_for_each_entry_rcu(sdev, &svm->devs, list) {
 		intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid);
-		intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+		intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
 	}
 	rcu_read_unlock();
 
@@ -425,7 +419,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
 		 * large and has to be physically contiguous. So it's
 		 * hard to be as defensive as we might like. */
 		intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
-		intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+		intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
 		kfree_rcu(sdev, rcu);
 
 		if (list_empty(&svm->devs)) {

include/linux/intel-iommu.h

Lines changed: 0 additions & 3 deletions
@@ -346,7 +346,6 @@ enum {
 #define QI_PC_PASID_SEL		(QI_PC_TYPE | QI_PC_GRAN(1))
 
 #define QI_EIOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
-#define QI_EIOTLB_GL(gl)	(((u64)gl) << 7)
 #define QI_EIOTLB_IH(ih)	(((u64)ih) << 6)
 #define QI_EIOTLB_AM(am)	(((u64)am))
 #define QI_EIOTLB_PASID(pasid)	(((u64)pasid) << 32)
@@ -378,8 +377,6 @@ enum {
 #define QI_RESP_INVALID		0x1
 #define QI_RESP_FAILURE		0xf
 
-#define QI_GRAN_ALL_ALL			0
-#define QI_GRAN_NONG_ALL		1
 #define QI_GRAN_NONG_PASID		2
 #define QI_GRAN_PSI_PASID		3
 
