Skip to content

Commit 76f5e9f

Browse files
committed
Merge tag 'iommu-fixes-v5.3-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU fixes from Joerg Roedel:

 - Revert an Intel VT-d patch that caused problems for some users.

 - Removal of a feature in the Intel VT-d driver that was never
   supported in hardware. This qualifies as a fix because the code for
   this feature sets reserved bits in the invalidation queue descriptor,
   causing failed invalidations on real hardware.

 - Two fixes for AMD IOMMU driver to fix a race condition and to add a
   missing IOTLB flush when kernel is booted in kdump mode.

* tag 'iommu-fixes-v5.3-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/amd: Fix race in increase_address_space()
  iommu/amd: Flush old domains in kdump kernel
  iommu/vt-d: Remove global page flush support
  Revert "iommu/vt-d: Avoid duplicated pci dma alias consideration"
2 parents 0445971 + 754265b commit 76f5e9f

File tree

4 files changed

+103
-31
lines changed

4 files changed

+103
-31
lines changed

drivers/iommu/amd_iommu.c

Lines changed: 35 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1143,6 +1143,17 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
11431143
iommu_completion_wait(iommu);
11441144
}
11451145

1146+
static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
1147+
{
1148+
struct iommu_cmd cmd;
1149+
1150+
build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
1151+
dom_id, 1);
1152+
iommu_queue_command(iommu, &cmd);
1153+
1154+
iommu_completion_wait(iommu);
1155+
}
1156+
11461157
static void amd_iommu_flush_all(struct amd_iommu *iommu)
11471158
{
11481159
struct iommu_cmd cmd;
@@ -1424,26 +1435,32 @@ static void free_pagetable(struct protection_domain *domain)
14241435
* another level increases the size of the address space by 9 bits to a size up
14251436
* to 64 bits.
14261437
*/
1427-
static bool increase_address_space(struct protection_domain *domain,
1438+
static void increase_address_space(struct protection_domain *domain,
14281439
gfp_t gfp)
14291440
{
1441+
unsigned long flags;
14301442
u64 *pte;
14311443

1432-
if (domain->mode == PAGE_MODE_6_LEVEL)
1444+
spin_lock_irqsave(&domain->lock, flags);
1445+
1446+
if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
14331447
/* address space already 64 bit large */
1434-
return false;
1448+
goto out;
14351449

14361450
pte = (void *)get_zeroed_page(gfp);
14371451
if (!pte)
1438-
return false;
1452+
goto out;
14391453

14401454
*pte = PM_LEVEL_PDE(domain->mode,
14411455
iommu_virt_to_phys(domain->pt_root));
14421456
domain->pt_root = pte;
14431457
domain->mode += 1;
14441458
domain->updated = true;
14451459

1446-
return true;
1460+
out:
1461+
spin_unlock_irqrestore(&domain->lock, flags);
1462+
1463+
return;
14471464
}
14481465

14491466
static u64 *alloc_pte(struct protection_domain *domain,
@@ -1873,6 +1890,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
18731890
{
18741891
u64 pte_root = 0;
18751892
u64 flags = 0;
1893+
u32 old_domid;
18761894

18771895
if (domain->mode != PAGE_MODE_NONE)
18781896
pte_root = iommu_virt_to_phys(domain->pt_root);
@@ -1922,8 +1940,20 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
19221940
flags &= ~DEV_DOMID_MASK;
19231941
flags |= domain->id;
19241942

1943+
old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
19251944
amd_iommu_dev_table[devid].data[1] = flags;
19261945
amd_iommu_dev_table[devid].data[0] = pte_root;
1946+
1947+
/*
1948+
* A kdump kernel might be replacing a domain ID that was copied from
1949+
* the previous kernel--if so, it needs to flush the translation cache
1950+
* entries for the old domain ID that is being overwritten
1951+
*/
1952+
if (old_domid) {
1953+
struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
1954+
1955+
amd_iommu_flush_tlb_domid(iommu, old_domid);
1956+
}
19271957
}
19281958

19291959
static void clear_dte_entry(u16 devid)

drivers/iommu/intel-iommu.c

Lines changed: 53 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -339,6 +339,8 @@ static void domain_exit(struct dmar_domain *domain);
339339
static void domain_remove_dev_info(struct dmar_domain *domain);
340340
static void dmar_remove_one_dev_info(struct device *dev);
341341
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
342+
static void domain_context_clear(struct intel_iommu *iommu,
343+
struct device *dev);
342344
static int domain_detach_iommu(struct dmar_domain *domain,
343345
struct intel_iommu *iommu);
344346
static bool device_is_rmrr_locked(struct device *dev);
@@ -2105,9 +2107,26 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
21052107
return ret;
21062108
}
21072109

2110+
struct domain_context_mapping_data {
2111+
struct dmar_domain *domain;
2112+
struct intel_iommu *iommu;
2113+
struct pasid_table *table;
2114+
};
2115+
2116+
static int domain_context_mapping_cb(struct pci_dev *pdev,
2117+
u16 alias, void *opaque)
2118+
{
2119+
struct domain_context_mapping_data *data = opaque;
2120+
2121+
return domain_context_mapping_one(data->domain, data->iommu,
2122+
data->table, PCI_BUS_NUM(alias),
2123+
alias & 0xff);
2124+
}
2125+
21082126
static int
21092127
domain_context_mapping(struct dmar_domain *domain, struct device *dev)
21102128
{
2129+
struct domain_context_mapping_data data;
21112130
struct pasid_table *table;
21122131
struct intel_iommu *iommu;
21132132
u8 bus, devfn;
@@ -2117,7 +2136,17 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
21172136
return -ENODEV;
21182137

21192138
table = intel_pasid_get_table(dev);
2120-
return domain_context_mapping_one(domain, iommu, table, bus, devfn);
2139+
2140+
if (!dev_is_pci(dev))
2141+
return domain_context_mapping_one(domain, iommu, table,
2142+
bus, devfn);
2143+
2144+
data.domain = domain;
2145+
data.iommu = iommu;
2146+
data.table = table;
2147+
2148+
return pci_for_each_dma_alias(to_pci_dev(dev),
2149+
&domain_context_mapping_cb, &data);
21212150
}
21222151

21232152
static int domain_context_mapped_cb(struct pci_dev *pdev,
@@ -4759,6 +4788,28 @@ int __init intel_iommu_init(void)
47594788
return ret;
47604789
}
47614790

4791+
static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4792+
{
4793+
struct intel_iommu *iommu = opaque;
4794+
4795+
domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4796+
return 0;
4797+
}
4798+
4799+
/*
4800+
* NB - intel-iommu lacks any sort of reference counting for the users of
4801+
* dependent devices. If multiple endpoints have intersecting dependent
4802+
* devices, unbinding the driver from any one of them will possibly leave
4803+
* the others unable to operate.
4804+
*/
4805+
static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
4806+
{
4807+
if (!iommu || !dev || !dev_is_pci(dev))
4808+
return;
4809+
4810+
pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
4811+
}
4812+
47624813
static void __dmar_remove_one_dev_info(struct device_domain_info *info)
47634814
{
47644815
struct dmar_domain *domain;
@@ -4779,7 +4830,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
47794830
PASID_RID2PASID);
47804831

47814832
iommu_disable_dev_iotlb(info);
4782-
domain_context_clear_one(iommu, info->bus, info->devfn);
4833+
domain_context_clear(iommu, info->dev);
47834834
intel_pasid_free_table(info->dev);
47844835
}
47854836

drivers/iommu/intel-svm.c

Lines changed: 15 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -100,24 +100,19 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
100100
}
101101

102102
static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
103-
unsigned long address, unsigned long pages, int ih, int gl)
103+
unsigned long address, unsigned long pages, int ih)
104104
{
105105
struct qi_desc desc;
106106

107-
if (pages == -1) {
108-
/* For global kernel pages we have to flush them in *all* PASIDs
109-
* because that's the only option the hardware gives us. Despite
110-
* the fact that they are actually only accessible through one. */
111-
if (gl)
112-
desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
113-
QI_EIOTLB_DID(sdev->did) |
114-
QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) |
115-
QI_EIOTLB_TYPE;
116-
else
117-
desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
118-
QI_EIOTLB_DID(sdev->did) |
119-
QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
120-
QI_EIOTLB_TYPE;
107+
/*
108+
* Do PASID granu IOTLB invalidation if page selective capability is
109+
* not available.
110+
*/
111+
if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) {
112+
desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
113+
QI_EIOTLB_DID(sdev->did) |
114+
QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
115+
QI_EIOTLB_TYPE;
121116
desc.qw1 = 0;
122117
} else {
123118
int mask = ilog2(__roundup_pow_of_two(pages));
@@ -127,7 +122,6 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
127122
QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
128123
QI_EIOTLB_TYPE;
129124
desc.qw1 = QI_EIOTLB_ADDR(address) |
130-
QI_EIOTLB_GL(gl) |
131125
QI_EIOTLB_IH(ih) |
132126
QI_EIOTLB_AM(mask);
133127
}
@@ -162,13 +156,13 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
162156
}
163157

164158
static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
165-
unsigned long pages, int ih, int gl)
159+
unsigned long pages, int ih)
166160
{
167161
struct intel_svm_dev *sdev;
168162

169163
rcu_read_lock();
170164
list_for_each_entry_rcu(sdev, &svm->devs, list)
171-
intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl);
165+
intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
172166
rcu_read_unlock();
173167
}
174168

@@ -180,7 +174,7 @@ static void intel_invalidate_range(struct mmu_notifier *mn,
180174
struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
181175

182176
intel_flush_svm_range(svm, start,
183-
(end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0);
177+
(end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
184178
}
185179

186180
static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -203,7 +197,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
203197
rcu_read_lock();
204198
list_for_each_entry_rcu(sdev, &svm->devs, list) {
205199
intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid);
206-
intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
200+
intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
207201
}
208202
rcu_read_unlock();
209203

@@ -425,7 +419,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
425419
* large and has to be physically contiguous. So it's
426420
* hard to be as defensive as we might like. */
427421
intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
428-
intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
422+
intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
429423
kfree_rcu(sdev, rcu);
430424

431425
if (list_empty(&svm->devs)) {

include/linux/intel-iommu.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -346,7 +346,6 @@ enum {
346346
#define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1))
347347

348348
#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
349-
#define QI_EIOTLB_GL(gl) (((u64)gl) << 7)
350349
#define QI_EIOTLB_IH(ih) (((u64)ih) << 6)
351350
#define QI_EIOTLB_AM(am) (((u64)am))
352351
#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32)
@@ -378,8 +377,6 @@ enum {
378377
#define QI_RESP_INVALID 0x1
379378
#define QI_RESP_FAILURE 0xf
380379

381-
#define QI_GRAN_ALL_ALL 0
382-
#define QI_GRAN_NONG_ALL 1
383380
#define QI_GRAN_NONG_PASID 2
384381
#define QI_GRAN_PSI_PASID 3
385382

0 commit comments

Comments (0)