Skip to content

Commit f701c9f

Browse files
TinaZhangZW authored and joergroedel committed
iommu/vt-d: Factor out invalidation descriptor composition
Separate the logic for constructing IOTLB and device TLB invalidation descriptors from the qi_flush interfaces. New helpers, qi_desc_*(), are introduced to encapsulate this common functionality.

Moving the descriptor composition code into these helpers enables its reuse in the upcoming qi_batch interfaces. No functional changes are intended.

Signed-off-by: Tina Zhang <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Lu Baolu <[email protected]>
Signed-off-by: Joerg Roedel <[email protected]>
1 parent 1f5e307 commit f701c9f

File tree

2 files changed

+115
-87
lines changed

2 files changed

+115
-87
lines changed

drivers/iommu/intel/dmar.c

Lines changed: 6 additions & 87 deletions
Original file line numberDiff line numberDiff line change
@@ -1526,24 +1526,9 @@ void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
15261526
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
15271527
unsigned int size_order, u64 type)
15281528
{
1529-
u8 dw = 0, dr = 0;
1530-
15311529
struct qi_desc desc;
1532-
int ih = 0;
1533-
1534-
if (cap_write_drain(iommu->cap))
1535-
dw = 1;
1536-
1537-
if (cap_read_drain(iommu->cap))
1538-
dr = 1;
1539-
1540-
desc.qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
1541-
| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
1542-
desc.qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
1543-
| QI_IOTLB_AM(size_order);
1544-
desc.qw2 = 0;
1545-
desc.qw3 = 0;
15461530

1531+
qi_desc_iotlb(iommu, did, addr, size_order, type, &desc);
15471532
qi_submit_sync(iommu, &desc, 1, 0);
15481533
}
15491534

@@ -1561,20 +1546,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
15611546
if (!(iommu->gcmd & DMA_GCMD_TE))
15621547
return;
15631548

1564-
if (mask) {
1565-
addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
1566-
desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
1567-
} else
1568-
desc.qw1 = QI_DEV_IOTLB_ADDR(addr);
1569-
1570-
if (qdep >= QI_DEV_IOTLB_MAX_INVS)
1571-
qdep = 0;
1572-
1573-
desc.qw0 = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
1574-
QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
1575-
desc.qw2 = 0;
1576-
desc.qw3 = 0;
1577-
1549+
qi_desc_dev_iotlb(sid, pfsid, qdep, addr, mask, &desc);
15781550
qi_submit_sync(iommu, &desc, 1, 0);
15791551
}
15801552

@@ -1594,36 +1566,14 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
15941566
return;
15951567
}
15961568

1597-
if (npages == -1) {
1598-
desc.qw0 = QI_EIOTLB_PASID(pasid) |
1599-
QI_EIOTLB_DID(did) |
1600-
QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
1601-
QI_EIOTLB_TYPE;
1602-
desc.qw1 = 0;
1603-
} else {
1604-
int mask = ilog2(__roundup_pow_of_two(npages));
1605-
unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
1606-
1607-
if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
1608-
addr = ALIGN_DOWN(addr, align);
1609-
1610-
desc.qw0 = QI_EIOTLB_PASID(pasid) |
1611-
QI_EIOTLB_DID(did) |
1612-
QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
1613-
QI_EIOTLB_TYPE;
1614-
desc.qw1 = QI_EIOTLB_ADDR(addr) |
1615-
QI_EIOTLB_IH(ih) |
1616-
QI_EIOTLB_AM(mask);
1617-
}
1618-
1569+
qi_desc_piotlb(did, pasid, addr, npages, ih, &desc);
16191570
qi_submit_sync(iommu, &desc, 1, 0);
16201571
}
16211572

16221573
/* PASID-based device IOTLB Invalidate */
16231574
void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
16241575
u32 pasid, u16 qdep, u64 addr, unsigned int size_order)
16251576
{
1626-
unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
16271577
struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
16281578

16291579
/*
@@ -1635,40 +1585,9 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
16351585
if (!(iommu->gcmd & DMA_GCMD_TE))
16361586
return;
16371587

1638-
desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
1639-
QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
1640-
QI_DEV_IOTLB_PFSID(pfsid);
1641-
1642-
/*
1643-
* If S bit is 0, we only flush a single page. If S bit is set,
1644-
* The least significant zero bit indicates the invalidation address
1645-
* range. VT-d spec 6.5.2.6.
1646-
* e.g. address bit 12[0] indicates 8KB, 13[0] indicates 16KB.
1647-
* size order = 0 is PAGE_SIZE 4KB
1648-
* Max Invs Pending (MIP) is set to 0 for now until we have DIT in
1649-
* ECAP.
1650-
*/
1651-
if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))
1652-
pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
1653-
addr, size_order);
1654-
1655-
/* Take page address */
1656-
desc.qw1 = QI_DEV_EIOTLB_ADDR(addr);
1657-
1658-
if (size_order) {
1659-
/*
1660-
* Existing 0s in address below size_order may be the least
1661-
* significant bit, we must set them to 1s to avoid having
1662-
* smaller size than desired.
1663-
*/
1664-
desc.qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1,
1665-
VTD_PAGE_SHIFT);
1666-
/* Clear size_order bit to indicate size */
1667-
desc.qw1 &= ~mask;
1668-
/* Set the S bit to indicate flushing more than 1 page */
1669-
desc.qw1 |= QI_DEV_EIOTLB_SIZE;
1670-
}
1671-
1588+
qi_desc_dev_iotlb_pasid(sid, pfsid, pasid,
1589+
qdep, addr, size_order,
1590+
&desc);
16721591
qi_submit_sync(iommu, &desc, 1, 0);
16731592
}
16741593

drivers/iommu/intel/iommu.h

Lines changed: 109 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1066,6 +1066,115 @@ static inline unsigned long nrpages_to_size(unsigned long npages)
10661066
return npages << VTD_PAGE_SHIFT;
10671067
}
10681068

1069+
/*
 * Compose an IOTLB invalidation descriptor for the queued invalidation
 * interface; the caller submits it (e.g. via qi_submit_sync()).
 * @did: domain id the invalidation targets
 * @addr: page address for page-selective invalidation
 * @size_order: address mask (number of 4K pages as a power of two)
 * @type: invalidation granularity (QI_IOTLB_GRAN encoding)
 */
static inline void qi_desc_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
				 unsigned int size_order, u64 type,
				 struct qi_desc *desc)
{
	u8 dw = 0, dr = 0;
	/* Invalidation hint fixed to 0: do not preserve leaf entries. */
	int ih = 0;

	/* Request write/read drain only when the hardware supports it. */
	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc->qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc->qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);
	/* qw2/qw3 are unused for this descriptor type. */
	desc->qw2 = 0;
	desc->qw3 = 0;
}
1089+
1090+
/*
 * Compose a device TLB invalidation descriptor; the caller submits it.
 * @sid: requester id of the device whose TLB is invalidated
 * @pfsid: PASID-granular requester id
 * @qdep: invalidation queue depth of the device
 * @addr: base address of the range to invalidate
 * @mask: size of the range, as a power-of-two number of 4K pages
 */
static inline void qi_desc_dev_iotlb(u16 sid, u16 pfsid, u16 qdep, u64 addr,
				     unsigned int mask, struct qi_desc *desc)
{
	/* A queue depth beyond the field's range means "all pending". */
	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	/* For a ranged flush, fill the low bits to encode the size. */
	if (mask)
		addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;

	desc->qw0 = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		    QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
	desc->qw1 = mask ? QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE
			 : QI_DEV_IOTLB_ADDR(addr);
	/* qw2/qw3 are unused for this descriptor type. */
	desc->qw2 = 0;
	desc->qw3 = 0;
}
1108+
1109+
/*
 * Compose a PASID-based IOTLB invalidation descriptor; the caller
 * submits it.
 * @did: domain id
 * @pasid: PASID the invalidation targets
 * @addr: base address of the range to invalidate
 * @npages: number of pages; -1 requests a non-global, PASID-wide flush
 * @ih: invalidation hint (preserve leaf entries when set)
 */
static inline void qi_desc_piotlb(u16 did, u32 pasid, u64 addr,
				  unsigned long npages, bool ih,
				  struct qi_desc *desc)
{
	if (npages == -1) {
		/* Flush everything cached for this PASID in this domain. */
		desc->qw0 = QI_EIOTLB_PASID(pasid) |
			QI_EIOTLB_DID(did) |
			QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
			QI_EIOTLB_TYPE;
		desc->qw1 = 0;
	} else {
		/* Round the page count up to a power-of-two address mask. */
		int mask = ilog2(__roundup_pow_of_two(npages));
		unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));

		/*
		 * The address must be naturally aligned to the flushed
		 * size; warn and align down rather than flush a wrong
		 * (shifted) range.
		 */
		if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
			addr = ALIGN_DOWN(addr, align);

		desc->qw0 = QI_EIOTLB_PASID(pasid) |
			QI_EIOTLB_DID(did) |
			QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
			QI_EIOTLB_TYPE;
		desc->qw1 = QI_EIOTLB_ADDR(addr) |
			QI_EIOTLB_IH(ih) |
			QI_EIOTLB_AM(mask);
	}
}
1135+
1136+
/*
 * Compose a PASID-based device TLB invalidation descriptor; the caller
 * submits it.
 * @sid: requester id of the device
 * @pfsid: PASID-granular requester id
 * @pasid: PASID the invalidation targets
 * @qdep: invalidation queue depth of the device
 * @addr: base address of the range to invalidate
 * @size_order: size of the range, as a power-of-two number of 4K pages
 */
static inline void qi_desc_dev_iotlb_pasid(u16 sid, u16 pfsid, u32 pasid,
					   u16 qdep, u64 addr,
					   unsigned int size_order,
					   struct qi_desc *desc)
{
	/* Bit that encodes the flushed size (the lowest zero bit below it). */
	unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);

	desc->qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
		QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
		QI_DEV_IOTLB_PFSID(pfsid);

	/*
	 * If S bit is 0, we only flush a single page. If S bit is set,
	 * The least significant zero bit indicates the invalidation address
	 * range. VT-d spec 6.5.2.6.
	 * e.g. address bit 12[0] indicates 8KB, 13[0] indicates 16KB.
	 * size order = 0 is PAGE_SIZE 4KB
	 * Max Invs Pending (MIP) is set to 0 for now until we have DIT in
	 * ECAP.
	 */
	if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))
		pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
				    addr, size_order);

	/* Take page address */
	desc->qw1 = QI_DEV_EIOTLB_ADDR(addr);

	if (size_order) {
		/*
		 * Existing 0s in address below size_order may be the least
		 * significant bit, we must set them to 1s to avoid having
		 * smaller size than desired.
		 */
		desc->qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1,
					 VTD_PAGE_SHIFT);
		/* Clear size_order bit to indicate size */
		desc->qw1 &= ~mask;
		/* Set the S bit to indicate flushing more than 1 page */
		desc->qw1 |= QI_DEV_EIOTLB_SIZE;
	}
}
1177+
10691178
/* Convert value to context PASID directory size field coding. */
10701179
#define context_pdts(pds) (((pds) & 0x7) << 9)
10711180

0 commit comments

Comments
 (0)