Skip to content

Commit 9111aeb

Browse files
jpbrucker authored and willdeacon committed
iommu/arm-smmu-v3: Add support for VHE
ARMv8.1 extensions added Virtualization Host Extensions (VHE), which allow running a host kernel at EL2. When using normal DMA, Device and CPU address spaces are dissociated, and do not need to implement the same capabilities, so VHE hasn't been used in the SMMU until now. With shared address spaces however, ASIDs are shared between MMU and SMMU, and broadcast TLB invalidations issued by a CPU are taken into account by the SMMU. TLB entries on both sides need to have identical exception level in order to be cleared with a single invalidation. When the CPU is using VHE, enable VHE in the SMMU for all STEs. Normal DMA mappings will need to use TLBI_EL2 commands instead of TLBI_NH, but shouldn't be otherwise affected by this change. Acked-by: Will Deacon <[email protected]> Reviewed-by: Jonathan Cameron <[email protected]> Signed-off-by: Jean-Philippe Brucker <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Will Deacon <[email protected]>
1 parent 51d113c commit 9111aeb

File tree

2 files changed

+28
-7
lines changed

2 files changed

+28
-7
lines changed

drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c

Lines changed: 25 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -263,9 +263,11 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
263263
cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
264264
break;
265265
case CMDQ_OP_TLBI_NH_VA:
266+
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
267+
fallthrough;
268+
case CMDQ_OP_TLBI_EL2_VA:
266269
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num);
267270
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale);
268-
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
269271
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
270272
cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
271273
cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl);
@@ -287,6 +289,9 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
287289
case CMDQ_OP_TLBI_S12_VMALL:
288290
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
289291
break;
292+
case CMDQ_OP_TLBI_EL2_ASID:
293+
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
294+
break;
290295
case CMDQ_OP_ATC_INV:
291296
cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
292297
cmd[0] |= FIELD_PREP(CMDQ_ATC_0_GLOBAL, ent->atc.global);
@@ -877,7 +882,8 @@ static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
877882
void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
878883
{
879884
struct arm_smmu_cmdq_ent cmd = {
880-
.opcode = CMDQ_OP_TLBI_NH_ASID,
885+
.opcode = smmu->features & ARM_SMMU_FEAT_E2H ?
886+
CMDQ_OP_TLBI_EL2_ASID : CMDQ_OP_TLBI_NH_ASID,
881887
.tlbi.asid = asid,
882888
};
883889

@@ -1260,13 +1266,16 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
12601266
}
12611267

12621268
if (s1_cfg) {
1269+
u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ?
1270+
STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1;
1271+
12631272
BUG_ON(ste_live);
12641273
dst[1] = cpu_to_le64(
12651274
FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
12661275
FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
12671276
FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
12681277
FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
1269-
FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_NSEL1));
1278+
FIELD_PREP(STRTAB_STE_1_STRW, strw));
12701279

12711280
if (smmu->features & ARM_SMMU_FEAT_STALLS &&
12721281
!(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
@@ -1728,7 +1737,8 @@ static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
17281737
};
17291738

17301739
if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1731-
cmd.opcode = CMDQ_OP_TLBI_NH_VA;
1740+
cmd.opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
1741+
CMDQ_OP_TLBI_EL2_VA : CMDQ_OP_TLBI_NH_VA;
17321742
cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
17331743
} else {
17341744
cmd.opcode = CMDQ_OP_TLBI_S2_IPA;
@@ -1748,7 +1758,8 @@ void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
17481758
struct arm_smmu_domain *smmu_domain)
17491759
{
17501760
struct arm_smmu_cmdq_ent cmd = {
1751-
.opcode = CMDQ_OP_TLBI_NH_VA,
1761+
.opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
1762+
CMDQ_OP_TLBI_EL2_VA : CMDQ_OP_TLBI_NH_VA,
17521763
.tlbi = {
17531764
.asid = asid,
17541765
.leaf = leaf,
@@ -3076,7 +3087,11 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
30763087
writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);
30773088

30783089
/* CR2 (random crap) */
3079-
reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
3090+
reg = CR2_PTM | CR2_RECINVSID;
3091+
3092+
if (smmu->features & ARM_SMMU_FEAT_E2H)
3093+
reg |= CR2_E2H;
3094+
30803095
writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);
30813096

30823097
/* Stream table */
@@ -3235,8 +3250,11 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
32353250
smmu->options |= ARM_SMMU_OPT_MSIPOLL;
32363251
}
32373252

3238-
if (reg & IDR0_HYP)
3253+
if (reg & IDR0_HYP) {
32393254
smmu->features |= ARM_SMMU_FEAT_HYP;
3255+
if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
3256+
smmu->features |= ARM_SMMU_FEAT_E2H;
3257+
}
32403258

32413259
/*
32423260
* The coherency feature as set by FW is used in preference to the ID

drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -430,6 +430,8 @@ struct arm_smmu_cmdq_ent {
430430
#define CMDQ_OP_TLBI_NH_ASID 0x11
431431
#define CMDQ_OP_TLBI_NH_VA 0x12
432432
#define CMDQ_OP_TLBI_EL2_ALL 0x20
433+
#define CMDQ_OP_TLBI_EL2_ASID 0x21
434+
#define CMDQ_OP_TLBI_EL2_VA 0x22
433435
#define CMDQ_OP_TLBI_S12_VMALL 0x28
434436
#define CMDQ_OP_TLBI_S2_IPA 0x2a
435437
#define CMDQ_OP_TLBI_NSNH_ALL 0x30
@@ -604,6 +606,7 @@ struct arm_smmu_device {
604606
#define ARM_SMMU_FEAT_RANGE_INV (1 << 15)
605607
#define ARM_SMMU_FEAT_BTM (1 << 16)
606608
#define ARM_SMMU_FEAT_SVA (1 << 17)
609+
#define ARM_SMMU_FEAT_E2H (1 << 18)
607610
u32 features;
608611

609612
#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)

0 commit comments

Comments
 (0)