@@ -24,6 +24,7 @@
  * - v7/v8 long-descriptor format
  * - Non-secure access to the SMMU
  * - Context fault reporting
+ * - Extended Stream ID (16 bit)
  */
 
 #define pr_fmt(fmt) "arm-smmu: " fmt
@@ -87,6 +88,7 @@
 #define sCR0_CLIENTPD			(1 << 0)
 #define sCR0_GFRE			(1 << 1)
 #define sCR0_GFIE			(1 << 2)
+#define sCR0_EXIDENABLE			(1 << 3)
 #define sCR0_GCFGFRE			(1 << 4)
 #define sCR0_GCFGFIE			(1 << 5)
 #define sCR0_USFCFG			(1 << 10)
@@ -126,6 +128,7 @@
 #define ID0_NUMIRPT_MASK		0xff
 #define ID0_NUMSIDB_SHIFT		9
 #define ID0_NUMSIDB_MASK		0xf
+#define ID0_EXIDS			(1 << 8)
 #define ID0_NUMSMRG_SHIFT		0
 #define ID0_NUMSMRG_MASK		0xff
 
@@ -169,6 +172,7 @@
 #define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
 #define S2CR_CBNDX_SHIFT		0
 #define S2CR_CBNDX_MASK			0xff
+#define S2CR_EXIDVALID			(1 << 10)
 #define S2CR_TYPE_SHIFT			16
 #define S2CR_TYPE_MASK			0x3
 enum arm_smmu_s2cr_type {
@@ -260,6 +264,7 @@ enum arm_smmu_s2cr_privcfg {
 
 #define TTBCR2_SEP_SHIFT		15
 #define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)
+#define TTBCR2_AS			(1 << 4)
 
 #define TTBRn_ASID_SHIFT		48
 
@@ -351,6 +356,7 @@ struct arm_smmu_device {
 #define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
 #define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
 #define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
+#define ARM_SMMU_FEAT_EXIDS		(1 << 12)
 	u32 features;
 
 #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
@@ -778,6 +784,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
 			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
 			reg2 |= TTBCR2_SEP_UPSTREAM;
+			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
+				reg2 |= TTBCR2_AS;
 		}
 		if (smmu->version > ARM_SMMU_V1)
 			writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
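Side note on the hunk above (not part of the patch): for AArch64-format context banks, TTBCR2.AS selects 16-bit ASIDs, matching the 16-bit ASID the driver already packs into TTBRn at TTBRn_ASID_SHIFT. A minimal sketch of the TTBCR2 value being built, using a hypothetical helper name and the macros from this file:

/* Sketch only; arm_smmu_tcr2_value() is a hypothetical helper, not in the patch. */
static u32 arm_smmu_tcr2_value(u64 tcr, bool aarch64_fmt)
{
	u32 reg2 = tcr >> 32;		/* upper half of the LPAE stage 1 TCR */

	reg2 |= TTBCR2_SEP_UPSTREAM;	/* sign-extension bit position: upstream */
	if (aarch64_fmt)
		reg2 |= TTBCR2_AS;	/* 16-bit ASIDs for the AArch64 format */
	return reg2;
}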
@@ -1048,7 +1056,7 @@ static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
 	struct arm_smmu_smr *smr = smmu->smrs + idx;
 	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
 
-	if (smr->valid)
+	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
 		reg |= SMR_VALID;
 	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
 }
@@ -1060,6 +1068,9 @@ static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
 		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
 		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
 
+	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
+	    smmu->smrs[idx].valid)
+		reg |= S2CR_EXIDVALID;
 	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
 }
 
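The two hunks above relocate stream-match validity when extended stream IDs are enabled: per the SMMUv2 architecture, sCR0.EXIDENABLE widens SMR.ID and SMR.MASK to 16 bits each, which consumes the old SMR.VALID bit, so validity is signalled through S2CR.EXIDVALID instead. A minimal sketch of the resulting SMR encoding, with a hypothetical arm_smmu_pack_smr() helper built on this file's macros:

/* Sketch only; not part of the patch. */
static u32 arm_smmu_pack_smr(u16 id, u16 mask, bool valid, bool exids)
{
	u32 reg = id << SMR_ID_SHIFT | mask << SMR_MASK_SHIFT;

	/* Without EXIDS, bit 31 is SMR.VALID; with EXIDS it is part of MASK. */
	if (!exids && valid)
		reg |= SMR_VALID;
	return reg;
}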
@@ -1070,6 +1081,34 @@ static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
 		arm_smmu_write_smr(smmu, idx);
 }
 
+/*
+ * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
+ * should be called after sCR0 is written.
+ */
+static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
+{
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+	u32 smr;
+
+	if (!smmu->smrs)
+		return;
+
+	/*
+	 * SMR.ID bits may not be preserved if the corresponding MASK
+	 * bits are set, so check each one separately. We can reject
+	 * masters later if they try to claim IDs outside these masks.
+	 */
+	smr = smmu->streamid_mask << SMR_ID_SHIFT;
+	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
+	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+	smmu->streamid_mask = smr >> SMR_ID_SHIFT;
+
+	smr = smmu->streamid_mask << SMR_MASK_SHIFT;
+	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
+	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+	smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
+}
+
 static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
 {
 	struct arm_smmu_smr *smrs = smmu->smrs;
@@ -1648,6 +1687,9 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 	if (smmu->features & ARM_SMMU_FEAT_VMID16)
 		reg |= sCR0_VMID16EN;
 
+	if (smmu->features & ARM_SMMU_FEAT_EXIDS)
+		reg |= sCR0_EXIDENABLE;
+
 	/* Push the button */
 	__arm_smmu_tlb_sync(smmu);
 	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
@@ -1735,11 +1777,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 			   "\t(IDR0.CTTW overridden by FW configuration)\n");
 
 	/* Max. number of entries we have for stream matching/indexing */
-	size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
+	if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
+		smmu->features |= ARM_SMMU_FEAT_EXIDS;
+		size = 1 << 16;
+	} else {
+		size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
+	}
 	smmu->streamid_mask = size - 1;
 	if (id & ID0_SMS) {
-		u32 smr;
-
 		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
 		size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
 		if (size == 0) {
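Restating the sizing decision above as a self-contained sketch (hypothetical helper name, macros from this file): an SMMUv2 that reports IDR0.EXIDS gets the full 16-bit stream ID space, otherwise the width still comes from IDR0.NUMSIDB.

/* Sketch only; mirrors the probe logic above, not part of the patch. */
static unsigned long arm_smmu_max_stream_ids(u32 id, bool is_smmu_v2)
{
	if (is_smmu_v2 && (id & ID0_EXIDS))
		return 1UL << 16;	/* extended: 16-bit stream IDs */
	return 1UL << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
}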
@@ -1748,30 +1793,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 			return -ENODEV;
 		}
 
-		/*
-		 * SMR.ID bits may not be preserved if the corresponding MASK
-		 * bits are set, so check each one separately. We can reject
-		 * masters later if they try to claim IDs outside these masks.
-		 */
-		smr = smmu->streamid_mask << SMR_ID_SHIFT;
-		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
-		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
-		smmu->streamid_mask = smr >> SMR_ID_SHIFT;
-
-		smr = smmu->streamid_mask << SMR_MASK_SHIFT;
-		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
-		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
-		smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
-
 		/* Zero-initialised to mark as invalid */
 		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
 					  GFP_KERNEL);
 		if (!smmu->smrs)
 			return -ENOMEM;
 
 		dev_notice(smmu->dev,
-			   "\tstream matching with %lu register groups, mask 0x%x",
-			   size, smmu->smr_mask_mask);
+			   "\tstream matching with %lu register groups", size);
 	}
 	/* s2cr->type == 0 means translation, so initialise explicitly */
 	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
@@ -2094,6 +2123,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 	iommu_register_instance(dev->fwnode, &arm_smmu_ops);
 	platform_set_drvdata(pdev, smmu);
 	arm_smmu_device_reset(smmu);
+	arm_smmu_test_smr_masks(smmu);
 
 	/* Oh, for a proper bus abstraction */
 	if (!iommu_present(&platform_bus_type))