 #include <linux/mmu_notifier.h>
 #include <linux/sched/mm.h>
 #include <linux/slab.h>
+#include <kunit/visibility.h>
 
 #include "arm-smmu-v3.h"
 #include "../../io-pgtable-arm.h"
@@ -34,21 +35,25 @@ struct arm_smmu_bond {
 
 static DEFINE_MUTEX(sva_lock);
 
-/*
- * Write the CD to the CD tables for all masters that this domain is attached
- * to. Note that this is only used to update existing CD entries in the target
- * CD table, for which it's assumed that arm_smmu_write_ctx_desc can't fail.
- */
-static void arm_smmu_update_ctx_desc_devices(struct arm_smmu_domain *smmu_domain,
-					     int ssid,
-					     struct arm_smmu_ctx_desc *cd)
+static void
+arm_smmu_update_s1_domain_cd_entry(struct arm_smmu_domain *smmu_domain)
 {
 	struct arm_smmu_master *master;
+	struct arm_smmu_cd target_cd;
 	unsigned long flags;
 
 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
 	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
-		arm_smmu_write_ctx_desc(master, ssid, cd);
+		struct arm_smmu_cd *cdptr;
+
+		/* S1 domains only support RID attachment right now */
+		cdptr = arm_smmu_get_cd_ptr(master, IOMMU_NO_PASID);
+		if (WARN_ON(!cdptr))
+			continue;
+
+		arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);
+		arm_smmu_write_cd_entry(master, IOMMU_NO_PASID, cdptr,
+					&target_cd);
 	}
 	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 }
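
The new helper shows the make-then-write pattern this change moves to: the full CD is composed in a local target_cd, then installed through arm_smmu_write_cd_entry(), which is responsible for updating the live entry without the hardware ever observing a torn descriptor. A minimal userspace sketch of the pattern, using stand-in types and an illustrative bit layout rather than the driver's real structures:

/*
 * Userspace sketch of the make-then-write CD update pattern.
 * Types and bit positions are stand-ins, not the driver's API.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cd { uint64_t data[4]; };

#define CD_V		(1ULL << 31)	/* "valid" bit, illustrative position */
#define CD_ASID_SHIFT	48

/* Step 1: compose the complete descriptor in scratch memory. */
static void make_cd(struct cd *target, uint64_t asid)
{
	memset(target, 0, sizeof(*target));
	target->data[0] = CD_V | (asid << CD_ASID_SHIFT);
}

/* Step 2: install it into the live table as one logical update. */
static void write_cd_entry(struct cd *live, const struct cd *target)
{
	/* The real writer orders the word stores so HW never sees a torn entry. */
	memcpy(live, target, sizeof(*live));
}

int main(void)
{
	struct cd live = {0}, target;

	make_cd(&target, 5);
	write_cd_entry(&live, &target);
	printf("word0 = %#llx\n", (unsigned long long)live.data[0]);
	return 0;
}

Building the whole descriptor before touching the live table is what lets the writer compute a safe update order between arbitrary old and new entries.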
@@ -96,7 +101,7 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
 	 * be some overlap between use of both ASIDs, until we invalidate the
 	 * TLB.
 	 */
-	arm_smmu_update_ctx_desc_devices(smmu_domain, IOMMU_NO_PASID, cd);
+	arm_smmu_update_s1_domain_cd_entry(smmu_domain);
 
 	/* Invalidate TLB entries previously associated with that context */
 	arm_smmu_tlb_inv_asid(smmu, asid);
@@ -105,11 +110,86 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
 	return NULL;
 }
 
+static u64 page_size_to_cd(void)
+{
+	static_assert(PAGE_SIZE == SZ_4K || PAGE_SIZE == SZ_16K ||
+		      PAGE_SIZE == SZ_64K);
+	if (PAGE_SIZE == SZ_64K)
+		return ARM_LPAE_TCR_TG0_64K;
+	if (PAGE_SIZE == SZ_16K)
+		return ARM_LPAE_TCR_TG0_16K;
+	return ARM_LPAE_TCR_TG0_4K;
+}
+
+VISIBLE_IF_KUNIT
+void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
+			  struct arm_smmu_master *master, struct mm_struct *mm,
+			  u16 asid)
+{
+	u64 par;
+
+	memset(target, 0, sizeof(*target));
+
+	par = cpuid_feature_extract_unsigned_field(
+		read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1),
+		ID_AA64MMFR0_EL1_PARANGE_SHIFT);
+
+	target->data[0] = cpu_to_le64(
+		CTXDESC_CD_0_TCR_EPD1 |
+#ifdef __BIG_ENDIAN
+		CTXDESC_CD_0_ENDI |
+#endif
+		CTXDESC_CD_0_V |
+		FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par) |
+		CTXDESC_CD_0_AA64 |
+		(master->stall_enabled ? CTXDESC_CD_0_S : 0) |
+		CTXDESC_CD_0_R |
+		CTXDESC_CD_0_A |
+		CTXDESC_CD_0_ASET |
+		FIELD_PREP(CTXDESC_CD_0_ASID, asid));
+
+	/*
+	 * If no MM is passed then this creates an SVA entry that faults
+	 * everything. arm_smmu_write_cd_entry() can hitlessly go between these
+	 * two entry types since TTB0 is ignored by HW when EPD0 is set.
+	 */
+	if (mm) {
+		target->data[0] |= cpu_to_le64(
+			FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ,
+				   64ULL - vabits_actual) |
+			FIELD_PREP(CTXDESC_CD_0_TCR_TG0, page_size_to_cd()) |
+			FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0,
+				   ARM_LPAE_TCR_RGN_WBWA) |
+			FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0,
+				   ARM_LPAE_TCR_RGN_WBWA) |
+			FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS));
+
+		target->data[1] = cpu_to_le64(virt_to_phys(mm->pgd) &
+					      CTXDESC_CD_1_TTB0_MASK);
+	} else {
+		target->data[0] |= cpu_to_le64(CTXDESC_CD_0_TCR_EPD0);
+
+		/*
+		 * Disable stall and immediately generate an abort if stall
+		 * disable is permitted. This speeds up cleanup for an unclean
+		 * exit if the device is still doing a lot of DMA.
+		 */
+		if (!(master->smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
+			target->data[0] &=
+				cpu_to_le64(~(CTXDESC_CD_0_S | CTXDESC_CD_0_R));
+	}
+
+	/*
+	 * MAIR value is pretty much constant and global, so we can just get it
+	 * from the current CPU register
+	 */
+	target->data[3] = cpu_to_le64(read_sysreg(mair_el1));
+}
+
 static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
 {
 	u16 asid;
 	int err = 0;
-	u64 tcr, par, reg;
 	struct arm_smmu_ctx_desc *cd;
 	struct arm_smmu_ctx_desc *ret = NULL;
 
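
arm_smmu_make_sva_cd() packs word 0 with FIELD_PREP() over GENMASK-style field masks. The sketch below re-implements those two macros locally (using a GCC/Clang builtin) so it compiles in userspace, and packs T0SZ (encoded as 64 minus the number of VA bits), TG0, and the ASID; the field positions follow the SMMUv3 CD layout but treat them as illustrative rather than authoritative:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))
/* Shift a value into a mask's field position, like the kernel's FIELD_PREP(). */
#define FIELD_PREP(mask, val) \
	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

#define CD_0_TCR_T0SZ	GENMASK_ULL(5, 0)
#define CD_0_TCR_TG0	GENMASK_ULL(7, 6)
#define CD_0_ASID	GENMASK_ULL(63, 48)

int main(void)
{
	unsigned int vabits = 48;	/* assume a 48-bit VA configuration */
	uint64_t word0;

	/* T0SZ encodes the VA size as 64 - (number of VA bits). */
	word0 = FIELD_PREP(CD_0_TCR_T0SZ, 64 - vabits) |
		FIELD_PREP(CD_0_TCR_TG0, 0) |	/* 0 is the 4K granule encoding */
		FIELD_PREP(CD_0_ASID, 5);

	printf("CD word0 = %#018llx\n", (unsigned long long)word0);
	return 0;
}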
@@ -143,39 +223,6 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
 	if (err)
 		goto out_free_asid;
 
-	tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) |
-	      FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) |
-	      FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) |
-	      FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS) |
-	      CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
-
-	switch (PAGE_SIZE) {
-	case SZ_4K:
-		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_4K);
-		break;
-	case SZ_16K:
-		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_16K);
-		break;
-	case SZ_64K:
-		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_64K);
-		break;
-	default:
-		WARN_ON(1);
-		err = -EINVAL;
-		goto out_free_asid;
-	}
-
-	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
-	par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
-	tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);
-
-	cd->ttbr = virt_to_phys(mm->pgd);
-	cd->tcr = tcr;
-	/*
-	 * MAIR value is pretty much constant and global, so we can just get it
-	 * from the current CPU register
-	 */
-	cd->mair = read_sysreg(mair_el1);
 	cd->asid = asid;
 	cd->mm = mm;
 
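
The deleted switch validated PAGE_SIZE at run time and had to carry an error path (-EINVAL) that could never reasonably fire. page_size_to_cd() earlier in this patch moves that check to compile time with static_assert(). A standalone sketch of the same idiom, with PAGE_SIZE and the TG0 encodings stubbed locally:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_4K	0x1000
#define SZ_16K	0x4000
#define SZ_64K	0x10000
#define PAGE_SIZE SZ_4K	/* stand-in; the kernel derives this from config */

static uint64_t page_size_to_tg0(void)
{
	/* An unsupported granule now fails the build instead of returning -EINVAL. */
	static_assert(PAGE_SIZE == SZ_4K || PAGE_SIZE == SZ_16K ||
		      PAGE_SIZE == SZ_64K, "unsupported PAGE_SIZE");
	if (PAGE_SIZE == SZ_64K)
		return 1;	/* TCR TG0 encoding for 64K */
	if (PAGE_SIZE == SZ_16K)
		return 2;	/* TCR TG0 encoding for 16K */
	return 0;		/* TCR TG0 encoding for 4K */
}

int main(void)
{
	printf("TG0 = %llu\n", (unsigned long long)page_size_to_tg0());
	return 0;
}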
@@ -253,6 +300,8 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 {
 	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
 	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
+	struct arm_smmu_master *master;
+	unsigned long flags;
 
 	mutex_lock(&sva_lock);
 	if (smmu_mn->cleared) {
@@ -264,8 +313,19 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 	 * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
 	 * but disable translation.
 	 */
-	arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm),
-					 &quiet_cd);
+	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+		struct arm_smmu_cd target;
+		struct arm_smmu_cd *cdptr;
+
+		cdptr = arm_smmu_get_cd_ptr(master, mm_get_enqcmd_pasid(mm));
+		if (WARN_ON(!cdptr))
+			continue;
+		arm_smmu_make_sva_cd(&target, master, NULL, smmu_mn->cd->asid);
+		arm_smmu_write_cd_entry(master, mm_get_enqcmd_pasid(mm), cdptr,
+					&target);
+	}
+	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 
 	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
 	arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), 0, 0);
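
In mm_release the same builder is called with mm == NULL, producing a "quiet" CD: still valid (so no C_BAD_CD events), but with EPD0 set so TTB0 walks are disabled and accesses fault. Only a few word-0 bits separate the two forms, which is why the writer can flip between them hitlessly. A small sketch that builds both variants and prints the differing bits; the bit positions mirror the driver's CTXDESC_CD_0_* defines but are used here purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define CD_0_TCR_EPD0	(1ULL << 14)	/* disable TTB0 walks */
#define CD_0_V		(1ULL << 31)	/* entry is valid */
#define CD_0_R		(1ULL << 45)	/* translations allowed */

static uint64_t make_word0(int active)
{
	uint64_t w = CD_0_V;	/* both forms stay valid */

	if (active)
		w |= CD_0_R;		/* normal SVA entry */
	else
		w |= CD_0_TCR_EPD0;	/* quiet entry: fault everything */
	return w;
}

int main(void)
{
	uint64_t delta = make_word0(1) ^ make_word0(0);

	/* Only the R/EPD0 bits differ between the two entry types. */
	printf("changed bits: %#llx\n", (unsigned long long)delta);
	return 0;
}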
@@ -360,6 +420,8 @@ static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
 				struct mm_struct *mm)
 {
 	int ret;
+	struct arm_smmu_cd target;
+	struct arm_smmu_cd *cdptr;
 	struct arm_smmu_bond *bond;
 	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
@@ -386,9 +448,13 @@ static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
 		goto err_free_bond;
 	}
 
-	ret = arm_smmu_write_ctx_desc(master, pasid, bond->smmu_mn->cd);
-	if (ret)
+	cdptr = arm_smmu_alloc_cd_ptr(master, mm_get_enqcmd_pasid(mm));
+	if (!cdptr) {
+		ret = -ENOMEM;
 		goto err_put_notifier;
+	}
+	arm_smmu_make_sva_cd(&target, master, mm, bond->smmu_mn->cd->asid);
+	arm_smmu_write_cd_entry(master, pasid, cdptr, &target);
 
 	list_add(&bond->list, &master->bonds);
 	return 0;
@@ -546,7 +612,7 @@ void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
 
 	mutex_lock(&sva_lock);
 
-	arm_smmu_write_ctx_desc(master, id, NULL);
+	arm_smmu_clear_cd(master, id);
 
 	list_for_each_entry(t, &master->bonds, list) {
 		if (t->mm == mm) {
@@ -569,6 +635,9 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
 	int ret = 0;
 	struct mm_struct *mm = domain->mm;
 
+	if (mm_get_enqcmd_pasid(mm) != id)
+		return -EINVAL;
+
 	mutex_lock(&sva_lock);
 	ret = __arm_smmu_sva_bind(dev, id, mm);
 	mutex_unlock(&sva_lock);