Skip to content

Commit e563cc0

Browse files
jernejsk
authored and joergroedel committed
iommu/sun50i: Implement .iotlb_sync_map
Allocated iova ranges need to be invalidated immediately or otherwise they might or might not work when used by master or CPU. This was discovered when running video decoder conformity test with Cedrus. Some videos were now and then decoded incorrectly and generated page faults. According to vendor driver, it's enough to invalidate just start and end TLB and PTW cache lines. Documentation says that neighbouring lines must be invalidated too. Finally, when page fault occurs, that iova must be invalidated the same way, according to documentation. Fixes: 4100b8c ("iommu: Add Allwinner H6 IOMMU driver") Signed-off-by: Jernej Skrabec <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Joerg Roedel <[email protected]>
1 parent 67a8a67 commit e563cc0

File tree

1 file changed

+73
-0
lines changed

1 file changed

+73
-0
lines changed

drivers/iommu/sun50i-iommu.c

Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -93,6 +93,8 @@
9393
#define NUM_PT_ENTRIES 256
9494
#define PT_SIZE (NUM_PT_ENTRIES * PT_ENTRY_SIZE)
9595

96+
#define SPAGE_SIZE 4096
97+
9698
struct sun50i_iommu {
9799
struct iommu_device iommu;
98100

@@ -295,6 +297,62 @@ static void sun50i_table_flush(struct sun50i_iommu_domain *sun50i_domain,
295297
dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE);
296298
}
297299

300+
/*
 * Invalidate the TLB entry covering a single IOVA.
 *
 * Programs the invalidation address and a 4 KiB page mask, kicks the
 * operation, then busy-waits (1 us poll interval, 2 ms timeout) for the
 * hardware to clear the enable bit, which signals completion.
 *
 * NOTE(review): multi-register sequence — callers are expected to hold
 * iommu->iommu_lock (sun50i_iommu_zap_range asserts it); confirm no other
 * call path exists.
 */
static void sun50i_iommu_zap_iova(struct sun50i_iommu *iommu,
				  unsigned long iova)
{
	u32 reg;
	int ret;

	iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_REG, iova);
	/* GENMASK(31, 12): match the whole 4 KiB page containing iova. */
	iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_MASK_REG, GENMASK(31, 12));
	iommu_write(iommu, IOMMU_TLB_IVLD_ENABLE_REG,
		    IOMMU_TLB_IVLD_ENABLE_ENABLE);

	/* Hardware clears the enable bit once the invalidation is done. */
	ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_IVLD_ENABLE_REG,
					reg, !reg, 1, 2000);
	if (ret)
		dev_warn(iommu->dev, "TLB invalidation timed out!\n");
}
316+
317+
/*
 * Invalidate the page-table-walk (PTW) cache line covering @iova.
 *
 * Unlike the TLB path there is no mask register: the hardware derives the
 * affected cache line from the address alone. Triggers the invalidation
 * and busy-waits (1 us poll interval, 2 ms timeout) for the enable bit to
 * self-clear.
 *
 * NOTE(review): expected to run under iommu->iommu_lock, same as
 * sun50i_iommu_zap_iova.
 */
static void sun50i_iommu_zap_ptw_cache(struct sun50i_iommu *iommu,
				       unsigned long iova)
{
	u32 reg;
	int ret;

	iommu_write(iommu, IOMMU_PC_IVLD_ADDR_REG, iova);
	iommu_write(iommu, IOMMU_PC_IVLD_ENABLE_REG,
		    IOMMU_PC_IVLD_ENABLE_ENABLE);

	/* Hardware clears the enable bit once the invalidation is done. */
	ret = readl_poll_timeout_atomic(iommu->base + IOMMU_PC_IVLD_ENABLE_REG,
					reg, !reg, 1, 2000);
	if (ret)
		dev_warn(iommu->dev, "PTW cache invalidation timed out!\n");
}
332+
333+
/*
 * Invalidate TLB entries and PTW cache lines for the range
 * [iova, iova + size).
 *
 * Per the commit rationale: the vendor driver invalidates only the first
 * and last TLB/PTW cache lines of a range, while the documentation
 * requires neighbouring lines to be invalidated too. Hence each boundary
 * is zapped twice — one SPAGE_SIZE apart for the TLB, one SZ_1M apart for
 * the PTW cache (a PTW line covers 1 MiB of IOVA space: 256 4 KiB pages
 * per page table).
 *
 * Auto-gating is disabled for the duration of the sequence and re-enabled
 * afterwards — presumably to keep the invalidation logic clocked; mirrors
 * vendor driver behaviour (NOTE(review): confirm against the H6 manual).
 *
 * Caller must hold iommu->iommu_lock.
 */
static void sun50i_iommu_zap_range(struct sun50i_iommu *iommu,
				   unsigned long iova, size_t size)
{
	assert_spin_locked(&iommu->iommu_lock);

	iommu_write(iommu, IOMMU_AUTO_GATING_REG, 0);

	/* Start of range plus its neighbouring TLB line. */
	sun50i_iommu_zap_iova(iommu, iova);
	sun50i_iommu_zap_iova(iommu, iova + SPAGE_SIZE);
	if (size > SPAGE_SIZE) {
		/* End of range plus its neighbouring TLB line. */
		sun50i_iommu_zap_iova(iommu, iova + size);
		sun50i_iommu_zap_iova(iommu, iova + size + SPAGE_SIZE);
	}
	/* Same pattern for the PTW cache, at 1 MiB granularity. */
	sun50i_iommu_zap_ptw_cache(iommu, iova);
	sun50i_iommu_zap_ptw_cache(iommu, iova + SZ_1M);
	if (size > SZ_1M) {
		sun50i_iommu_zap_ptw_cache(iommu, iova + size);
		sun50i_iommu_zap_ptw_cache(iommu, iova + size + SZ_1M);
	}

	iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
}
355+
298356
static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu)
299357
{
300358
u32 reg;
@@ -344,6 +402,18 @@ static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain)
344402
spin_unlock_irqrestore(&iommu->iommu_lock, flags);
345403
}
346404

405+
/*
 * .iotlb_sync_map callback: invalidate the TLB/PTW cache lines covering a
 * freshly mapped IOVA range so the mapping is usable immediately by both
 * masters and the CPU. Takes the IOMMU lock required by
 * sun50i_iommu_zap_range.
 */
static void sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
					unsigned long iova, size_t size)
{
	struct sun50i_iommu *iommu = to_sun50i_domain(domain)->iommu;
	unsigned long irqflags;

	spin_lock_irqsave(&iommu->iommu_lock, irqflags);
	sun50i_iommu_zap_range(iommu, iova, size);
	spin_unlock_irqrestore(&iommu->iommu_lock, irqflags);
}
416+
347417
static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain,
348418
struct iommu_iotlb_gather *gather)
349419
{
@@ -767,6 +837,7 @@ static const struct iommu_ops sun50i_iommu_ops = {
767837
.attach_dev = sun50i_iommu_attach_device,
768838
.detach_dev = sun50i_iommu_detach_device,
769839
.flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
840+
.iotlb_sync_map = sun50i_iommu_iotlb_sync_map,
770841
.iotlb_sync = sun50i_iommu_iotlb_sync,
771842
.iova_to_phys = sun50i_iommu_iova_to_phys,
772843
.map = sun50i_iommu_map,
@@ -786,6 +857,8 @@ static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
786857
report_iommu_fault(iommu->domain, iommu->dev, iova, prot);
787858
else
788859
dev_err(iommu->dev, "Page fault while iommu not attached to any domain?\n");
860+
861+
sun50i_iommu_zap_range(iommu, iova, SPAGE_SIZE);
789862
}
790863

791864
static phys_addr_t sun50i_iommu_handle_pt_irq(struct sun50i_iommu *iommu,

0 commit comments

Comments
 (0)