@@ -1658,40 +1658,28 @@ static void arm_smmu_tlb_inv_context(void *cookie)
1658
1658
arm_smmu_atc_inv_domain (smmu_domain , 0 , 0 , 0 );
1659
1659
}
1660
1660
1661
- static void arm_smmu_tlb_inv_range (unsigned long iova , size_t size ,
1662
- size_t granule , bool leaf ,
1663
- struct arm_smmu_domain * smmu_domain )
1661
+ static void __arm_smmu_tlb_inv_range (struct arm_smmu_cmdq_ent * cmd ,
1662
+ unsigned long iova , size_t size ,
1663
+ size_t granule ,
1664
+ struct arm_smmu_domain * smmu_domain )
1664
1665
{
1665
1666
struct arm_smmu_device * smmu = smmu_domain -> smmu ;
1666
- unsigned long start = iova , end = iova + size , num_pages = 0 , tg = 0 ;
1667
+ unsigned long end = iova + size , num_pages = 0 , tg = 0 ;
1667
1668
size_t inv_range = granule ;
1668
1669
struct arm_smmu_cmdq_batch cmds = {};
1669
- struct arm_smmu_cmdq_ent cmd = {
1670
- .tlbi = {
1671
- .leaf = leaf ,
1672
- },
1673
- };
1674
1670
1675
1671
if (!size )
1676
1672
return ;
1677
1673
1678
- if (smmu_domain -> stage == ARM_SMMU_DOMAIN_S1 ) {
1679
- cmd .opcode = CMDQ_OP_TLBI_NH_VA ;
1680
- cmd .tlbi .asid = smmu_domain -> s1_cfg .cd .asid ;
1681
- } else {
1682
- cmd .opcode = CMDQ_OP_TLBI_S2_IPA ;
1683
- cmd .tlbi .vmid = smmu_domain -> s2_cfg .vmid ;
1684
- }
1685
-
1686
1674
if (smmu -> features & ARM_SMMU_FEAT_RANGE_INV ) {
1687
1675
/* Get the leaf page size */
1688
1676
tg = __ffs (smmu_domain -> domain .pgsize_bitmap );
1689
1677
1690
1678
/* Convert page size of 12,14,16 (log2) to 1,2,3 */
1691
- cmd . tlbi .tg = (tg - 10 ) / 2 ;
1679
+ cmd -> tlbi .tg = (tg - 10 ) / 2 ;
1692
1680
1693
1681
/* Determine what level the granule is at */
1694
- cmd . tlbi .ttl = 4 - ((ilog2 (granule ) - 3 ) / (tg - 3 ));
1682
+ cmd -> tlbi .ttl = 4 - ((ilog2 (granule ) - 3 ) / (tg - 3 ));
1695
1683
1696
1684
num_pages = size >> tg ;
1697
1685
}
@@ -1709,11 +1697,11 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
1709
1697
1710
1698
/* Determine the power of 2 multiple number of pages */
1711
1699
scale = __ffs (num_pages );
1712
- cmd . tlbi .scale = scale ;
1700
+ cmd -> tlbi .scale = scale ;
1713
1701
1714
1702
/* Determine how many chunks of 2^scale size we have */
1715
1703
num = (num_pages >> scale ) & CMDQ_TLBI_RANGE_NUM_MAX ;
1716
- cmd . tlbi .num = num - 1 ;
1704
+ cmd -> tlbi .num = num - 1 ;
1717
1705
1718
1706
/* range is num * 2^scale * pgsize */
1719
1707
inv_range = num << (scale + tg );
@@ -1722,17 +1710,37 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
1722
1710
num_pages -= num << scale ;
1723
1711
}
1724
1712
1725
- cmd . tlbi .addr = iova ;
1726
- arm_smmu_cmdq_batch_add (smmu , & cmds , & cmd );
1713
+ cmd -> tlbi .addr = iova ;
1714
+ arm_smmu_cmdq_batch_add (smmu , & cmds , cmd );
1727
1715
iova += inv_range ;
1728
1716
}
1729
1717
arm_smmu_cmdq_batch_submit (smmu , & cmds );
1718
+ }
1719
+
1720
/*
 * Invalidate a range of TLB entries for @smmu_domain, then flush the ATCs
 * of the devices attached to the domain.
 *
 * @iova:        start address of the range to invalidate
 * @size:        size of the range in bytes
 * @granule:     invalidation granule (leaf page size of the mapping)
 * @leaf:        restrict the TLBI to leaf (last-level) entries
 * @smmu_domain: the domain whose translations are being invalidated
 *
 * The domain's stage selects both the command opcode and the tag used to
 * scope the invalidation: stage-1 domains invalidate by ASID, stage-2
 * domains by VMID.  The actual command emission (including range-based
 * invalidation when supported) is done by __arm_smmu_tlb_inv_range().
 */
static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
					  size_t granule, bool leaf,
					  struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_cmdq_ent cmd = {
		.tlbi = {
			.leaf	= leaf,
		},
	};

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_VA;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}
	__arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);

	/*
	 * Unfortunately, this can't be leaf-only since we may have
	 * zapped an entire table.
	 */
	arm_smmu_atc_inv_domain(smmu_domain, 0, iova, size);
}
1737
1745
1738
1746
static void arm_smmu_tlb_inv_page_nosync (struct iommu_iotlb_gather * gather ,
@@ -1748,7 +1756,7 @@ static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
1748
1756
/*
 * Walk-cache invalidation callback (see arm_smmu_flush_ops below):
 * invalidate the range non-leaf, since intermediate table entries may
 * have been removed as well.  @cookie is the struct arm_smmu_domain.
 */
static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
				  size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_domain(iova, size, granule, false, cookie);
}
1753
1761
1754
1762
static const struct iommu_flush_ops arm_smmu_flush_ops = {
/*
 * iommu_ops iotlb_sync callback: flush the IOVA range accumulated in
 * @gather as a single leaf-only range invalidation.
 *
 * NOTE(review): the continuation line of the signature is not visible in
 * this diff chunk; "struct iommu_iotlb_gather *gather" is reconstructed
 * from the body's gather-> accesses — confirm against the full source.
 */
static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
				struct iommu_iotlb_gather *gather)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	arm_smmu_tlb_inv_range_domain(gather->start,
				      gather->end - gather->start,
				      gather->pgsize, true, smmu_domain);
}
2277
2286
2278
2287
static phys_addr_t
0 commit comments