7676
#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
/* Dirty Bit Modifier: with DBM set, HW clears AP_RDONLY on write */
#define ARM_LPAE_PTE_DBM		(((arm_lpae_iopte)1) << 51)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
8586
#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(ARM_LPAE_PTE_XN | ARM_LPAE_PTE_DBM)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
/* Bit number kept separate so set_bit() can target it atomically */
#define ARM_LPAE_PTE_AP_RDONLY_BIT	7
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)1) << \
					   ARM_LPAE_PTE_AP_RDONLY_BIT)
/* Bits tested together to decide whether a PTE is writeable-dirty */
#define ARM_LPAE_PTE_AP_WR_CLEAN_MASK	(ARM_LPAE_PTE_AP_RDONLY | \
					 ARM_LPAE_PTE_DBM)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)
99104
139144
#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

/*
 * Writeable-dirty: DBM set and AP_RDONLY clear.  NOTE(review): relies on
 * the HW dirty-state mechanism clearing AP_RDONLY on first write when DBM
 * is set — confirm against the Arm ARM (hardware management of dirty state).
 */
#define iopte_writeable_dirty(pte)				\
	(((pte) & ARM_LPAE_PTE_AP_WR_CLEAN_MASK) == ARM_LPAE_PTE_DBM)

/* Transition a PTE back to writeable-clean by atomically setting AP_RDONLY */
#define iopte_set_writeable_clean(ptep)				\
	set_bit(ARM_LPAE_PTE_AP_RDONLY_BIT, (unsigned long *)(ptep))
152+
142153struct arm_lpae_io_pgtable {
143154 struct io_pgtable iop ;
144155
@@ -160,6 +171,13 @@ static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
160171 return iopte_type (pte ) == ARM_LPAE_PTE_TYPE_BLOCK ;
161172}
162173
174+ static inline bool iopte_table (arm_lpae_iopte pte , int lvl )
175+ {
176+ if (lvl == (ARM_LPAE_MAX_LEVELS - 1 ))
177+ return false;
178+ return iopte_type (pte ) == ARM_LPAE_PTE_TYPE_TABLE ;
179+ }
180+
163181static arm_lpae_iopte paddr_to_iopte (phys_addr_t paddr ,
164182 struct arm_lpae_io_pgtable * data )
165183{
@@ -726,6 +744,97 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
726744 return iopte_to_paddr (pte , data ) | iova ;
727745}
728746
/* Mutable cursor state threaded through a dirty-bit page-table walk */
struct io_pgtable_walk_data {
	struct iommu_dirty_bitmap	*dirty;	/* bitmap dirty IOVAs are recorded into */
	unsigned long			flags;	/* IOMMU_DIRTY_* flags, e.g. NO_CLEAR */
	u64				addr;	/* current IOVA; advanced per visited leaf */
	const u64			end;	/* IOVA upper bound (exclusive) */
};
753+
/* Forward declaration: the walker and per-entry visitor are mutually recursive */
static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
				       struct io_pgtable_walk_data *walk_data,
				       arm_lpae_iopte *ptep,
				       int lvl);
758+
759+ static int io_pgtable_visit_dirty (struct arm_lpae_io_pgtable * data ,
760+ struct io_pgtable_walk_data * walk_data ,
761+ arm_lpae_iopte * ptep , int lvl )
762+ {
763+ struct io_pgtable * iop = & data -> iop ;
764+ arm_lpae_iopte pte = READ_ONCE (* ptep );
765+
766+ if (iopte_leaf (pte , lvl , iop -> fmt )) {
767+ size_t size = ARM_LPAE_BLOCK_SIZE (lvl , data );
768+
769+ if (iopte_writeable_dirty (pte )) {
770+ iommu_dirty_bitmap_record (walk_data -> dirty ,
771+ walk_data -> addr , size );
772+ if (!(walk_data -> flags & IOMMU_DIRTY_NO_CLEAR ))
773+ iopte_set_writeable_clean (ptep );
774+ }
775+ walk_data -> addr += size ;
776+ return 0 ;
777+ }
778+
779+ if (WARN_ON (!iopte_table (pte , lvl )))
780+ return - EINVAL ;
781+
782+ ptep = iopte_deref (pte , data );
783+ return __arm_lpae_iopte_walk_dirty (data , walk_data , ptep , lvl + 1 );
784+ }
785+
786+ static int __arm_lpae_iopte_walk_dirty (struct arm_lpae_io_pgtable * data ,
787+ struct io_pgtable_walk_data * walk_data ,
788+ arm_lpae_iopte * ptep ,
789+ int lvl )
790+ {
791+ u32 idx ;
792+ int max_entries , ret ;
793+
794+ if (WARN_ON (lvl == ARM_LPAE_MAX_LEVELS ))
795+ return - EINVAL ;
796+
797+ if (lvl == data -> start_level )
798+ max_entries = ARM_LPAE_PGD_SIZE (data ) / sizeof (arm_lpae_iopte );
799+ else
800+ max_entries = ARM_LPAE_PTES_PER_TABLE (data );
801+
802+ for (idx = ARM_LPAE_LVL_IDX (walk_data -> addr , lvl , data );
803+ (idx < max_entries ) && (walk_data -> addr < walk_data -> end ); ++ idx ) {
804+ ret = io_pgtable_visit_dirty (data , walk_data , ptep + idx , lvl );
805+ if (ret )
806+ return ret ;
807+ }
808+
809+ return 0 ;
810+ }
811+
812+ static int arm_lpae_read_and_clear_dirty (struct io_pgtable_ops * ops ,
813+ unsigned long iova , size_t size ,
814+ unsigned long flags ,
815+ struct iommu_dirty_bitmap * dirty )
816+ {
817+ struct arm_lpae_io_pgtable * data = io_pgtable_ops_to_data (ops );
818+ struct io_pgtable_cfg * cfg = & data -> iop .cfg ;
819+ struct io_pgtable_walk_data walk_data = {
820+ .dirty = dirty ,
821+ .flags = flags ,
822+ .addr = iova ,
823+ .end = iova + size ,
824+ };
825+ arm_lpae_iopte * ptep = data -> pgd ;
826+ int lvl = data -> start_level ;
827+
828+ if (WARN_ON (!size ))
829+ return - EINVAL ;
830+ if (WARN_ON ((iova + size - 1 ) & ~(BIT (cfg -> ias ) - 1 )))
831+ return - EINVAL ;
832+ if (data -> iop .fmt != ARM_64_LPAE_S1 )
833+ return - EINVAL ;
834+
835+ return __arm_lpae_iopte_walk_dirty (data , & walk_data , ptep , lvl );
836+ }
837+
729838static void arm_lpae_restrict_pgsizes (struct io_pgtable_cfg * cfg )
730839{
731840 unsigned long granule , page_sizes ;
@@ -804,6 +913,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
804913 .map_pages = arm_lpae_map_pages ,
805914 .unmap_pages = arm_lpae_unmap_pages ,
806915 .iova_to_phys = arm_lpae_iova_to_phys ,
916+ .read_and_clear_dirty = arm_lpae_read_and_clear_dirty ,
807917 };
808918
809919 return data ;
0 commit comments