#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
+ #define ARM_LPAE_PTE_DBM		(((arm_lpae_iopte)1) << 51)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
- #define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
+ #define ARM_LPAE_PTE_ATTR_HI_MASK	(ARM_LPAE_PTE_XN | ARM_LPAE_PTE_DBM)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
- #define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
+ #define ARM_LPAE_PTE_AP_RDONLY_BIT	7
+ #define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)1) << \
+ 					   ARM_LPAE_PTE_AP_RDONLY_BIT)
+ #define ARM_LPAE_PTE_AP_WR_CLEAN_MASK	(ARM_LPAE_PTE_AP_RDONLY | \
+ 					 ARM_LPAE_PTE_DBM)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

+ #define iopte_writeable_dirty(pte)				\
+ 	(((pte) & ARM_LPAE_PTE_AP_WR_CLEAN_MASK) == ARM_LPAE_PTE_DBM)
+
+ #define iopte_set_writeable_clean(ptep)			\
+ 	set_bit(ARM_LPAE_PTE_AP_RDONLY_BIT, (unsigned long *)(ptep))
+
struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

@@ -160,6 +171,13 @@ static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
}

+ static inline bool iopte_table(arm_lpae_iopte pte, int lvl)
+ {
+ 	if (lvl == (ARM_LPAE_MAX_LEVELS - 1))
+ 		return false;
+ 	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_TABLE;
+ }
+
static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
@@ -726,6 +744,97 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
	return iopte_to_paddr(pte, data) | iova;
}

+ struct io_pgtable_walk_data {
+ 	struct iommu_dirty_bitmap	*dirty;
+ 	unsigned long			flags;
+ 	u64				addr;
+ 	const u64			end;
+ };
+
+ static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
+ 				       struct io_pgtable_walk_data *walk_data,
+ 				       arm_lpae_iopte *ptep,
+ 				       int lvl);
+
+ static int io_pgtable_visit_dirty(struct arm_lpae_io_pgtable *data,
+ 				  struct io_pgtable_walk_data *walk_data,
+ 				  arm_lpae_iopte *ptep, int lvl)
+ {
+ 	struct io_pgtable *iop = &data->iop;
+ 	arm_lpae_iopte pte = READ_ONCE(*ptep);
+
+ 	if (iopte_leaf(pte, lvl, iop->fmt)) {
+ 		size_t size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+
+ 		if (iopte_writeable_dirty(pte)) {
+ 			iommu_dirty_bitmap_record(walk_data->dirty,
+ 						  walk_data->addr, size);
+ 			if (!(walk_data->flags & IOMMU_DIRTY_NO_CLEAR))
+ 				iopte_set_writeable_clean(ptep);
+ 		}
+ 		walk_data->addr += size;
+ 		return 0;
+ 	}
+
+ 	if (WARN_ON(!iopte_table(pte, lvl)))
+ 		return -EINVAL;
+
+ 	ptep = iopte_deref(pte, data);
+ 	return __arm_lpae_iopte_walk_dirty(data, walk_data, ptep, lvl + 1);
+ }
+
+ static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
+ 				       struct io_pgtable_walk_data *walk_data,
+ 				       arm_lpae_iopte *ptep,
+ 				       int lvl)
+ {
+ 	u32 idx;
+ 	int max_entries, ret;
+
+ 	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
+ 		return -EINVAL;
+
+ 	if (lvl == data->start_level)
+ 		max_entries = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
+ 	else
+ 		max_entries = ARM_LPAE_PTES_PER_TABLE(data);
+
+ 	for (idx = ARM_LPAE_LVL_IDX(walk_data->addr, lvl, data);
+ 	     (idx < max_entries) && (walk_data->addr < walk_data->end); ++idx) {
+ 		ret = io_pgtable_visit_dirty(data, walk_data, ptep + idx, lvl);
+ 		if (ret)
+ 			return ret;
+ 	}
+
+ 	return 0;
+ }
+
+ static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
+ 					 unsigned long iova, size_t size,
+ 					 unsigned long flags,
+ 					 struct iommu_dirty_bitmap *dirty)
+ {
+ 	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
+ 	struct io_pgtable_walk_data walk_data = {
+ 		.dirty = dirty,
+ 		.flags = flags,
+ 		.addr = iova,
+ 		.end = iova + size,
+ 	};
+ 	arm_lpae_iopte *ptep = data->pgd;
+ 	int lvl = data->start_level;
+
+ 	if (WARN_ON(!size))
+ 		return -EINVAL;
+ 	if (WARN_ON((iova + size - 1) & ~(BIT(cfg->ias) - 1)))
+ 		return -EINVAL;
+ 	if (data->iop.fmt != ARM_64_LPAE_S1)
+ 		return -EINVAL;
+
+ 	return __arm_lpae_iopte_walk_dirty(data, &walk_data, ptep, lvl);
+ }
+
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
@@ -804,6 +913,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
		.map_pages	= arm_lpae_map_pages,
		.unmap_pages	= arm_lpae_unmap_pages,
		.iova_to_phys	= arm_lpae_iova_to_phys,
+ 		.read_and_clear_dirty = arm_lpae_read_and_clear_dirty,
	};

	return data;
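
For context: the new read_and_clear_dirty() entry reports a leaf PTE as dirty when DBM is set and AP[2] (RDONLY) is clear, and "writeable-clean" is restored by simply re-setting AP[2]. The sketch below shows roughly how an IOMMU driver's dirty-tracking domain op might forward to this io-pgtable callback; my_domain, to_my_domain() and the pgtbl_ops field are hypothetical placeholders, not part of this patch.

/*
 * Illustrative sketch only (not from this commit): forwarding a driver's
 * dirty-tracking domain op to the new io-pgtable callback. The names
 * my_domain, to_my_domain() and pgtbl_ops are assumptions for this example.
 */
static int my_domain_read_and_clear_dirty(struct iommu_domain *domain,
					  unsigned long iova, size_t size,
					  unsigned long flags,
					  struct iommu_dirty_bitmap *dirty)
{
	struct my_domain *md = to_my_domain(domain);
	struct io_pgtable_ops *ops = md->pgtbl_ops;

	if (!ops || !ops->read_and_clear_dirty)
		return -ENODEV;

	/* Walk the stage-1 tables, record dirty IOVAs, optionally clear them */
	return ops->read_and_clear_dirty(ops, iova, size, flags, dirty);
}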