@@ -6172,6 +6172,156 @@ int follow_pte(struct vm_area_struct *vma, unsigned long address,
6172
6172
}
6173
6173
EXPORT_SYMBOL_GPL (follow_pte );
6174
6174
6175
+ static inline void pfnmap_args_setup (struct follow_pfnmap_args * args ,
6176
+ spinlock_t * lock , pte_t * ptep ,
6177
+ pgprot_t pgprot , unsigned long pfn_base ,
6178
+ unsigned long addr_mask , bool writable ,
6179
+ bool special )
6180
+ {
6181
+ args -> lock = lock ;
6182
+ args -> ptep = ptep ;
6183
+ args -> pfn = pfn_base + ((args -> address & ~addr_mask ) >> PAGE_SHIFT );
6184
+ args -> pgprot = pgprot ;
6185
+ args -> writable = writable ;
6186
+ args -> special = special ;
6187
+ }
6188
+
6189
/*
 * Assert (under CONFIG_LOCKDEP) that the caller holds a lock that keeps
 * the page tables of @vma stable: either the i_mmap_rwsem of the backing
 * file's address_space, or the mmap_lock of the owning mm.
 *
 * Note: a VM_IO/VM_PFNMAP vma is not required to have a vm_file, so
 * vm_file must be checked for NULL before reaching for f_mapping.
 */
static inline void pfnmap_lockdep_assert(struct vm_area_struct *vma)
{
#ifdef CONFIG_LOCKDEP
	struct file *file = vma->vm_file;
	struct address_space *mapping = file ? file->f_mapping : NULL;

	if (mapping)
		lockdep_assert(lockdep_is_held(&mapping->i_mmap_rwsem) ||
			       lockdep_is_held(&vma->vm_mm->mmap_lock));
	else
		lockdep_assert(lockdep_is_held(&vma->vm_mm->mmap_lock));
#endif
}
6201
+
6202
+ /**
6203
+ * follow_pfnmap_start() - Look up a pfn mapping at a user virtual address
6204
+ * @args: Pointer to struct @follow_pfnmap_args
6205
+ *
6206
+ * The caller needs to setup args->vma and args->address to point to the
6207
+ * virtual address as the target of such lookup. On a successful return,
6208
+ * the results will be put into other output fields.
6209
+ *
6210
+ * After the caller finished using the fields, the caller must invoke
6211
+ * another follow_pfnmap_end() to proper releases the locks and resources
6212
+ * of such look up request.
6213
+ *
6214
+ * During the start() and end() calls, the results in @args will be valid
6215
+ * as proper locks will be held. After the end() is called, all the fields
6216
+ * in @follow_pfnmap_args will be invalid to be further accessed. Further
6217
+ * use of such information after end() may require proper synchronizations
6218
+ * by the caller with page table updates, otherwise it can create a
6219
+ * security bug.
6220
+ *
6221
+ * If the PTE maps a refcounted page, callers are responsible to protect
6222
+ * against invalidation with MMU notifiers; otherwise access to the PFN at
6223
+ * a later point in time can trigger use-after-free.
6224
+ *
6225
+ * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore
6226
+ * should be taken for read, and the mmap semaphore cannot be released
6227
+ * before the end() is invoked.
6228
+ *
6229
+ * This function must not be used to modify PTE content.
6230
+ *
6231
+ * Return: zero on success, negative otherwise.
6232
+ */
6233
+ int follow_pfnmap_start (struct follow_pfnmap_args * args )
6234
+ {
6235
+ struct vm_area_struct * vma = args -> vma ;
6236
+ unsigned long address = args -> address ;
6237
+ struct mm_struct * mm = vma -> vm_mm ;
6238
+ spinlock_t * lock ;
6239
+ pgd_t * pgdp ;
6240
+ p4d_t * p4dp , p4d ;
6241
+ pud_t * pudp , pud ;
6242
+ pmd_t * pmdp , pmd ;
6243
+ pte_t * ptep , pte ;
6244
+
6245
+ pfnmap_lockdep_assert (vma );
6246
+
6247
+ if (unlikely (address < vma -> vm_start || address >= vma -> vm_end ))
6248
+ goto out ;
6249
+
6250
+ if (!(vma -> vm_flags & (VM_IO | VM_PFNMAP )))
6251
+ goto out ;
6252
+ retry :
6253
+ pgdp = pgd_offset (mm , address );
6254
+ if (pgd_none (* pgdp ) || unlikely (pgd_bad (* pgdp )))
6255
+ goto out ;
6256
+
6257
+ p4dp = p4d_offset (pgdp , address );
6258
+ p4d = READ_ONCE (* p4dp );
6259
+ if (p4d_none (p4d ) || unlikely (p4d_bad (p4d )))
6260
+ goto out ;
6261
+
6262
+ pudp = pud_offset (p4dp , address );
6263
+ pud = READ_ONCE (* pudp );
6264
+ if (pud_none (pud ))
6265
+ goto out ;
6266
+ if (pud_leaf (pud )) {
6267
+ lock = pud_lock (mm , pudp );
6268
+ if (!unlikely (pud_leaf (pud ))) {
6269
+ spin_unlock (lock );
6270
+ goto retry ;
6271
+ }
6272
+ pfnmap_args_setup (args , lock , NULL , pud_pgprot (pud ),
6273
+ pud_pfn (pud ), PUD_MASK , pud_write (pud ),
6274
+ pud_special (pud ));
6275
+ return 0 ;
6276
+ }
6277
+
6278
+ pmdp = pmd_offset (pudp , address );
6279
+ pmd = pmdp_get_lockless (pmdp );
6280
+ if (pmd_leaf (pmd )) {
6281
+ lock = pmd_lock (mm , pmdp );
6282
+ if (!unlikely (pmd_leaf (pmd ))) {
6283
+ spin_unlock (lock );
6284
+ goto retry ;
6285
+ }
6286
+ pfnmap_args_setup (args , lock , NULL , pmd_pgprot (pmd ),
6287
+ pmd_pfn (pmd ), PMD_MASK , pmd_write (pmd ),
6288
+ pmd_special (pmd ));
6289
+ return 0 ;
6290
+ }
6291
+
6292
+ ptep = pte_offset_map_lock (mm , pmdp , address , & lock );
6293
+ if (!ptep )
6294
+ goto out ;
6295
+ pte = ptep_get (ptep );
6296
+ if (!pte_present (pte ))
6297
+ goto unlock ;
6298
+ pfnmap_args_setup (args , lock , ptep , pte_pgprot (pte ),
6299
+ pte_pfn (pte ), PAGE_MASK , pte_write (pte ),
6300
+ pte_special (pte ));
6301
+ return 0 ;
6302
+ unlock :
6303
+ pte_unmap_unlock (ptep , lock );
6304
+ out :
6305
+ return - EINVAL ;
6306
+ }
6307
+ EXPORT_SYMBOL_GPL (follow_pfnmap_start );
6308
+
6309
+ /**
6310
+ * follow_pfnmap_end(): End a follow_pfnmap_start() process
6311
+ * @args: Pointer to struct @follow_pfnmap_args
6312
+ *
6313
+ * Must be used in pair of follow_pfnmap_start(). See the start() function
6314
+ * above for more information.
6315
+ */
6316
+ void follow_pfnmap_end (struct follow_pfnmap_args * args )
6317
+ {
6318
+ if (args -> lock )
6319
+ spin_unlock (args -> lock );
6320
+ if (args -> ptep )
6321
+ pte_unmap (args -> ptep );
6322
+ }
6323
+ EXPORT_SYMBOL_GPL (follow_pfnmap_end );
6324
+
6175
6325
#ifdef CONFIG_HAVE_IOREMAP_PROT
6176
6326
/**
6177
6327
* generic_access_phys - generic implementation for iomem mmap access
0 commit comments