@@ -162,13 +162,97 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_vm_reserve);
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/**
+ * ttm_bo_vm_insert_huge - Insert a pfn for PUD or PMD faults
+ * @vmf: Fault data
+ * @bo: The buffer object
+ * @page_offset: Page offset from bo start
+ * @fault_page_size: The size of the fault in pages.
+ * @pgprot: The page protections.
+ * Does additional checking whether it's possible to insert a PUD or PMD
+ * pfn and performs the insertion.
+ *
+ * Return: VM_FAULT_NOPAGE on successful insertion, VM_FAULT_FALLBACK if
+ * a huge fault was not possible, or on insertion error.
+ */
+static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
+					struct ttm_buffer_object *bo,
+					pgoff_t page_offset,
+					pgoff_t fault_page_size,
+					pgprot_t pgprot)
+{
+	pgoff_t i;
+	vm_fault_t ret;
+	unsigned long pfn;
+	pfn_t pfnt;
+	struct ttm_tt *ttm = bo->ttm;
+	bool write = vmf->flags & FAULT_FLAG_WRITE;
+
+	/* Fault should not cross bo boundary. */
+	page_offset &= ~(fault_page_size - 1);
+	if (page_offset + fault_page_size > bo->num_pages)
+		goto out_fallback;
+
+	if (bo->mem.bus.is_iomem)
+		pfn = ttm_bo_io_mem_pfn(bo, page_offset);
+	else
+		pfn = page_to_pfn(ttm->pages[page_offset]);
+
+	/* pfn must be fault_page_size aligned. */
+	if ((pfn & (fault_page_size - 1)) != 0)
+		goto out_fallback;
+
+	/* Check that memory is contiguous. */
+	if (!bo->mem.bus.is_iomem) {
+		for (i = 1; i < fault_page_size; ++i) {
+			if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i)
+				goto out_fallback;
+		}
+	} else if (bo->bdev->driver->io_mem_pfn) {
+		for (i = 1; i < fault_page_size; ++i) {
+			if (ttm_bo_io_mem_pfn(bo, page_offset + i) != pfn + i)
+				goto out_fallback;
+		}
+	}
+
+	pfnt = __pfn_to_pfn_t(pfn, PFN_DEV);
+	if (fault_page_size == (HPAGE_PMD_SIZE >> PAGE_SHIFT))
+		ret = vmf_insert_pfn_pmd_prot(vmf, pfnt, pgprot, write);
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+	else if (fault_page_size == (HPAGE_PUD_SIZE >> PAGE_SHIFT))
+		ret = vmf_insert_pfn_pud_prot(vmf, pfnt, pgprot, write);
+#endif
+	else
+		WARN_ON_ONCE(ret = VM_FAULT_FALLBACK);
+
+	if (ret != VM_FAULT_NOPAGE)
+		goto out_fallback;
+
+	return VM_FAULT_NOPAGE;
+out_fallback:
+	count_vm_event(THP_FAULT_FALLBACK);
+	return VM_FAULT_FALLBACK;
+}
+#else
+static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
+					struct ttm_buffer_object *bo,
+					pgoff_t page_offset,
+					pgoff_t fault_page_size,
+					pgprot_t pgprot)
+{
+	return VM_FAULT_FALLBACK;
+}
+#endif
+
 /**
  * ttm_bo_vm_fault_reserved - TTM fault helper
  * @vmf: The struct vm_fault given as argument to the fault callback
  * @prot: The page protection to be used for this memory area.
  * @num_prefault: Maximum number of prefault pages. The caller may want to
  * specify this based on madvice settings and the size of the GPU object
  * backed by the memory.
+ * @fault_page_size: The size of the fault in pages.
  *
  * This function inserts one or more page table entries pointing to the
  * memory backing the buffer object, and then returns a return code
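In short, ttm_bo_vm_insert_huge() above only succeeds when the backing memory can actually be described by a single PMD or PUD entry: the fault must stay within the bo, the starting pfn must be aligned to the huge-page size, and the covered pfns must be physically contiguous. A minimal standalone sketch of the alignment and contiguity checks, written against a bare pfn array rather than a ttm_tt (can_map_huge() and its signature are invented for illustration, not part of this patch):

/*
 * Illustration only: the alignment + contiguity test that gates a huge
 * insertion, expressed over a plain array of page frame numbers.
 */
#include <stdbool.h>
#include <stddef.h>

static bool can_map_huge(const unsigned long *pfns, size_t nr_pages,
                         size_t huge_pages)
{
        size_t i;

        /* The fault must fit, and the first pfn must be huge-page aligned. */
        if (nr_pages < huge_pages || (pfns[0] & (huge_pages - 1)) != 0)
                return false;

        /* Every pfn covered by the huge entry must be contiguous. */
        for (i = 1; i < huge_pages; i++)
                if (pfns[i] != pfns[0] + i)
                        return false;

        return true;
}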
@@ -182,7 +266,8 @@ EXPORT_SYMBOL(ttm_bo_vm_reserve);
  */
 vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 				    pgprot_t prot,
-				    pgoff_t num_prefault)
+				    pgoff_t num_prefault,
+				    pgoff_t fault_page_size)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct ttm_buffer_object *bo = vma->vm_private_data;
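For scale: callers that only ever take base-page faults pass fault_page_size == 1 (see the ttm_bo_vm_fault hunk below), while on x86-64 with 4 KiB base pages a PMD-level fault covers 2 MiB, so fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT = 512 pages, and a PUD-level fault covers 1 GiB, i.e. 262144 pages.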
@@ -274,6 +359,13 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		prot = pgprot_decrypted(prot);
 	}
 
+	/* We don't prefault on huge faults. Yet. */
+	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) {
+		ret = ttm_bo_vm_insert_huge(vmf, bo, page_offset,
+					    fault_page_size, prot);
+		goto out_io_unlock;
+	}
+
 	/*
 	 * Speculatively prefault a number of pages. Only error on
 	 * first page.
@@ -340,7 +432,7 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 		return ret;
 
 	prot = vma->vm_page_prot;
-	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
+	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
 	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 		return ret;
 
@@ -350,6 +442,66 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 }
 EXPORT_SYMBOL(ttm_bo_vm_fault);
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/**
+ * ttm_pgprot_is_wrprotecting - Is a page protection value write-protecting?
+ * @prot: The page protection value
+ *
+ * Return: true if @prot is write-protecting. false otherwise.
+ */
+static bool ttm_pgprot_is_wrprotecting(pgprot_t prot)
+{
+	/*
+	 * This is meant to say "pgprot_wrprotect(prot) == prot" in a generic
+	 * way. Unfortunately there is no generic pgprot_wrprotect.
+	 */
+	return pte_val(pte_wrprotect(__pte(pgprot_val(prot)))) ==
+		pgprot_val(prot);
+}
+
+static vm_fault_t ttm_bo_vm_huge_fault(struct vm_fault *vmf,
+				       enum page_entry_size pe_size)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	pgprot_t prot;
+	struct ttm_buffer_object *bo = vma->vm_private_data;
+	vm_fault_t ret;
+	pgoff_t fault_page_size = 0;
+	bool write = vmf->flags & FAULT_FLAG_WRITE;
+
+	switch (pe_size) {
+	case PE_SIZE_PMD:
+		fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT;
+		break;
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+	case PE_SIZE_PUD:
+		fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT;
+		break;
+#endif
+	default:
+		WARN_ON_ONCE(1);
+		return VM_FAULT_FALLBACK;
+	}
+
+	/* Fallback on write dirty-tracking or COW */
+	if (write && ttm_pgprot_is_wrprotecting(vma->vm_page_prot))
+		return VM_FAULT_FALLBACK;
+
+	ret = ttm_bo_vm_reserve(bo, vmf);
+	if (ret)
+		return ret;
+
+	prot = vm_get_page_prot(vma->vm_flags);
+	ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size);
+	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+		return ret;
+
+	dma_resv_unlock(bo->base.resv);
+
+	return ret;
+}
+#endif
+
 void ttm_bo_vm_open(struct vm_area_struct *vma)
 {
 	struct ttm_buffer_object *bo = vma->vm_private_data;
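As a concrete reading of ttm_pgprot_is_wrprotecting() above (x86-64 example, an assumption not taken from the patch): pte_wrprotect() clears _PAGE_RW, so a protection value that already lacks _PAGE_RW, such as PAGE_READONLY, round-trips unchanged and is reported as write-protecting, while PAGE_SHARED, which has _PAGE_RW set, is not. A write fault into such a write-protected mapping therefore returns VM_FAULT_FALLBACK, so dirty tracking and COW are handled by the ordinary base-page fault path.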
@@ -451,7 +603,10 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
 	.fault = ttm_bo_vm_fault,
 	.open = ttm_bo_vm_open,
 	.close = ttm_bo_vm_close,
-	.access = ttm_bo_vm_access
+	.access = ttm_bo_vm_access,
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	.huge_fault = ttm_bo_vm_huge_fault,
+#endif
 };
 
 static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
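A driver that installs its own vm_operations_struct instead of ttm_bo_vm_ops would follow the same pattern when opting in to transparent huge pages. A hedged sketch, where the my_drv_* handler names are placeholders and only the ttm_bo_vm_* helpers are functions from this file:

/* Hypothetical driver-side vm_ops; my_drv_* names are made up. */
static const struct vm_operations_struct my_drv_vm_ops = {
        .fault = my_drv_bo_vm_fault,   /* would wrap ttm_bo_vm_fault_reserved(vmf,
                                        * prot, TTM_BO_VM_NUM_PREFAULT, 1) */
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close,
        .access = ttm_bo_vm_access,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        .huge_fault = my_drv_bo_vm_huge_fault, /* maps pe_size to a fault_page_size,
                                                * as ttm_bo_vm_huge_fault() does */
#endif
};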