@@ -156,13 +156,97 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_vm_reserve);

+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/**
+ * ttm_bo_vm_insert_huge - Insert a pfn for PUD or PMD faults
+ * @vmf: Fault data
+ * @bo: The buffer object
+ * @page_offset: Page offset from bo start
+ * @fault_page_size: The size of the fault in pages.
+ * @pgprot: The page protections.
+ * Does additional checking whether it's possible to insert a PUD or PMD
+ * pfn and performs the insertion.
+ *
+ * Return: VM_FAULT_NOPAGE on successful insertion, VM_FAULT_FALLBACK if
+ * a huge fault was not possible, or on insertion error.
+ */
+static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
+                                        struct ttm_buffer_object *bo,
+                                        pgoff_t page_offset,
+                                        pgoff_t fault_page_size,
+                                        pgprot_t pgprot)
+{
+        pgoff_t i;
+        vm_fault_t ret;
+        unsigned long pfn;
+        pfn_t pfnt;
+        struct ttm_tt *ttm = bo->ttm;
+        bool write = vmf->flags & FAULT_FLAG_WRITE;
+
+        /* Fault should not cross bo boundary. */
+        page_offset &= ~(fault_page_size - 1);
+        if (page_offset + fault_page_size > bo->num_pages)
+                goto out_fallback;
+
+        if (bo->mem.bus.is_iomem)
+                pfn = ttm_bo_io_mem_pfn(bo, page_offset);
+        else
+                pfn = page_to_pfn(ttm->pages[page_offset]);
+
+        /* pfn must be fault_page_size aligned. */
+        if ((pfn & (fault_page_size - 1)) != 0)
+                goto out_fallback;
+
+        /* Check that memory is contiguous. */
+        if (!bo->mem.bus.is_iomem) {
+                for (i = 1; i < fault_page_size; ++i) {
+                        if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i)
+                                goto out_fallback;
+                }
+        } else if (bo->bdev->driver->io_mem_pfn) {
+                for (i = 1; i < fault_page_size; ++i) {
+                        if (ttm_bo_io_mem_pfn(bo, page_offset + i) != pfn + i)
+                                goto out_fallback;
+                }
+        }
+
+        pfnt = __pfn_to_pfn_t(pfn, PFN_DEV);
+        if (fault_page_size == (HPAGE_PMD_SIZE >> PAGE_SHIFT))
+                ret = vmf_insert_pfn_pmd_prot(vmf, pfnt, pgprot, write);
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+        else if (fault_page_size == (HPAGE_PUD_SIZE >> PAGE_SHIFT))
+                ret = vmf_insert_pfn_pud_prot(vmf, pfnt, pgprot, write);
+#endif
+        else
+                WARN_ON_ONCE(ret = VM_FAULT_FALLBACK);
+
+        if (ret != VM_FAULT_NOPAGE)
+                goto out_fallback;
+
+        return VM_FAULT_NOPAGE;
+out_fallback:
+        count_vm_event(THP_FAULT_FALLBACK);
+        return VM_FAULT_FALLBACK;
+}
+#else
+static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
+                                        struct ttm_buffer_object *bo,
+                                        pgoff_t page_offset,
+                                        pgoff_t fault_page_size,
+                                        pgprot_t pgprot)
+{
+        return VM_FAULT_FALLBACK;
+}
+#endif
+
 /**
  * ttm_bo_vm_fault_reserved - TTM fault helper
  * @vmf: The struct vm_fault given as argument to the fault callback
  * @prot: The page protection to be used for this memory area.
  * @num_prefault: Maximum number of prefault pages. The caller may want to
  * specify this based on madvice settings and the size of the GPU object
  * backed by the memory.
+ * @fault_page_size: The size of the fault in pages.
 *
 * This function inserts one or more page table entries pointing to the
 * memory backing the buffer object, and then returns a return code
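An aside on the arithmetic above, not part of the patch itself: ttm_bo_vm_insert_huge() only proceeds when the fault offset, rounded down to a huge-page boundary, still lies entirely inside the buffer object and the backing pfn is aligned to that same boundary. Below is a minimal userspace sketch of those two checks, assuming the common x86-64 case of 4 KiB base pages and 2 MiB PMD entries (fault_page_size == 512); the constant and helper name are invented for illustration.

/* Illustration only: mirrors the boundary/alignment checks in
 * ttm_bo_vm_insert_huge(); FAULT_PAGE_SIZE and the helper are assumptions. */
#include <stdbool.h>
#include <stdio.h>

#define FAULT_PAGE_SIZE 512UL   /* HPAGE_PMD_SIZE >> PAGE_SHIFT on x86-64 */

static bool huge_insert_possible(unsigned long page_offset,
                                 unsigned long num_pages,
                                 unsigned long first_pfn)
{
        /* Round the fault offset down to a huge-page boundary ... */
        page_offset &= ~(FAULT_PAGE_SIZE - 1);

        /* ... the rounded range must not cross the bo boundary ... */
        if (page_offset + FAULT_PAGE_SIZE > num_pages)
                return false;

        /* ... and the backing pfn must be aligned the same way. */
        return (first_pfn & (FAULT_PAGE_SIZE - 1)) == 0;
}

int main(void)
{
        /* Offset 700 rounds down to 512; a 1024-page bo can hold [512, 1024). */
        printf("%d\n", huge_insert_possible(700, 1024, 512 * 42));      /* 1 */
        /* An unaligned backing pfn forces the VM_FAULT_FALLBACK path instead. */
        printf("%d\n", huge_insert_possible(700, 1024, 512 * 42 + 1));  /* 0 */
        return 0;
}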
@@ -176,7 +260,8 @@ EXPORT_SYMBOL(ttm_bo_vm_reserve);
 */
 vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
                                     pgprot_t prot,
-                                    pgoff_t num_prefault)
+                                    pgoff_t num_prefault,
+                                    pgoff_t fault_page_size)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct ttm_buffer_object *bo = vma->vm_private_data;
@@ -268,6 +353,13 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
                prot = pgprot_decrypted(prot);
        }

+        /* We don't prefault on huge faults. Yet. */
+        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) {
+                ret = ttm_bo_vm_insert_huge(vmf, bo, page_offset,
+                                            fault_page_size, prot);
+                goto out_io_unlock;
+        }
+
        /*
         * Speculatively prefault a number of pages. Only error on
         * first page.
@@ -334,7 +426,7 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
                return ret;

        prot = vma->vm_page_prot;
-        ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
+        ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                return ret;

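For scale, and purely as an illustration with assumed constants (a typical x86-64 build with 4 KiB base pages): the ordinary fault path above passes fault_page_size = 1, a single base page, while the huge-fault entry point added below derives the PMD and PUD page counts like this.

/* Illustration only: the pe_size -> fault_page_size arithmetic with assumed
 * x86-64 constants; the real values come from the architecture headers. */
#include <stdio.h>

#define PAGE_SHIFT      12              /* 4 KiB base pages (assumed) */
#define HPAGE_PMD_SIZE  (2UL << 20)     /* 2 MiB PMD huge page (assumed) */
#define HPAGE_PUD_SIZE  (1UL << 30)     /* 1 GiB PUD huge page (assumed) */

int main(void)
{
        /* PE_SIZE_PMD: 2 MiB / 4 KiB = 512 base pages per huge fault. */
        printf("PMD fault_page_size = %lu\n", HPAGE_PMD_SIZE >> PAGE_SHIFT);
        /* PE_SIZE_PUD: 1 GiB / 4 KiB = 262144 base pages per huge fault. */
        printf("PUD fault_page_size = %lu\n", HPAGE_PUD_SIZE >> PAGE_SHIFT);
        return 0;
}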
@@ -344,6 +436,66 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 }
 EXPORT_SYMBOL(ttm_bo_vm_fault);

+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/**
+ * ttm_pgprot_is_wrprotecting - Is a page protection value write-protecting?
+ * @prot: The page protection value
+ *
+ * Return: true if @prot is write-protecting. false otherwise.
+ */
+static bool ttm_pgprot_is_wrprotecting(pgprot_t prot)
+{
+        /*
+         * This is meant to say "pgprot_wrprotect(prot) == prot" in a generic
+         * way. Unfortunately there is no generic pgprot_wrprotect.
+         */
+        return pte_val(pte_wrprotect(__pte(pgprot_val(prot)))) ==
+                pgprot_val(prot);
+}
+
+static vm_fault_t ttm_bo_vm_huge_fault(struct vm_fault *vmf,
+                                       enum page_entry_size pe_size)
+{
+        struct vm_area_struct *vma = vmf->vma;
+        pgprot_t prot;
+        struct ttm_buffer_object *bo = vma->vm_private_data;
+        vm_fault_t ret;
+        pgoff_t fault_page_size = 0;
+        bool write = vmf->flags & FAULT_FLAG_WRITE;
+
+        switch (pe_size) {
+        case PE_SIZE_PMD:
+                fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT;
+                break;
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+        case PE_SIZE_PUD:
+                fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT;
+                break;
+#endif
+        default:
+                WARN_ON_ONCE(1);
+                return VM_FAULT_FALLBACK;
+        }
+
+        /* Fallback on write dirty-tracking or COW */
+        if (write && ttm_pgprot_is_wrprotecting(vma->vm_page_prot))
+                return VM_FAULT_FALLBACK;
+
+        ret = ttm_bo_vm_reserve(bo, vmf);
+        if (ret)
+                return ret;
+
+        prot = vm_get_page_prot(vma->vm_flags);
+        ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size);
+        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+                return ret;
+
+        dma_resv_unlock(bo->base.resv);
+
+        return ret;
+}
+#endif
+
 void ttm_bo_vm_open(struct vm_area_struct *vma)
 {
        struct ttm_buffer_object *bo = vma->vm_private_data;
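The ttm_pgprot_is_wrprotecting() helper added above relies on a round-trip trick: a protection value is already write-protecting exactly when clearing its write permission leaves it unchanged. Here is a hedged userspace sketch of the same idea using a made-up flags word; the real helper goes through the arch pte/pgprot accessors because there is no generic pgprot_wrprotect().

/* Illustration only: the "already write-protected" round-trip test with a
 * hypothetical flags word instead of the arch pgprot_t/pte types. */
#include <stdbool.h>
#include <stdio.h>

#define PROT_WRITE_BIT  0x2UL   /* hypothetical write-permission bit */

static unsigned long fake_wrprotect(unsigned long prot)
{
        return prot & ~PROT_WRITE_BIT;  /* clear the write bit */
}

static bool is_wrprotecting(unsigned long prot)
{
        /* Write-protecting iff removing write permission changes nothing,
         * which is what ttm_pgprot_is_wrprotecting() checks via pte_wrprotect(). */
        return fake_wrprotect(prot) == prot;
}

int main(void)
{
        printf("%d\n", is_wrprotecting(0x1UL));  /* read-only mapping:  1 */
        printf("%d\n", is_wrprotecting(0x3UL));  /* read-write mapping: 0 */
        return 0;
}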
@@ -445,7 +597,10 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
        .fault = ttm_bo_vm_fault,
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close,
-        .access = ttm_bo_vm_access
+        .access = ttm_bo_vm_access,
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+        .huge_fault = ttm_bo_vm_huge_fault,
+#endif
 };

 static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
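With .huge_fault now wired into ttm_bo_vm_ops above, the core mm tries the huge handler first for PMD/PUD-sized faults and falls back to the ordinary .fault path whenever the handler returns VM_FAULT_FALLBACK, which is what ttm_bo_vm_insert_huge() returns when its alignment or contiguity checks fail. A simplified userspace mock-up of that contract follows; every name and type in it is invented for illustration, and it is not how the kernel dispatch is structured internally.

/* Illustration only: a toy model of the huge_fault -> fault fallback contract. */
#include <stdio.h>

enum { FAKE_FAULT_NOPAGE, FAKE_FAULT_FALLBACK };

struct fake_vm_ops {
        int (*huge_fault)(int pe_size);
        int (*fault)(void);
};

/* Toy dispatcher: try the huge handler first (if present); whenever it asks
 * for a fallback, retry with the ordinary per-page handler. */
static int fake_handle_fault(const struct fake_vm_ops *ops, int pe_size)
{
        if (ops->huge_fault && ops->huge_fault(pe_size) == FAKE_FAULT_NOPAGE)
                return FAKE_FAULT_NOPAGE;
        return ops->fault();
}

static int demo_huge_fault(int pe_size)
{
        (void)pe_size;
        /* Pretend the bo is not suitably aligned, so request the fallback. */
        return FAKE_FAULT_FALLBACK;
}

static int demo_fault(void)
{
        return FAKE_FAULT_NOPAGE;       /* per-page insertion succeeds here */
}

int main(void)
{
        const struct fake_vm_ops ops = {
                .huge_fault = demo_huge_fault,
                .fault = demo_fault,
        };

        printf("fault resolved: %d\n",
               fake_handle_fault(&ops, 0) == FAKE_FAULT_NOPAGE);        /* 1 */
        return 0;
}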