@@ -473,7 +473,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
 	 * a lot of unnecessary write faults.
 	 */
 	if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE)
-		prot = vma->vm_page_prot;
+		prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
 	else
 		prot = vm_get_page_prot(vma->vm_flags);
 
@@ -486,3 +486,75 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
 
 	return ret;
 }
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
+				enum page_entry_size pe_size)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
+	    vma->vm_private_data;
+	struct vmw_buffer_object *vbo =
+		container_of(bo, struct vmw_buffer_object, base);
+	pgprot_t prot;
+	vm_fault_t ret;
+	pgoff_t fault_page_size;
+	bool write = vmf->flags & FAULT_FLAG_WRITE;
+	bool is_cow_mapping =
+		(vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
+
+	switch (pe_size) {
+	case PE_SIZE_PMD:
+		fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT;
+		break;
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+	case PE_SIZE_PUD:
+		fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT;
+		break;
+#endif
+	default:
+		WARN_ON_ONCE(1);
+		return VM_FAULT_FALLBACK;
+	}
+
+	/* Always do write dirty-tracking and COW on PTE level. */
+	if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping))
+		return VM_FAULT_FALLBACK;
+
+	ret = ttm_bo_vm_reserve(bo, vmf);
+	if (ret)
+		return ret;
+
+	if (vbo->dirty) {
+		pgoff_t allowed_prefault;
+		unsigned long page_offset;
+
+		page_offset = vmf->pgoff -
+			drm_vma_node_start(&bo->base.vma_node);
+		if (page_offset >= bo->num_pages ||
+		    vmw_resources_clean(vbo, page_offset,
+					page_offset + PAGE_SIZE,
+					&allowed_prefault)) {
+			ret = VM_FAULT_SIGBUS;
+			goto out_unlock;
+		}
+
+		/*
+		 * Write protect, so we get a new fault on write, and can
+		 * split.
+		 */
+		prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
+	} else {
+		prot = vm_get_page_prot(vma->vm_flags);
+	}
+
+	ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size);
+	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+		return ret;
+
+out_unlock:
+	dma_resv_unlock(bo->base.resv);
+
+	return ret;
+}
+#endif
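
Note: the new handler falls back to PTE level whenever the mapping is a COW mapping, using the `(vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE` test from the hunk above. Below is a minimal userspace sketch of just that predicate, for illustration only; `MOCK_VM_SHARED` and `MOCK_VM_MAYWRITE` are stand-ins for the kernel's flag macros (the values match current kernel definitions, but the names and the test harness are not part of this patch).

```c
#include <stdbool.h>
#include <stdio.h>

/* Mock stand-ins for the kernel's VM_SHARED / VM_MAYWRITE flags. */
#define MOCK_VM_SHARED   0x00000008UL
#define MOCK_VM_MAYWRITE 0x00000020UL

/*
 * A mapping is COW when it is private (VM_SHARED clear) but may still
 * become writable (VM_MAYWRITE set) -- mirroring is_cow_mapping above.
 */
static bool is_cow_mapping(unsigned long vm_flags)
{
	return (vm_flags & (MOCK_VM_SHARED | MOCK_VM_MAYWRITE)) ==
	       MOCK_VM_MAYWRITE;
}

int main(void)
{
	/* Private, potentially writable mapping: COW -> prints 1. */
	printf("%d\n", is_cow_mapping(MOCK_VM_MAYWRITE));
	/* Shared mapping never uses COW -> prints 0. */
	printf("%d\n", is_cow_mapping(MOCK_VM_SHARED | MOCK_VM_MAYWRITE));
	return 0;
}
```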
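The `fault_page_size` computed in the `switch` is the huge entry's span in base pages, later passed to `ttm_bo_vm_fault_reserved()`. A quick sketch of the arithmetic, assuming x86-64 geometry (4 KiB base pages, 2 MiB PMD, 1 GiB PUD); the `SKETCH_*` macros are illustrative substitutes for `PAGE_SHIFT`, `HPAGE_PMD_SIZE`, and `HPAGE_PUD_SIZE`, which are architecture-dependent:

```c
#include <stdio.h>

/* Assumed x86-64 values; real kernels define these per architecture. */
#define SKETCH_PAGE_SHIFT     12          /* 4 KiB base pages */
#define SKETCH_HPAGE_PMD_SIZE (1UL << 21) /* 2 MiB */
#define SKETCH_HPAGE_PUD_SIZE (1UL << 30) /* 1 GiB */

int main(void)
{
	/* PMD entry spans 2 MiB / 4 KiB = 512 base pages. */
	printf("PMD fault_page_size = %lu pages\n",
	       SKETCH_HPAGE_PMD_SIZE >> SKETCH_PAGE_SHIFT);
	/* PUD entry spans 1 GiB / 4 KiB = 262144 base pages. */
	printf("PUD fault_page_size = %lu pages\n",
	       SKETCH_HPAGE_PUD_SIZE >> SKETCH_PAGE_SHIFT);
	return 0;
}
```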