@@ -3039,59 +3039,6 @@ static kvm_pfn_t kvm_follow_pfn(struct kvm_follow_pfn *kfp)
 	return hva_to_pfn(kfp);
 }
 
-kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
-			       bool interruptible, bool no_wait,
-			       bool write_fault, bool *writable)
-{
-	struct kvm_follow_pfn kfp = {
-		.slot = slot,
-		.gfn = gfn,
-		.map_writable = writable,
-	};
-
-	if (write_fault)
-		kfp.flags |= FOLL_WRITE;
-	if (no_wait)
-		kfp.flags |= FOLL_NOWAIT;
-	if (interruptible)
-		kfp.flags |= FOLL_INTERRUPTIBLE;
-
-	return kvm_follow_pfn(&kfp);
-}
-EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
-
-kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
-			  bool *writable)
-{
-	struct kvm_follow_pfn kfp = {
-		.slot = gfn_to_memslot(kvm, gfn),
-		.gfn = gfn,
-		.flags = write_fault ? FOLL_WRITE : 0,
-		.map_writable = writable,
-	};
-
-	return kvm_follow_pfn(&kfp);
-}
-EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
-
-kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
-{
-	struct kvm_follow_pfn kfp = {
-		.slot = slot,
-		.gfn = gfn,
-		.flags = FOLL_WRITE,
-	};
-
-	return kvm_follow_pfn(&kfp);
-}
-EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
-
-kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
-{
-	return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
-}
-EXPORT_SYMBOL_GPL(gfn_to_pfn);
-
 kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn,
 			    unsigned int foll, bool *writable,
 			    struct page **refcounted_page)
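
For context, a minimal caller sketch (not part of this commit) of how a user of the removed gfn_to_pfn(kvm, gfn) could be converted to the __kvm_faultin_pfn() API that this hunk leaves in place. Only the signatures visible in the diff above are assumed; the wrapper name example_fault_in is hypothetical.

/*
 * Hypothetical conversion sketch: the removed gfn_to_pfn() requested a
 * writable mapping via FOLL_WRITE, so FOLL_WRITE mirrors its behavior here.
 */
static kvm_pfn_t example_fault_in(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
	struct page *refcounted_page = NULL;
	bool writable;
	kvm_pfn_t pfn;

	pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, &writable,
				&refcounted_page);

	/*
	 * Unlike the removed helpers, the caller now gets the backing page
	 * back and is responsible for releasing it once done with the pfn
	 * (e.g. via kvm_release_page_clean()/kvm_release_page_dirty()).
	 */
	return pfn;
}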