@@ -41,17 +41,15 @@ struct host_vm_change {
4141 int index ;
4242 struct mm_struct * mm ;
4343 void * data ;
44- int force ;
4544};
4645
/*
 * Build a fresh host_vm_change accumulator for one page-table walk over
 * the given address space.  The first op slot is tagged NONE so the
 * consumer can tell the op array is empty, and .index starts at 0 and
 * advances as ops are queued.  Arguments are parenthesized in the
 * expansion so expression arguments cannot change the initializers'
 * meaning (standard function-like-macro hygiene).
 */
#define INIT_HVC(mm, userspace) \
	((struct host_vm_change) \
	 { .ops		= { { .type = NONE } }, \
	   .mm		= (mm), \
	   .data	= NULL, \
	   .userspace	= (userspace), \
	   .index	= 0 })
5553
5654void report_enomem (void )
5755{
@@ -235,7 +233,7 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
235233
236234 prot = ((r ? UM_PROT_READ : 0 ) | (w ? UM_PROT_WRITE : 0 ) |
237235 (x ? UM_PROT_EXEC : 0 ));
238- if (hvc -> force || pte_newpage (* pte )) {
236+ if (pte_newpage (* pte )) {
239237 if (pte_present (* pte )) {
240238 if (pte_newpage (* pte ))
241239 ret = add_mmap (addr , pte_val (* pte ) & PAGE_MASK ,
@@ -261,7 +259,7 @@ static inline int update_pmd_range(pud_t *pud, unsigned long addr,
261259 do {
262260 next = pmd_addr_end (addr , end );
263261 if (!pmd_present (* pmd )) {
264- if (hvc -> force || pmd_newpage (* pmd )) {
262+ if (pmd_newpage (* pmd )) {
265263 ret = add_munmap (addr , next - addr , hvc );
266264 pmd_mkuptodate (* pmd );
267265 }
@@ -283,7 +281,7 @@ static inline int update_pud_range(p4d_t *p4d, unsigned long addr,
283281 do {
284282 next = pud_addr_end (addr , end );
285283 if (!pud_present (* pud )) {
286- if (hvc -> force || pud_newpage (* pud )) {
284+ if (pud_newpage (* pud )) {
287285 ret = add_munmap (addr , next - addr , hvc );
288286 pud_mkuptodate (* pud );
289287 }
@@ -305,7 +303,7 @@ static inline int update_p4d_range(pgd_t *pgd, unsigned long addr,
305303 do {
306304 next = p4d_addr_end (addr , end );
307305 if (!p4d_present (* p4d )) {
308- if (hvc -> force || p4d_newpage (* p4d )) {
306+ if (p4d_newpage (* p4d )) {
309307 ret = add_munmap (addr , next - addr , hvc );
310308 p4d_mkuptodate (* p4d );
311309 }
@@ -316,19 +314,19 @@ static inline int update_p4d_range(pgd_t *pgd, unsigned long addr,
316314}
317315
318316static void fix_range_common (struct mm_struct * mm , unsigned long start_addr ,
319- unsigned long end_addr , int force )
317+ unsigned long end_addr )
320318{
321319 pgd_t * pgd ;
322320 struct host_vm_change hvc ;
323321 unsigned long addr = start_addr , next ;
324322 int ret = 0 , userspace = 1 ;
325323
326- hvc = INIT_HVC (mm , force , userspace );
324+ hvc = INIT_HVC (mm , userspace );
327325 pgd = pgd_offset (mm , addr );
328326 do {
329327 next = pgd_addr_end (addr , end_addr );
330328 if (!pgd_present (* pgd )) {
331- if (force || pgd_newpage (* pgd )) {
329+ if (pgd_newpage (* pgd )) {
332330 ret = add_munmap (addr , next - addr , & hvc );
333331 pgd_mkuptodate (* pgd );
334332 }
@@ -349,11 +347,11 @@ static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
349347 pmd_t * pmd ;
350348 pte_t * pte ;
351349 unsigned long addr , last ;
352- int updated = 0 , err = 0 , force = 0 , userspace = 0 ;
350+ int updated = 0 , err = 0 , userspace = 0 ;
353351 struct host_vm_change hvc ;
354352
355353 mm = & init_mm ;
356- hvc = INIT_HVC (mm , force , userspace );
354+ hvc = INIT_HVC (mm , userspace );
357355 for (addr = start ; addr < end ;) {
358356 pgd = pgd_offset (mm , addr );
359357 if (!pgd_present (* pgd )) {
@@ -537,7 +535,7 @@ void __flush_tlb_one(unsigned long addr)
537535}
538536
539537static void fix_range (struct mm_struct * mm , unsigned long start_addr ,
540- unsigned long end_addr , int force )
538+ unsigned long end_addr )
541539{
542540 /*
543541 * Don't bother flushing if this address space is about to be
@@ -546,15 +544,15 @@ static void fix_range(struct mm_struct *mm, unsigned long start_addr,
546544 if (atomic_read (& mm -> mm_users ) == 0 )
547545 return ;
548546
549- fix_range_common (mm , start_addr , end_addr , force );
547+ fix_range_common (mm , start_addr , end_addr );
550548}
551549
552550void flush_tlb_range (struct vm_area_struct * vma , unsigned long start ,
553551 unsigned long end )
554552{
555553 if (vma -> vm_mm == NULL )
556554 flush_tlb_kernel_range_common (start , end );
557- else fix_range (vma -> vm_mm , start , end , 0 );
555+ else fix_range (vma -> vm_mm , start , end );
558556}
559557EXPORT_SYMBOL (flush_tlb_range );
560558
@@ -564,17 +562,5 @@ void flush_tlb_mm(struct mm_struct *mm)
564562 VMA_ITERATOR (vmi , mm , 0 );
565563
566564 for_each_vma (vmi , vma )
567- fix_range (mm , vma -> vm_start , vma -> vm_end , 0 );
568- }
569-
570- void force_flush_all (void )
571- {
572- struct mm_struct * mm = current -> mm ;
573- struct vm_area_struct * vma ;
574- VMA_ITERATOR (vmi , mm , 0 );
575-
576- mmap_read_lock (mm );
577- for_each_vma (vmi , vma )
578- fix_range (mm , vma -> vm_start , vma -> vm_end , 1 );
579- mmap_read_unlock (mm );
565+ fix_range (mm , vma -> vm_start , vma -> vm_end );
580566}
0 commit comments