@@ -41,17 +41,15 @@ struct host_vm_change {
 	int index;
 	struct mm_struct *mm;
 	void *data;
-	int force;
 };
 
-#define INIT_HVC(mm, force, userspace) \
+#define INIT_HVC(mm, userspace) \
 	((struct host_vm_change) \
 	 { .ops		= { { .type = NONE } }, \
 	   .mm		= mm, \
	   .data	= NULL, \
	   .userspace	= userspace, \
-	   .index	= 0, \
-	   .force	= force })
+	   .index	= 0 })
 
 void report_enomem(void)
 {
@@ -235,7 +233,7 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
 
 	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
 		(x ? UM_PROT_EXEC : 0));
-	if (hvc->force || pte_newpage(*pte)) {
+	if (pte_newpage(*pte)) {
 		if (pte_present(*pte)) {
 			if (pte_newpage(*pte))
 				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
@@ -261,7 +259,7 @@ static inline int update_pmd_range(pud_t *pud, unsigned long addr,
 	do {
 		next = pmd_addr_end(addr, end);
 		if (!pmd_present(*pmd)) {
-			if (hvc->force || pmd_newpage(*pmd)) {
+			if (pmd_newpage(*pmd)) {
 				ret = add_munmap(addr, next - addr, hvc);
 				pmd_mkuptodate(*pmd);
 			}
@@ -283,7 +281,7 @@ static inline int update_pud_range(p4d_t *p4d, unsigned long addr,
 	do {
 		next = pud_addr_end(addr, end);
 		if (!pud_present(*pud)) {
-			if (hvc->force || pud_newpage(*pud)) {
+			if (pud_newpage(*pud)) {
 				ret = add_munmap(addr, next - addr, hvc);
 				pud_mkuptodate(*pud);
 			}
@@ -305,7 +303,7 @@ static inline int update_p4d_range(pgd_t *pgd, unsigned long addr,
 	do {
 		next = p4d_addr_end(addr, end);
 		if (!p4d_present(*p4d)) {
-			if (hvc->force || p4d_newpage(*p4d)) {
+			if (p4d_newpage(*p4d)) {
 				ret = add_munmap(addr, next - addr, hvc);
 				p4d_mkuptodate(*p4d);
 			}
@@ -316,19 +314,19 @@ static inline int update_p4d_range(pgd_t *pgd, unsigned long addr,
 }
 
 static void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
-			     unsigned long end_addr, int force)
+			     unsigned long end_addr)
 {
 	pgd_t *pgd;
 	struct host_vm_change hvc;
 	unsigned long addr = start_addr, next;
 	int ret = 0, userspace = 1;
 
-	hvc = INIT_HVC(mm, force, userspace);
+	hvc = INIT_HVC(mm, userspace);
 	pgd = pgd_offset(mm, addr);
 	do {
 		next = pgd_addr_end(addr, end_addr);
 		if (!pgd_present(*pgd)) {
-			if (force || pgd_newpage(*pgd)) {
+			if (pgd_newpage(*pgd)) {
 				ret = add_munmap(addr, next - addr, &hvc);
 				pgd_mkuptodate(*pgd);
 			}
@@ -349,11 +347,11 @@ static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
 	pmd_t *pmd;
 	pte_t *pte;
 	unsigned long addr, last;
-	int updated = 0, err = 0, force = 0, userspace = 0;
+	int updated = 0, err = 0, userspace = 0;
 	struct host_vm_change hvc;
 
 	mm = &init_mm;
-	hvc = INIT_HVC(mm, force, userspace);
+	hvc = INIT_HVC(mm, userspace);
 	for (addr = start; addr < end;) {
 		pgd = pgd_offset(mm, addr);
 		if (!pgd_present(*pgd)) {
@@ -537,7 +535,7 @@ void __flush_tlb_one(unsigned long addr)
 }
 
 static void fix_range(struct mm_struct *mm, unsigned long start_addr,
-		      unsigned long end_addr, int force)
+		      unsigned long end_addr)
 {
 	/*
 	 * Don't bother flushing if this address space is about to be
@@ -546,15 +544,15 @@ static void fix_range(struct mm_struct *mm, unsigned long start_addr,
 	if (atomic_read(&mm->mm_users) == 0)
 		return;
 
-	fix_range_common(mm, start_addr, end_addr, force);
+	fix_range_common(mm, start_addr, end_addr);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end)
 {
 	if (vma->vm_mm == NULL)
 		flush_tlb_kernel_range_common(start, end);
-	else fix_range(vma->vm_mm, start, end, 0);
+	else fix_range(vma->vm_mm, start, end);
 }
 
 EXPORT_SYMBOL(flush_tlb_range);
@@ -564,17 +562,5 @@ void flush_tlb_mm(struct mm_struct *mm)
 	VMA_ITERATOR(vmi, mm, 0);
 
 	for_each_vma(vmi, vma)
-		fix_range(mm, vma->vm_start, vma->vm_end, 0);
-}
-
-void force_flush_all(void)
-{
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	VMA_ITERATOR(vmi, mm, 0);
-
-	mmap_read_lock(mm);
-	for_each_vma(vmi, vma)
-		fix_range(mm, vma->vm_start, vma->vm_end, 1);
-	mmap_read_unlock(mm);
+		fix_range(mm, vma->vm_start, vma->vm_end);
 }
0 commit comments