@@ -170,14 +170,16 @@ static inline int update_p4d_range(pgd_t *pgd, unsigned long addr,
 		return ret;
 }
 
-static int fix_range_common(struct mm_struct *mm, unsigned long start_addr,
-			    unsigned long end_addr)
+int um_tlb_sync(struct mm_struct *mm)
 {
 	pgd_t *pgd;
 	struct vm_ops ops;
-	unsigned long addr = start_addr, next;
+	unsigned long addr = mm->context.sync_tlb_range_from, next;
 	int ret = 0;
 
+	if (mm->context.sync_tlb_range_to == 0)
+		return 0;
+
 	ops.mm_idp = &mm->context.id;
 	if (mm == &init_mm) {
 		ops.mmap = kern_map;
@@ -191,7 +193,7 @@ static int fix_range_common(struct mm_struct *mm, unsigned long start_addr,
 
 	pgd = pgd_offset(mm, addr);
 	do {
-		next = pgd_addr_end(addr, end_addr);
+		next = pgd_addr_end(addr, mm->context.sync_tlb_range_to);
 		if (!pgd_present(*pgd)) {
 			if (pgd_newpage(*pgd)) {
 				ret = ops.unmap(ops.mm_idp, addr,
@@ -200,87 +202,16 @@ static int fix_range_common(struct mm_struct *mm, unsigned long start_addr,
 			}
 		} else
 			ret = update_p4d_range(pgd, addr, next, &ops);
-	} while (pgd++, addr = next, ((addr < end_addr) && !ret));
+	} while (pgd++, addr = next,
+		 ((addr < mm->context.sync_tlb_range_to) && !ret));
 
 	if (ret == -ENOMEM)
 		report_enomem();
 
-	return ret;
-}
-
-static void flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
-{
-	int err;
-
-	err = fix_range_common(&init_mm, start, end);
-
-	if (err)
-		panic("flush_tlb_kernel failed, errno = %d\n", err);
-}
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
-{
-	pgd_t *pgd;
-	p4d_t *p4d;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	struct mm_struct *mm = vma->vm_mm;
-	int r, w, x, prot;
-	struct mm_id *mm_id;
-
-	address &= PAGE_MASK;
-
-	pgd = pgd_offset(mm, address);
-	if (!pgd_present(*pgd))
-		goto kill;
-
-	p4d = p4d_offset(pgd, address);
-	if (!p4d_present(*p4d))
-		goto kill;
-
-	pud = pud_offset(p4d, address);
-	if (!pud_present(*pud))
-		goto kill;
-
-	pmd = pmd_offset(pud, address);
-	if (!pmd_present(*pmd))
-		goto kill;
-
-	pte = pte_offset_kernel(pmd, address);
-
-	r = pte_read(*pte);
-	w = pte_write(*pte);
-	x = pte_exec(*pte);
-	if (!pte_young(*pte)) {
-		r = 0;
-		w = 0;
-	} else if (!pte_dirty(*pte)) {
-		w = 0;
-	}
-
-	mm_id = &mm->context.id;
-	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
-		(x ? UM_PROT_EXEC : 0));
-	if (pte_newpage(*pte)) {
-		if (pte_present(*pte)) {
-			unsigned long long offset;
-			int fd;
-
-			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
-			map(mm_id, address, PAGE_SIZE, prot, fd, offset);
-		} else
-			unmap(mm_id, address, PAGE_SIZE);
-	} else if (pte_newprot(*pte))
-		protect(mm_id, address, PAGE_SIZE, prot);
-
-	*pte = pte_mkuptodate(*pte);
+	mm->context.sync_tlb_range_from = 0;
+	mm->context.sync_tlb_range_to = 0;
 
-	return;
-
-kill:
-	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
-	force_sig(SIGKILL);
+	return ret;
 }
 
 void flush_tlb_all(void)
@@ -295,48 +226,11 @@ void flush_tlb_all(void)
 	flush_tlb_mm(current->mm);
 }
 
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
-	flush_tlb_kernel_range_common(start, end);
-}
-
-void flush_tlb_kernel_vm(void)
-{
-	flush_tlb_kernel_range_common(start_vm, end_vm);
-}
-
-void __flush_tlb_one(unsigned long addr)
-{
-	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
-}
-
-static void fix_range(struct mm_struct *mm, unsigned long start_addr,
-		      unsigned long end_addr)
-{
-	/*
-	 * Don't bother flushing if this address space is about to be
-	 * destroyed.
-	 */
-	if (atomic_read(&mm->mm_users) == 0)
-		return;
-
-	fix_range_common(mm, start_addr, end_addr);
-}
-
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-		     unsigned long end)
-{
-	if (vma->vm_mm == NULL)
-		flush_tlb_kernel_range_common(start, end);
-	else fix_range(vma->vm_mm, start, end);
-}
-EXPORT_SYMBOL(flush_tlb_range);
-
 void flush_tlb_mm(struct mm_struct *mm)
 {
 	struct vm_area_struct *vma;
 	VMA_ITERATOR(vmi, mm, 0);
 
 	for_each_vma(vmi, vma)
-		fix_range(mm, vma->vm_start, vma->vm_end);
+		um_tlb_mark_sync(mm, vma->vm_start, vma->vm_end);
 }
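
For context, the change replaces eager per-range flushing with a deferred scheme: callers record a pending address range in mm->context (sync_tlb_range_from / sync_tlb_range_to), and um_tlb_sync() later walks exactly that range and resets both fields to zero. The um_tlb_mark_sync() helper called from flush_tlb_mm() is not defined in this file; as an illustrative sketch only (the real helper lives elsewhere and may differ in detail), it plausibly just widens the pending range:

/*
 * Illustrative sketch, not part of this diff: grow the pending sync
 * range tracked in the mm_context so um_tlb_sync() can replay it later.
 * Field names match the ones read and cleared by um_tlb_sync() above.
 */
static inline void um_tlb_mark_sync(struct mm_struct *mm,
				    unsigned long start, unsigned long end)
{
	if (!mm->context.sync_tlb_range_to) {
		/* No range pending yet: start a new one. */
		mm->context.sync_tlb_range_from = start;
		mm->context.sync_tlb_range_to = end;
	} else {
		/* Extend the existing range to also cover [start, end). */
		if (start < mm->context.sync_tlb_range_from)
			mm->context.sync_tlb_range_from = start;
		if (end > mm->context.sync_tlb_range_to)
			mm->context.sync_tlb_range_to = end;
	}
}

With this shape, the early return in um_tlb_sync() when sync_tlb_range_to == 0 doubles as the "nothing pending" check, since any marked range ends above zero.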
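
The flush_tlb_page(), flush_tlb_range(), and flush_tlb_kernel_* entry points removed from this file still have to exist for the generic MM code; under the deferred scheme they can presumably shrink to thin wrappers that only mark the affected range, mirroring what flush_tlb_mm() now does per VMA. A speculative sketch (placement and the immediate kernel sync are assumptions, not taken from this diff):

/*
 * Speculative sketch: userspace flushes only mark the range and let
 * um_tlb_sync() perform the actual host map/unmap work later, while
 * kernel ranges are synced immediately since there is no later
 * synchronization point for them.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long address)
{
	um_tlb_mark_sync(vma->vm_mm, address, address + PAGE_SIZE);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	um_tlb_mark_sync(vma->vm_mm, start, end);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	um_tlb_mark_sync(&init_mm, start, end);
	um_tlb_sync(&init_mm);	/* assumed: kernel mappings sync right away */
}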