@@ -148,22 +148,25 @@ unsigned long hugetlb_mask_last_page(struct hstate *h)
 static pte_t get_clear_contig(struct mm_struct *mm,
			       unsigned long addr,
			       pte_t *ptep,
-			       unsigned long pte_num)
+			       unsigned long ncontig)
 {
-	pte_t orig_pte = ptep_get(ptep);
-	unsigned long i;
-
-	for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++) {
-		pte_t pte = ptep_get_and_clear(mm, addr, ptep);
-
-		if (pte_dirty(pte))
-			orig_pte = pte_mkdirty(orig_pte);
-
-		if (pte_young(pte))
-			orig_pte = pte_mkyoung(orig_pte);
+	pte_t pte, tmp_pte;
+	bool present;
+
+	pte = ptep_get_and_clear(mm, addr, ptep);
+	present = pte_present(pte);
+	while (--ncontig) {
+		ptep++;
+		addr += PAGE_SIZE;
+		tmp_pte = ptep_get_and_clear(mm, addr, ptep);
+		if (present) {
+			if (pte_dirty(tmp_pte))
+				pte = pte_mkdirty(pte);
+			if (pte_young(tmp_pte))
+				pte = pte_mkyoung(pte);
+		}
 	}
-
-	return orig_pte;
+	return pte;
 }
 
 static pte_t get_clear_contig_flush(struct mm_struct *mm,
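
For reference, a minimal userspace model of the accessed/dirty aggregation performed by the rewritten get_clear_contig() above. The mock_* type, flags, and helpers are stand-ins invented for this sketch, not kernel or RISC-V definitions; they only illustrate how the first cleared entry folds in the accessed/dirty bits of the remaining entries, and only when that first entry was present.

#include <stdbool.h>
#include <stdio.h>

/* Mock PTE: a plain bitmask. These flags are illustrative, not RISC-V encodings. */
#define MOCK_PRESENT	(1u << 0)
#define MOCK_ACCESSED	(1u << 1)
#define MOCK_DIRTY	(1u << 2)

typedef unsigned int mock_pte_t;

/* Clear one entry and return its previous value, like ptep_get_and_clear(). */
static mock_pte_t mock_get_and_clear(mock_pte_t *ptep)
{
	mock_pte_t old = *ptep;

	*ptep = 0;
	return old;
}

/* Same shape as the new get_clear_contig(): clear ncontig entries and fold the
 * accessed/dirty bits of the later entries into the first one, but only when
 * the first entry was present (e.g. not a swap/migration entry). */
static mock_pte_t mock_get_clear_contig(mock_pte_t *ptep, unsigned long ncontig)
{
	mock_pte_t pte = mock_get_and_clear(ptep);
	bool present = pte & MOCK_PRESENT;

	while (--ncontig) {
		mock_pte_t tmp = mock_get_and_clear(++ptep);

		if (present)
			pte |= tmp & (MOCK_ACCESSED | MOCK_DIRTY);
	}
	return pte;
}

int main(void)
{
	/* A 4-entry "contiguous" range where only the third entry is dirty. */
	mock_pte_t ptes[4] = {
		MOCK_PRESENT,
		MOCK_PRESENT | MOCK_ACCESSED,
		MOCK_PRESENT | MOCK_DIRTY,
		MOCK_PRESENT,
	};
	mock_pte_t folded = mock_get_clear_contig(ptes, 4);

	printf("folded pte: present=%u accessed=%u dirty=%u\n",
	       !!(folded & MOCK_PRESENT),
	       !!(folded & MOCK_ACCESSED),
	       !!(folded & MOCK_DIRTY));
	return 0;
}
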
@@ -212,6 +215,26 @@ static void clear_flush(struct mm_struct *mm,
 	flush_tlb_range(&vma, saddr, addr);
 }
 
+static int num_contig_ptes_from_size(unsigned long sz, size_t *pgsize)
+{
+	unsigned long hugepage_shift;
+
+	if (sz >= PGDIR_SIZE)
+		hugepage_shift = PGDIR_SHIFT;
+	else if (sz >= P4D_SIZE)
+		hugepage_shift = P4D_SHIFT;
+	else if (sz >= PUD_SIZE)
+		hugepage_shift = PUD_SHIFT;
+	else if (sz >= PMD_SIZE)
+		hugepage_shift = PMD_SHIFT;
+	else
+		hugepage_shift = PAGE_SHIFT;
+
+	*pgsize = 1 << hugepage_shift;
+
+	return sz >> hugepage_shift;
+}
+
 /*
  * When dealing with NAPOT mappings, the privileged specification indicates that
  * "if an update needs to be made, the OS generally should first mark all of the
@@ -226,22 +249,10 @@ void set_huge_pte_at(struct mm_struct *mm,
 		     pte_t pte,
 		     unsigned long sz)
 {
-	unsigned long hugepage_shift, pgsize;
+	size_t pgsize;
 	int i, pte_num;
 
-	if (sz >= PGDIR_SIZE)
-		hugepage_shift = PGDIR_SHIFT;
-	else if (sz >= P4D_SIZE)
-		hugepage_shift = P4D_SHIFT;
-	else if (sz >= PUD_SIZE)
-		hugepage_shift = PUD_SHIFT;
-	else if (sz >= PMD_SIZE)
-		hugepage_shift = PMD_SHIFT;
-	else
-		hugepage_shift = PAGE_SHIFT;
-
-	pte_num = sz >> hugepage_shift;
-	pgsize = 1 << hugepage_shift;
+	pte_num = num_contig_ptes_from_size(sz, &pgsize);
 
 	if (!pte_present(pte)) {
 		for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
@@ -295,13 +306,14 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 			      unsigned long addr,
 			      pte_t *ptep, unsigned long sz)
 {
+	size_t pgsize;
 	pte_t orig_pte = ptep_get(ptep);
 	int pte_num;
 
 	if (!pte_napot(orig_pte))
 		return ptep_get_and_clear(mm, addr, ptep);
 
-	pte_num = napot_pte_num(napot_cont_order(orig_pte));
+	pte_num = num_contig_ptes_from_size(sz, &pgsize);
 
 	return get_clear_contig(mm, addr, ptep, pte_num);
 }
@@ -351,6 +363,7 @@ void huge_pte_clear(struct mm_struct *mm,
 		    pte_t *ptep,
 		    unsigned long sz)
 {
+	size_t pgsize;
 	pte_t pte = ptep_get(ptep);
 	int i, pte_num;
 
@@ -359,8 +372,9 @@ void huge_pte_clear(struct mm_struct *mm,
 		return;
 	}
 
-	pte_num = napot_pte_num(napot_cont_order(pte));
-	for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++)
+	pte_num = num_contig_ptes_from_size(sz, &pgsize);
+
+	for (i = 0; i < pte_num; i++, addr += pgsize, ptep++)
 		pte_clear(mm, addr, ptep);
 }
 