@@ -1303,35 +1303,30 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
    know the hardware page-walk will no longer touch them.
    The 'pte' argument is the *parent* PTE, pointing to the page that is to
    be freed. */
-static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
-                                            int level, struct dma_pte *pte,
-                                            struct page *freelist)
+static void dma_pte_list_pagetables(struct dmar_domain *domain,
+                                    int level, struct dma_pte *pte,
+                                    struct list_head *freelist)
 {
         struct page *pg;
 
         pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
-        pg->freelist = freelist;
-        freelist = pg;
+        list_add_tail(&pg->lru, freelist);
 
         if (level == 1)
-                return freelist;
+                return;
 
         pte = page_address(pg);
         do {
                 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
-                        freelist = dma_pte_list_pagetables(domain, level - 1,
-                                                           pte, freelist);
+                        dma_pte_list_pagetables(domain, level - 1, pte, freelist);
                 pte++;
         } while (!first_pte_in_page(pte));
-
-        return freelist;
 }
 
-static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
-                                        struct dma_pte *pte, unsigned long pfn,
-                                        unsigned long start_pfn,
-                                        unsigned long last_pfn,
-                                        struct page *freelist)
+static void dma_pte_clear_level(struct dmar_domain *domain, int level,
+                                struct dma_pte *pte, unsigned long pfn,
+                                unsigned long start_pfn, unsigned long last_pfn,
+                                struct list_head *freelist)
 {
         struct dma_pte *first_pte = NULL, *last_pte = NULL;
 
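
The pattern change in this hunk: the old code threaded page-table pages onto a hand-rolled singly linked list through the page->freelist pointer and returned the new head, which every caller had to remember to reassign; the new code appends to a caller-owned struct list_head through page->lru, so the functions can return void. A minimal standalone sketch of the two styles, with hypothetical helper names that are not part of the patch:

#include <linux/list.h>
#include <linux/mm_types.h>

/* Old style: intrusive singly linked list; the caller must capture
 * the returned head, or queued pages are silently leaked. */
static struct page *freelist_push_old(struct page *pg, struct page *head)
{
        pg->freelist = head;
        return pg;
}

/* New style: append to a caller-owned list head; there is no return
 * value to forget, and insertion order is preserved. */
static void freelist_push_new(struct page *pg, struct list_head *freelist)
{
        list_add_tail(&pg->lru, freelist);
}
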
@@ -1350,18 +1345,18 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
                         /* These subordinate page tables are going away entirely. Don't
                            bother to clear them; we're just going to *free* them. */
                         if (level > 1 && !dma_pte_superpage(pte))
-                                freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
+                                dma_pte_list_pagetables(domain, level - 1, pte, freelist);
 
                         dma_clear_pte(pte);
                         if (!first_pte)
                                 first_pte = pte;
                         last_pte = pte;
                 } else if (level > 1) {
                         /* Recurse down into a level that isn't *entirely* obsolete */
-                        freelist = dma_pte_clear_level(domain, level - 1,
-                                                       phys_to_virt(dma_pte_addr(pte)),
-                                                       level_pfn, start_pfn, last_pfn,
-                                                       freelist);
+                        dma_pte_clear_level(domain, level - 1,
+                                            phys_to_virt(dma_pte_addr(pte)),
+                                            level_pfn, start_pfn, last_pfn,
+                                            freelist);
                 }
 next:
                 pfn = level_pfn + level_size(level);
@@ -1370,47 +1365,28 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
         if (first_pte)
                 domain_flush_cache(domain, first_pte,
                                    (void *)++last_pte - (void *)first_pte);
-
-        return freelist;
 }
 
 /* We can't just free the pages because the IOMMU may still be walking
    the page tables, and may have cached the intermediate levels. The
    pages can only be freed after the IOTLB flush has been done. */
-static struct page *domain_unmap(struct dmar_domain *domain,
-                                 unsigned long start_pfn,
-                                 unsigned long last_pfn,
-                                 struct page *freelist)
+static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
+                         unsigned long last_pfn, struct list_head *freelist)
 {
         BUG_ON(!domain_pfn_supported(domain, start_pfn));
         BUG_ON(!domain_pfn_supported(domain, last_pfn));
         BUG_ON(start_pfn > last_pfn);
 
         /* we don't need lock here; nobody else touches the iova range */
-        freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
-                                       domain->pgd, 0, start_pfn, last_pfn,
-                                       freelist);
+        dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
+                            domain->pgd, 0, start_pfn, last_pfn, freelist);
 
         /* free pgd */
         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
                 struct page *pgd_page = virt_to_page(domain->pgd);
-                pgd_page->freelist = freelist;
-                freelist = pgd_page;
-
+                list_add_tail(&pgd_page->lru, freelist);
                 domain->pgd = NULL;
         }
-
-        return freelist;
-}
-
-static void dma_free_pagelist(struct page *freelist)
-{
-        struct page *pg;
-
-        while ((pg = freelist)) {
-                freelist = pg->freelist;
-                free_pgtable_page(page_address(pg));
-        }
 }
 
 /* iommu handling */
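
With the list converted, the open-coded dma_free_pagelist() walker deleted above becomes dead code: put_pages_list() from the core mm does the same job, dropping a reference on each lru-linked page, and the last reference frees it. A hedged approximation of what that call amounts to, minus the batched freeing the real helper performs:

#include <linux/list.h>
#include <linux/mm.h>

/* Roughly equivalent to put_pages_list(pages): release every page
 * linked through page->lru, leaving the caller's list empty. */
static void put_pages_list_sketch(struct list_head *pages)
{
        struct page *page, *next;

        list_for_each_entry_safe(page, next, pages, lru) {
                list_del(&page->lru);
                put_page(page);         /* last reference frees the page */
        }
}
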
@@ -2095,11 +2071,10 @@ static void domain_exit(struct dmar_domain *domain)
         domain_remove_dev_info(domain);
 
         if (domain->pgd) {
-                struct page *freelist;
+                LIST_HEAD(freelist);
 
-                freelist = domain_unmap(domain, 0,
-                                        DOMAIN_MAX_PFN(domain->gaw), NULL);
-                dma_free_pagelist(freelist);
+                domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist);
+                put_pages_list(&freelist);
         }
 
         free_domain_mem(domain);
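
domain_exit() shows the caller-side idiom every converted site now shares: declare an empty on-stack list with LIST_HEAD(), let domain_unmap() queue the obsolete page-table pages onto it, then release them with put_pages_list() once nothing can still be walking them. A condensed sketch of that idiom; the function name is hypothetical, the callees are the ones from the hunks above, and on live-unmap paths an IOTLB flush must sit between the unmap and the free:

#include <linux/list.h>
#include <linux/mm.h>

static void example_full_teardown(struct dmar_domain *domain)
{
        LIST_HEAD(freelist);    /* initialized empty, lives on the stack */

        /* Queue every page-table page; nothing is freed yet. */
        domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist);

        /* ... flush the IOTLB here if the hardware could still hold
         * cached translations for this range ... */

        put_pages_list(&freelist);      /* now actually free the pages */
}
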
@@ -4192,19 +4167,17 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
                 {
                         struct dmar_drhd_unit *drhd;
                         struct intel_iommu *iommu;
-                        struct page *freelist;
+                        LIST_HEAD(freelist);
 
-                        freelist = domain_unmap(si_domain,
-                                                start_vpfn, last_vpfn,
-                                                NULL);
+                        domain_unmap(si_domain, start_vpfn, last_vpfn, &freelist);
 
                         rcu_read_lock();
                         for_each_active_iommu(iommu, drhd)
                                 iommu_flush_iotlb_psi(iommu, si_domain,
                                                       start_vpfn, mhp->nr_pages,
-                                                      !freelist, 0);
+                                                      list_empty(&freelist), 0);
                         rcu_read_unlock();
-                        dma_free_pagelist(freelist);
+                        put_pages_list(&freelist);
                 }
                 break;
         }
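
One subtlety in the hunk above: the argument following mhp->nr_pages in iommu_flush_iotlb_psi() is the invalidation hint, which may be set only when no page-table pages were freed. "Nothing freed" used to be a NULL head pointer, hence !freelist; with a list_head it is list_empty(&freelist), since an empty list points at itself rather than at NULL. A tiny self-contained illustration of that equivalence, with a hypothetical helper name and assuming the hint's meaning is unchanged:

#include <linux/list.h>
#include <linux/types.h>

/* True when domain_unmap() queued no pages, i.e. the page-table
 * structure itself is intact and the cheaper flush hint is safe. */
static bool nothing_was_freed(struct list_head *freelist)
{
        return list_empty(freelist);    /* empty head points at itself */
}
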
@@ -5211,8 +5184,7 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
         start_pfn = iova >> VTD_PAGE_SHIFT;
         last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
 
-        gather->freelist = domain_unmap(dmar_domain, start_pfn,
-                                        last_pfn, gather->freelist);
+        domain_unmap(dmar_domain, start_pfn, last_pfn, &gather->freelist);
 
         if (dmar_domain->max_addr == iova + size)
                 dmar_domain->max_addr = iova;
@@ -5248,9 +5220,10 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
 
         for_each_domain_iommu(iommu_id, dmar_domain)
                 iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
-                                      start_pfn, nrpages, !gather->freelist, 0);
+                                      start_pfn, nrpages,
+                                      list_empty(&gather->freelist), 0);
 
-        dma_free_pagelist(gather->freelist);
+        put_pages_list(&gather->freelist);
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
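
The last two hunks only compile if struct iommu_iotlb_gather's freelist member has itself been converted to a struct list_head, initialized empty when the gather is set up; that change belongs to a companion core-IOMMU patch in the series, not to this file. A sketch of the shape this code assumes, reconstructed rather than quoted from the series:

#include <linux/limits.h>
#include <linux/list.h>

/* Assumed post-series shape of the gather structure. */
struct iommu_iotlb_gather {
        unsigned long           start;
        unsigned long           end;
        size_t                  pgsize;
        struct list_head        freelist;       /* pages to free after the flush */
};

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
        *gather = (struct iommu_iotlb_gather) {
                .start          = ULONG_MAX,
                .freelist       = LIST_HEAD_INIT(gather->freelist),
        };
}
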