@@ -151,6 +151,26 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
 	return container_of(dom, struct protection_domain, domain);
 }
 
+static void amd_iommu_domain_get_pgtable(struct protection_domain *domain,
+					 struct domain_pgtable *pgtable)
+{
+	u64 pt_root = atomic64_read(&domain->pt_root);
+
+	pgtable->root = (u64 *)(pt_root & PAGE_MASK);
+	pgtable->mode = pt_root & 7; /* lowest 3 bits encode pgtable mode */
+}
+
+static u64 amd_iommu_domain_encode_pgtable(u64 *root, int mode)
+{
+	u64 pt_root;
+
+	/* lowest 3 bits encode pgtable mode */
+	pt_root = mode & 7;
+	pt_root |= (u64)root;
+
+	return pt_root;
+}
+
 static struct iommu_dev_data *alloc_dev_data(u16 devid)
 {
 	struct iommu_dev_data *dev_data;
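The two helpers added above pack the page-table root pointer and the paging mode into one 64-bit value: the root page is page-aligned, so its low bits are zero and bits 0-2 are free to carry the mode. A standalone round-trip sketch of the same encoding (hypothetical names, plain C11 in place of kernel types):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define PG_SIZE 4096UL
#define PG_MASK (~((uint64_t)PG_SIZE - 1))

/* Mirrors amd_iommu_domain_encode_pgtable(): mode goes in bits 0-2. */
static uint64_t encode_pgtable(uint64_t *root, int mode)
{
	return (uint64_t)(uintptr_t)root | (uint64_t)(mode & 7);
}

/* Mirrors amd_iommu_domain_get_pgtable(): mask the mode bits back off. */
static void decode_pgtable(uint64_t pt_root, uint64_t **root, int *mode)
{
	*root = (uint64_t *)(uintptr_t)(pt_root & PG_MASK);
	*mode = (int)(pt_root & 7);
}

int main(void)
{
	/* Page-aligned allocation keeps the low pointer bits zero. */
	uint64_t *pt = aligned_alloc(PG_SIZE, PG_SIZE);
	uint64_t *root;
	int mode;

	decode_pgtable(encode_pgtable(pt, 3 /* PAGE_MODE_3_LEVEL */),
		       &root, &mode);
	assert(root == pt && mode == 3); /* both fields survive the trip */

	free(pt);
	return 0;
}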
@@ -1397,13 +1417,18 @@ static struct page *free_sub_pt(unsigned long root, int mode,
 
 static void free_pagetable(struct protection_domain *domain)
 {
-	unsigned long root = (unsigned long)domain->pt_root;
+	struct domain_pgtable pgtable;
 	struct page *freelist = NULL;
+	unsigned long root;
+
+	amd_iommu_domain_get_pgtable(domain, &pgtable);
+	atomic64_set(&domain->pt_root, 0);
 
-	BUG_ON(domain->mode < PAGE_MODE_NONE ||
-	       domain->mode > PAGE_MODE_6_LEVEL);
+	BUG_ON(pgtable.mode < PAGE_MODE_NONE ||
+	       pgtable.mode > PAGE_MODE_6_LEVEL);
 
-	freelist = free_sub_pt(root, domain->mode, freelist);
+	root = (unsigned long)pgtable.root;
+	freelist = free_sub_pt(root, pgtable.mode, freelist);
 
 	free_page_list(freelist);
 }
@@ -1417,24 +1442,28 @@ static bool increase_address_space(struct protection_domain *domain,
 				       unsigned long address,
 				       gfp_t gfp)
 {
+	struct domain_pgtable pgtable;
 	unsigned long flags;
 	bool ret = false;
-	u64 *pte;
+	u64 *pte, root;
 
 	spin_lock_irqsave(&domain->lock, flags);
 
-	if (address <= PM_LEVEL_SIZE(domain->mode) ||
-	    WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
+	amd_iommu_domain_get_pgtable(domain, &pgtable);
+
+	if (address <= PM_LEVEL_SIZE(pgtable.mode) ||
+	    WARN_ON_ONCE(pgtable.mode == PAGE_MODE_6_LEVEL))
 		goto out;
 
 	pte = (void *)get_zeroed_page(gfp);
 	if (!pte)
 		goto out;
 
-	*pte = PM_LEVEL_PDE(domain->mode,
-			    iommu_virt_to_phys(domain->pt_root));
-	domain->pt_root = pte;
-	domain->mode   += 1;
+	*pte = PM_LEVEL_PDE(pgtable.mode, iommu_virt_to_phys(pgtable.root));
+
+	root = amd_iommu_domain_encode_pgtable(pte, pgtable.mode + 1);
+
+	atomic64_set(&domain->pt_root, root);
 
 	ret = true;
 
@@ -1451,16 +1480,22 @@ static u64 *alloc_pte(struct protection_domain *domain,
 		      gfp_t gfp,
 		      bool *updated)
 {
+	struct domain_pgtable pgtable;
 	int level, end_lvl;
 	u64 *pte, *page;
 
 	BUG_ON(!is_power_of_2(page_size));
 
-	while (address > PM_LEVEL_SIZE(domain->mode))
+	amd_iommu_domain_get_pgtable(domain, &pgtable);
+
+	while (address > PM_LEVEL_SIZE(pgtable.mode)) {
 		*updated = increase_address_space(domain, address, gfp) || *updated;
+		amd_iommu_domain_get_pgtable(domain, &pgtable);
+	}
 
-	level   = domain->mode - 1;
-	pte     = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+	level   = pgtable.mode - 1;
+	pte     = &pgtable.root[PM_LEVEL_INDEX(level, address)];
 	address = PAGE_SIZE_ALIGN(address, page_size);
 	end_lvl = PAGE_SIZE_LEVEL(page_size);
 
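alloc_pte() now re-takes the pgtable snapshot after every increase_address_space() call, since a successful grow publishes a new root/mode pair and the previous snapshot is immediately stale. A minimal single-threaded model of that refresh pattern (hypothetical names, C11 atomics standing in for atomic64_*):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t pt_root; /* packed "root | mode"; root elided here */

static int snapshot_mode(void)
{
	return (int)(atomic_load(&pt_root) & 7);
}

static void grow_one_level(void)
{
	uint64_t old = atomic_load(&pt_root);

	/* The real code also installs a new top-level page here. */
	atomic_store(&pt_root, (old & ~7ULL) | ((old & 7) + 1));
}

int main(void)
{
	int mode;

	atomic_store(&pt_root, 3);      /* start as a 3-level table */

	mode = snapshot_mode();         /* initial snapshot */
	while (mode < 5) {
		grow_one_level();       /* may replace root and mode */
		mode = snapshot_mode(); /* old snapshot is stale: re-read */
	}

	printf("grew to mode %d\n", mode); /* prints: grew to mode 5 */
	return 0;
}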
@@ -1536,16 +1571,19 @@ static u64 *fetch_pte(struct protection_domain *domain,
 			  unsigned long address,
 			  unsigned long *page_size)
 {
+	struct domain_pgtable pgtable;
 	int level;
 	u64 *pte;
 
 	*page_size = 0;
 
-	if (address > PM_LEVEL_SIZE(domain->mode))
+	amd_iommu_domain_get_pgtable(domain, &pgtable);
+
+	if (address > PM_LEVEL_SIZE(pgtable.mode))
 		return NULL;
 
-	level	   = domain->mode - 1;
-	pte	   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+	level	   = pgtable.mode - 1;
+	pte	   = &pgtable.root[PM_LEVEL_INDEX(level, address)];
 	*page_size = PTE_LEVEL_PAGE_SIZE(level);
 
 	while (level > 0) {
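For context: fetch_pte() runs without domain->lock, so with root and mode kept in two separate fields a reader racing increase_address_space() could pair the new root with the old mode and walk the table at the wrong level; packing both into one atomic64 makes the pair indivisible. A single-threaded re-enactment of the harmful interleaving (hypothetical simplified struct, not the kernel code):

#include <stdint.h>
#include <stdio.h>

/* The pre-patch layout: root and mode are updated in two steps. */
struct unsafe_domain { uint64_t *pt_root; int mode; };

int main(void)
{
	static uint64_t old_top[512], new_top[512];
	struct unsafe_domain d = { .pt_root = old_top, .mode = 3 };
	uint64_t *seen_root;
	int seen_mode;

	/* Writer, step 1: publish the new top-level page... */
	d.pt_root = new_top;

	/* A lock-free reader runs here, between the two writer stores: */
	seen_root = d.pt_root;  /* sees the NEW root  */
	seen_mode = d.mode;     /* sees the OLD mode! */

	/* Writer, step 2: ...only now bump the level count. */
	d.mode = 4;

	printf("reader walked a %d-level table at %p (should be 4 at %p)\n",
	       seen_mode, (void *)seen_root, (void *)new_top);
	return 0;
}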
@@ -1806,6 +1844,7 @@ static void dma_ops_domain_free(struct protection_domain *domain)
 static struct protection_domain *dma_ops_domain_alloc(void)
 {
 	struct protection_domain *domain;
+	u64 *pt_root, root;
 
 	domain = kzalloc(sizeof(struct protection_domain), GFP_KERNEL);
 	if (!domain)
@@ -1814,12 +1853,14 @@ static struct protection_domain *dma_ops_domain_alloc(void)
 	if (protection_domain_init(domain))
 		goto free_domain;
 
-	domain->mode = PAGE_MODE_3_LEVEL;
-	domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
-	domain->flags = PD_DMA_OPS_MASK;
-	if (!domain->pt_root)
+	pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+	if (!pt_root)
 		goto free_domain;
 
+	root = amd_iommu_domain_encode_pgtable(pt_root, PAGE_MODE_3_LEVEL);
+	atomic64_set(&domain->pt_root, root);
+	domain->flags = PD_DMA_OPS_MASK;
+
 	if (iommu_get_dma_cookie(&domain->domain) == -ENOMEM)
 		goto free_domain;
 
@@ -1843,14 +1884,17 @@ static bool dma_ops_domain(struct protection_domain *domain)
 static void set_dte_entry(u16 devid, struct protection_domain *domain,
 			  bool ats, bool ppr)
 {
+	struct domain_pgtable pgtable;
 	u64 pte_root = 0;
 	u64 flags = 0;
 	u32 old_domid;
 
-	if (domain->mode != PAGE_MODE_NONE)
-		pte_root = iommu_virt_to_phys(domain->pt_root);
+	amd_iommu_domain_get_pgtable(domain, &pgtable);
+
+	if (pgtable.mode != PAGE_MODE_NONE)
+		pte_root = iommu_virt_to_phys(pgtable.root);
 
-	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
+	pte_root |= (pgtable.mode & DEV_ENTRY_MODE_MASK)
 		    << DEV_ENTRY_MODE_SHIFT;
 	pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V | DTE_FLAG_TV;
 
@@ -2375,20 +2419,23 @@ static struct protection_domain *protection_domain_alloc(void)
 static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
 {
 	struct protection_domain *pdomain;
+	u64 *pt_root, root;
 
 	switch (type) {
 	case IOMMU_DOMAIN_UNMANAGED:
 		pdomain = protection_domain_alloc();
 		if (!pdomain)
 			return NULL;
 
-		pdomain->mode    = PAGE_MODE_3_LEVEL;
-		pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
-		if (!pdomain->pt_root) {
+		pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+		if (!pt_root) {
 			protection_domain_free(pdomain);
 			return NULL;
 		}
 
+		root = amd_iommu_domain_encode_pgtable(pt_root, PAGE_MODE_3_LEVEL);
+		atomic64_set(&pdomain->pt_root, root);
+
 		pdomain->domain.geometry.aperture_start = 0;
 		pdomain->domain.geometry.aperture_end   = ~0ULL;
 		pdomain->domain.geometry.force_aperture = true;
@@ -2406,7 +2453,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
 		if (!pdomain)
 			return NULL;
 
-		pdomain->mode = PAGE_MODE_NONE;
+		atomic64_set(&pdomain->pt_root, PAGE_MODE_NONE);
 		break;
 	default:
 		return NULL;
@@ -2418,6 +2465,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
 static void amd_iommu_domain_free(struct iommu_domain *dom)
 {
 	struct protection_domain *domain;
+	struct domain_pgtable pgtable;
 
 	domain = to_pdomain(dom);
 
@@ -2435,7 +2483,9 @@ static void amd_iommu_domain_free(struct iommu_domain *dom)
 		dma_ops_domain_free(domain);
 		break;
 	default:
-		if (domain->mode != PAGE_MODE_NONE)
+		amd_iommu_domain_get_pgtable(domain, &pgtable);
+
+		if (pgtable.mode != PAGE_MODE_NONE)
 			free_pagetable(domain);
 
 		if (domain->flags & PD_IOMMUV2_MASK)
@@ -2518,10 +2568,12 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 			 gfp_t gfp)
 {
 	struct protection_domain *domain = to_pdomain(dom);
+	struct domain_pgtable pgtable;
 	int prot = 0;
 	int ret;
 
-	if (domain->mode == PAGE_MODE_NONE)
+	amd_iommu_domain_get_pgtable(domain, &pgtable);
+	if (pgtable.mode == PAGE_MODE_NONE)
 		return -EINVAL;
 
 	if (iommu_prot & IOMMU_READ)
@@ -2541,8 +2593,10 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 			      struct iommu_iotlb_gather *gather)
 {
 	struct protection_domain *domain = to_pdomain(dom);
+	struct domain_pgtable pgtable;
 
-	if (domain->mode == PAGE_MODE_NONE)
+	amd_iommu_domain_get_pgtable(domain, &pgtable);
+	if (pgtable.mode == PAGE_MODE_NONE)
 		return 0;
 
 	return iommu_unmap_page(domain, iova, page_size);
@@ -2553,9 +2607,11 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
 {
 	struct protection_domain *domain = to_pdomain(dom);
 	unsigned long offset_mask, pte_pgsize;
+	struct domain_pgtable pgtable;
 	u64 *pte, __pte;
 
-	if (domain->mode == PAGE_MODE_NONE)
+	amd_iommu_domain_get_pgtable(domain, &pgtable);
+	if (pgtable.mode == PAGE_MODE_NONE)
 		return iova;
 
 	pte = fetch_pte(domain, iova, &pte_pgsize);
@@ -2708,16 +2764,26 @@ EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
 void amd_iommu_domain_direct_map(struct iommu_domain *dom)
 {
 	struct protection_domain *domain = to_pdomain(dom);
+	struct domain_pgtable pgtable;
 	unsigned long flags;
+	u64 pt_root;
 
 	spin_lock_irqsave(&domain->lock, flags);
 
+	/* First save pgtable configuration */
+	amd_iommu_domain_get_pgtable(domain, &pgtable);
+
 	/* Update data structure */
-	domain->mode = PAGE_MODE_NONE;
+	pt_root = amd_iommu_domain_encode_pgtable(NULL, PAGE_MODE_NONE);
+	atomic64_set(&domain->pt_root, pt_root);
 
 	/* Make changes visible to IOMMUs */
 	update_domain(domain);
 
+	/* Restore old pgtable in domain->pt_root to free page-table */
+	pt_root = amd_iommu_domain_encode_pgtable(pgtable.root, pgtable.mode);
+	atomic64_set(&domain->pt_root, pt_root);
+
 	/* Page-table is not visible to IOMMU anymore, so free it */
 	free_pagetable(domain);
 
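The save/clear/restore sequence above looks odd at first: pt_root is restored only so that free_pagetable(), which itself re-reads and then clears pt_root, can still find the old table once the IOMMUs have stopped walking it. A toy model of that ordering (hypothetical names, C11 atomics):

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t pt_root;
static uint64_t freed; /* records which root the free path saw */

/* Models free_pagetable(): the free path reads pt_root itself, so
 * clearing pt_root for good before calling it would leak the table. */
static void toy_free_pagetable(void)
{
	freed = atomic_load(&pt_root) & ~7ULL;
	atomic_store(&pt_root, 0);
}

int main(void)
{
	uint64_t old_root = 0x1000; /* pretend page-aligned root */

	atomic_store(&pt_root, old_root | 3);

	/* direct map: save, clear, (IOMMU update), restore, then free */
	uint64_t saved = atomic_load(&pt_root);
	atomic_store(&pt_root, 0);     /* PAGE_MODE_NONE: detach from IOMMU */
	/* ... update_domain() would run here ... */
	atomic_store(&pt_root, saved); /* restore so the free path finds it */
	toy_free_pagetable();

	assert(freed == old_root);     /* table was found and freed, not leaked */
	return 0;
}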
@@ -2908,9 +2974,11 @@ static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
 static int __set_gcr3(struct protection_domain *domain, int pasid,
 		      unsigned long cr3)
 {
+	struct domain_pgtable pgtable;
 	u64 *pte;
 
-	if (domain->mode != PAGE_MODE_NONE)
+	amd_iommu_domain_get_pgtable(domain, &pgtable);
+	if (pgtable.mode != PAGE_MODE_NONE)
 		return -EINVAL;
 
 	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
@@ -2924,9 +2992,11 @@ static int __set_gcr3(struct protection_domain *domain, int pasid,
 
 static int __clear_gcr3(struct protection_domain *domain, int pasid)
 {
+	struct domain_pgtable pgtable;
 	u64 *pte;
 
-	if (domain->mode != PAGE_MODE_NONE)
+	amd_iommu_domain_get_pgtable(domain, &pgtable);
+	if (pgtable.mode != PAGE_MODE_NONE)
 		return -EINVAL;
 
 	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);