@@ -77,7 +77,6 @@ struct iommu_cmd {
77
77
struct kmem_cache * amd_iommu_irq_cache ;
78
78
79
79
static void detach_device (struct device * dev );
80
- static int domain_enable_v2 (struct protection_domain * domain , int pasids );
81
80
82
81
/****************************************************************************
83
82
*
@@ -1575,6 +1574,42 @@ static void free_gcr3_table(struct protection_domain *domain)
1575
1574
free_page ((unsigned long )domain -> gcr3_tbl );
1576
1575
}
1577
1576
1577
+ /*
1578
+ * Number of GCR3 table levels required. Level must be 4-Kbyte
1579
+ * page and can contain up to 512 entries.
1580
+ */
1581
+ static int get_gcr3_levels (int pasids )
1582
+ {
1583
+ int levels ;
1584
+
1585
+ if (pasids == -1 )
1586
+ return amd_iommu_max_glx_val ;
1587
+
1588
+ levels = get_count_order (pasids );
1589
+
1590
+ return levels ? (DIV_ROUND_UP (levels , 9 ) - 1 ) : levels ;
1591
+ }
1592
+
1593
+ /* Note: This function expects iommu_domain->lock to be held prior calling the function. */
1594
+ static int setup_gcr3_table (struct protection_domain * domain , int pasids )
1595
+ {
1596
+ int levels = get_gcr3_levels (pasids );
1597
+
1598
+ if (levels > amd_iommu_max_glx_val )
1599
+ return - EINVAL ;
1600
+
1601
+ domain -> gcr3_tbl = alloc_pgtable_page (domain -> nid , GFP_ATOMIC );
1602
+ if (domain -> gcr3_tbl == NULL )
1603
+ return - ENOMEM ;
1604
+
1605
+ domain -> glx = levels ;
1606
+ domain -> flags |= PD_IOMMUV2_MASK ;
1607
+
1608
+ amd_iommu_domain_update (domain );
1609
+
1610
+ return 0 ;
1611
+ }
1612
+
1578
1613
static void set_dte_entry (struct amd_iommu * iommu , u16 devid ,
1579
1614
struct protection_domain * domain , bool ats , bool ppr )
1580
1615
{
@@ -2065,7 +2100,7 @@ static int protection_domain_init_v2(struct protection_domain *domain)
2065
2100
2066
2101
domain -> domain .pgsize_bitmap = AMD_IOMMU_PGSIZES_V2 ;
2067
2102
2068
- if (domain_enable_v2 (domain , 1 )) {
2103
+ if (setup_gcr3_table (domain , 1 )) {
2069
2104
domain_id_free (domain -> id );
2070
2105
return - ENOMEM ;
2071
2106
}
@@ -2514,30 +2549,6 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom)
2514
2549
}
2515
2550
EXPORT_SYMBOL (amd_iommu_domain_direct_map );
2516
2551
2517
/*
 * Enable IOMMUv2 (GCR3) support on @domain for up to @pasids PASIDs:
 * allocate the GCR3 root page, record the table depth, and propagate
 * the updated DTE to the domain's devices.
 *
 * Returns 0 on success, -EINVAL if more table levels are needed than
 * the hardware supports, -ENOMEM on allocation failure.
 *
 * Note: This function expects iommu_domain->lock to be held prior calling the function.
 */
static int domain_enable_v2(struct protection_domain *domain, int pasids)
{
	int levels;

	/*
	 * Number of GCR3 table levels required: one level per 9 bits of
	 * PASID space beyond the first 512 entries.
	 * NOTE(review): a negative @pasids would keep the loop spinning
	 * (arithmetic right shift of a negative value stays negative) —
	 * presumably callers validate pasids > 0 first; confirm.
	 */
	for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
		levels += 1;

	if (levels > amd_iommu_max_glx_val)
		return -EINVAL;

	/* Root table is a single zeroed page; atomic since the lock is held */
	domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
	if (domain->gcr3_tbl == NULL)
		return -ENOMEM;

	domain->glx = levels;
	domain->flags |= PD_IOMMUV2_MASK;

	/* Push the new DTE settings out to the domain's devices */
	amd_iommu_domain_update(domain);

	return 0;
}
2540
-
2541
2552
int amd_iommu_domain_enable_v2 (struct iommu_domain * dom , int pasids )
2542
2553
{
2543
2554
struct protection_domain * pdom = to_pdomain (dom );
@@ -2556,7 +2567,7 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
2556
2567
goto out ;
2557
2568
2558
2569
if (!pdom -> gcr3_tbl )
2559
- ret = domain_enable_v2 (pdom , pasids );
2570
+ ret = setup_gcr3_table (pdom , pasids );
2560
2571
2561
2572
out :
2562
2573
spin_unlock_irqrestore (& pdom -> lock , flags );
0 commit comments