@@ -2394,8 +2394,14 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
2394
2394
}
2395
2395
2396
2396
out_err :
2397
+
2397
2398
iommu_completion_wait (iommu );
2398
2399
2400
+ if (FEATURE_NUM_INT_REMAP_SUP_2K (amd_iommu_efr2 ))
2401
+ dev_data -> max_irqs = MAX_IRQS_PER_TABLE_2K ;
2402
+ else
2403
+ dev_data -> max_irqs = MAX_IRQS_PER_TABLE_512 ;
2404
+
2399
2405
if (dev_is_pci (dev ))
2400
2406
pci_prepare_ats (to_pci_dev (dev ), PAGE_SHIFT );
2401
2407
@@ -3076,6 +3082,13 @@ static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
3076
3082
raw_spin_unlock_irqrestore (& iommu -> lock , flags );
3077
3083
}
3078
3084
3085
+ static inline u8 iommu_get_int_tablen (struct iommu_dev_data * dev_data )
3086
+ {
3087
+ if (dev_data && dev_data -> max_irqs == MAX_IRQS_PER_TABLE_2K )
3088
+ return DTE_INTTABLEN_2K ;
3089
+ return DTE_INTTABLEN_512 ;
3090
+ }
3091
+
3079
3092
static void set_dte_irq_entry (struct amd_iommu * iommu , u16 devid ,
3080
3093
struct irq_remap_table * table )
3081
3094
{
@@ -3090,7 +3103,7 @@ static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
3090
3103
new &= ~DTE_IRQ_PHYS_ADDR_MASK ;
3091
3104
new |= iommu_virt_to_phys (table -> table );
3092
3105
new |= DTE_IRQ_REMAP_INTCTL ;
3093
- new |= DTE_INTTABLEN_512 ;
3106
+ new |= iommu_get_int_tablen ( dev_data ) ;
3094
3107
new |= DTE_IRQ_REMAP_ENABLE ;
3095
3108
WRITE_ONCE (dte -> data [2 ], new );
3096
3109
@@ -3171,13 +3184,14 @@ static inline size_t get_irq_table_size(unsigned int max_irqs)
3171
3184
}
3172
3185
3173
3186
static struct irq_remap_table * alloc_irq_table (struct amd_iommu * iommu ,
3174
- u16 devid , struct pci_dev * pdev )
3187
+ u16 devid , struct pci_dev * pdev ,
3188
+ unsigned int max_irqs )
3175
3189
{
3176
3190
struct irq_remap_table * table = NULL ;
3177
3191
struct irq_remap_table * new_table = NULL ;
3178
3192
struct amd_iommu_pci_seg * pci_seg ;
3179
3193
unsigned long flags ;
3180
- int order = get_order (get_irq_table_size (MAX_IRQS_PER_TABLE ));
3194
+ int order = get_order (get_irq_table_size (max_irqs ));
3181
3195
int nid = iommu && iommu -> dev ? dev_to_node (& iommu -> dev -> dev ) : NUMA_NO_NODE ;
3182
3196
u16 alias ;
3183
3197
@@ -3239,13 +3253,14 @@ static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
3239
3253
}
3240
3254
3241
3255
static int alloc_irq_index (struct amd_iommu * iommu , u16 devid , int count ,
3242
- bool align , struct pci_dev * pdev )
3256
+ bool align , struct pci_dev * pdev ,
3257
+ unsigned long max_irqs )
3243
3258
{
3244
3259
struct irq_remap_table * table ;
3245
3260
int index , c , alignment = 1 ;
3246
3261
unsigned long flags ;
3247
3262
3248
- table = alloc_irq_table (iommu , devid , pdev );
3263
+ table = alloc_irq_table (iommu , devid , pdev , max_irqs );
3249
3264
if (!table )
3250
3265
return - ENODEV ;
3251
3266
@@ -3256,7 +3271,7 @@ static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count,
3256
3271
3257
3272
/* Scan table for free entries */
3258
3273
for (index = ALIGN (table -> min_index , alignment ), c = 0 ;
3259
- index < MAX_IRQS_PER_TABLE ;) {
3274
+ index < max_irqs ;) {
3260
3275
if (!iommu -> irte_ops -> is_allocated (table , index )) {
3261
3276
c += 1 ;
3262
3277
} else {
@@ -3526,6 +3541,14 @@ static void fill_msi_msg(struct msi_msg *msg, u32 index)
3526
3541
msg -> data = index ;
3527
3542
msg -> address_lo = 0 ;
3528
3543
msg -> arch_addr_lo .base_address = X86_MSI_BASE_ADDRESS_LOW ;
3544
+ /*
3545
+ * The struct msi_msg.dest_mode_logical is used to set the DM bit
3546
+ * in MSI Message Address Register. For device w/ 2K int-remap support,
3547
+ * this bit must be set to 1 regardless of the actual destination
3548
+ * mode, which is signified by the IRTE[DM].
3549
+ */
3550
+ if (FEATURE_NUM_INT_REMAP_SUP_2K (amd_iommu_efr2 ))
3551
+ msg -> arch_addr_lo .dest_mode_logical = true;
3529
3552
msg -> address_hi = X86_MSI_BASE_ADDRESS_HIGH ;
3530
3553
}
3531
3554
@@ -3588,6 +3611,8 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
3588
3611
struct amd_ir_data * data = NULL ;
3589
3612
struct amd_iommu * iommu ;
3590
3613
struct irq_cfg * cfg ;
3614
+ struct iommu_dev_data * dev_data ;
3615
+ unsigned long max_irqs ;
3591
3616
int i , ret , devid , seg , sbdf ;
3592
3617
int index ;
3593
3618
@@ -3606,14 +3631,17 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
3606
3631
if (!iommu )
3607
3632
return - EINVAL ;
3608
3633
3634
+ dev_data = search_dev_data (iommu , devid );
3635
+ max_irqs = dev_data ? dev_data -> max_irqs : MAX_IRQS_PER_TABLE_512 ;
3636
+
3609
3637
ret = irq_domain_alloc_irqs_parent (domain , virq , nr_irqs , arg );
3610
3638
if (ret < 0 )
3611
3639
return ret ;
3612
3640
3613
3641
if (info -> type == X86_IRQ_ALLOC_TYPE_IOAPIC ) {
3614
3642
struct irq_remap_table * table ;
3615
3643
3616
- table = alloc_irq_table (iommu , devid , NULL );
3644
+ table = alloc_irq_table (iommu , devid , NULL , max_irqs );
3617
3645
if (table ) {
3618
3646
if (!table -> min_index ) {
3619
3647
/*
@@ -3634,9 +3662,11 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
3634
3662
bool align = (info -> type == X86_IRQ_ALLOC_TYPE_PCI_MSI );
3635
3663
3636
3664
index = alloc_irq_index (iommu , devid , nr_irqs , align ,
3637
- msi_desc_to_pci_dev (info -> desc ));
3665
+ msi_desc_to_pci_dev (info -> desc ),
3666
+ max_irqs );
3638
3667
} else {
3639
- index = alloc_irq_index (iommu , devid , nr_irqs , false, NULL );
3668
+ index = alloc_irq_index (iommu , devid , nr_irqs , false, NULL ,
3669
+ max_irqs );
3640
3670
}
3641
3671
3642
3672
if (index < 0 ) {
0 commit comments