|
220 | 220 | #define DEV_ENTRY_EX 0x67 |
221 | 221 | #define DEV_ENTRY_SYSMGT1 0x68 |
222 | 222 | #define DEV_ENTRY_SYSMGT2 0x69 |
| 223 | +#define DTE_DATA1_SYSMGT_MASK GENMASK_ULL(41, 40) |
| 224 | + |
223 | 225 | #define DEV_ENTRY_IRQ_TBL_EN 0x80 |
224 | 226 | #define DEV_ENTRY_INIT_PASS 0xb8 |
225 | 227 | #define DEV_ENTRY_EINT_PASS 0xb9 |
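
The new DTE_DATA1_SYSMGT_MASK above adds a GENMASK_ULL() field mask for the SysMgt control in DTE[41:40]. A minimal sketch of the usual idiom for such masks, assuming FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>; the helper names are hypothetical and not part of this patch:

#include <linux/bitfield.h>
#include <linux/bits.h>

/* Hypothetical helpers, shown only to illustrate the GENMASK_ULL idiom. */
static inline u64 dte_get_sysmgt(u64 dte_data1)
{
	/* Extract the SysMgt encoding from DTE[41:40]. */
	return FIELD_GET(DTE_DATA1_SYSMGT_MASK, dte_data1);
}

static inline u64 dte_set_sysmgt(u64 dte_data1, u64 sysmgt)
{
	/* Clear DTE[41:40], then insert the new SysMgt encoding. */
	dte_data1 &= ~DTE_DATA1_SYSMGT_MASK;
	return dte_data1 | FIELD_PREP(DTE_DATA1_SYSMGT_MASK, sysmgt);
}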
|
407 | 409 | #define DTE_FLAG_HAD (3ULL << 7) |
408 | 410 | #define DTE_FLAG_GIOV BIT_ULL(54) |
409 | 411 | #define DTE_FLAG_GV BIT_ULL(55) |
410 | | -#define DTE_GLX_SHIFT (56) |
411 | | -#define DTE_GLX_MASK (3) |
| 412 | +#define DTE_GLX GENMASK_ULL(57, 56) |
412 | 413 | #define DTE_FLAG_IR BIT_ULL(61) |
413 | 414 | #define DTE_FLAG_IW BIT_ULL(62) |
414 | 415 |
|
415 | 416 | #define DTE_FLAG_IOTLB BIT_ULL(32) |
416 | 417 | #define DTE_FLAG_MASK (0x3ffULL << 32) |
417 | 418 | #define DEV_DOMID_MASK 0xffffULL |
418 | 419 |
|
419 | | -#define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL) |
420 | | -#define DTE_GCR3_VAL_B(x) (((x) >> 15) & 0x0ffffULL) |
421 | | -#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0x1fffffULL) |
422 | | - |
423 | | -#define DTE_GCR3_SHIFT_A 58 |
424 | | -#define DTE_GCR3_SHIFT_B 16 |
425 | | -#define DTE_GCR3_SHIFT_C 43 |
| 420 | +#define DTE_GCR3_14_12 GENMASK_ULL(60, 58) |
| 421 | +#define DTE_GCR3_30_15 GENMASK_ULL(31, 16) |
| 422 | +#define DTE_GCR3_51_31 GENMASK_ULL(63, 43) |
426 | 423 |
|
427 | 424 | #define DTE_GPT_LEVEL_SHIFT 54 |
| 425 | +#define DTE_GPT_LEVEL_MASK GENMASK_ULL(55, 54) |
428 | 426 |
|
429 | 427 | #define GCR3_VALID 0x01ULL |
430 | 428 |
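
Taken together, the DTE_GLX, DTE_GCR3_*, and DTE_GPT_LEVEL_MASK definitions replace value-relative shift/mask pairs with DTE-relative field masks: GCR3[14:12] lands in DTE[60:58], GCR3[30:15] in DTE[95:80], GCR3[51:31] in DTE[127:107], and the guest paging mode in DTE[183:182]. A minimal sketch of how these masks could be combined with FIELD_PREP() from <linux/bitfield.h>; the function is hypothetical, not the driver's actual helper:

/*
 * Hypothetical sketch: scatter a guest CR3 table root (gcr3), the GLX
 * encoding, and the guest page-table level into a DTE using the
 * DTE-relative masks defined above.
 */
static void sketch_set_gcr3(struct dev_table_entry *dte, u64 gcr3,
			    unsigned int glx, u64 gpt_level)
{
	/* DTE[55] = GV, DTE[57:56] = GLX, DTE[60:58] = GCR3[14:12] */
	dte->data[0] |= DTE_FLAG_GV |
			FIELD_PREP(DTE_GLX, glx) |
			FIELD_PREP(DTE_GCR3_14_12, (gcr3 >> 12) & 0x7);

	/* DTE[95:80] = GCR3[30:15], DTE[127:107] = GCR3[51:31] */
	dte->data[1] |= FIELD_PREP(DTE_GCR3_30_15, (gcr3 >> 15) & 0xffff) |
			FIELD_PREP(DTE_GCR3_51_31, gcr3 >> 31);

	/* DTE[183:182] = guest paging mode */
	dte->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, gpt_level);
}

Unlike the removed DTE_GCR3_VAL_*/DTE_GCR3_SHIFT_* pairs, only the slicing of the gcr3 value stays explicit at the call site; the DTE bit positions are carried entirely by the masks.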
|
| 429 | +/* DTE[128:179] | DTE[184:191] */ |
| 430 | +#define DTE_DATA2_INTR_MASK ~GENMASK_ULL(55, 52) |
| 431 | + |
431 | 432 | #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) |
432 | 433 | #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_PR) |
433 | 434 | #define IOMMU_PTE_DIRTY(pte) ((pte) & IOMMU_PTE_HD) |
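
DTE_DATA2_INTR_MASK selects every bit of the third DTE quadword except bits 55:52, i.e. the interrupt-remapping related fields DTE[179:128] and DTE[191:184]. A minimal sketch of one plausible use, carrying those live fields over when a DTE is rebuilt; the helper is hypothetical and not from this patch:

static void sketch_copy_intr_bits(struct dev_table_entry *dst,
				  const struct dev_table_entry *src)
{
	/* Keep dst's non-interrupt bits 55:52, take the rest from src. */
	dst->data[2] &= ~DTE_DATA2_INTR_MASK;
	dst->data[2] |= src->data[2] & DTE_DATA2_INTR_MASK;
}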
@@ -468,7 +469,7 @@ extern bool amd_iommu_dump; |
468 | 469 | #define DUMP_printk(format, arg...) \ |
469 | 470 | do { \ |
470 | 471 | if (amd_iommu_dump) \ |
471 | | - pr_info("AMD-Vi: " format, ## arg); \ |
| 472 | + pr_info(format, ## arg); \ |
472 | 473 | } while(0); |
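
Dropping the literal "AMD-Vi: " prefix only makes sense if a file-wide pr_fmt() already supplies it; otherwise DUMP_printk() messages would lose their prefix. A sketch of the assumed arrangement (the pr_fmt() definition lives in the driver's .c files, not in this header, and is not shown in this hunk):

/* Assumed to be defined near the top of the translation units that use
 * DUMP_printk(); with it, pr_info() prepends "AMD-Vi: " exactly once, so
 * the macro no longer needs the literal prefix. */
#define pr_fmt(fmt)	"AMD-Vi: " fmt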
473 | 474 |
|
474 | 475 | /* global flag if IOMMUs cache non-present entries */ |
@@ -516,6 +517,9 @@ extern struct kmem_cache *amd_iommu_irq_cache; |
516 | 517 | #define for_each_pdom_dev_data_safe(pdom_dev_data, next, pdom) \ |
517 | 518 | list_for_each_entry_safe((pdom_dev_data), (next), &pdom->dev_data_list, list) |
518 | 519 |
|
| 520 | +#define for_each_ivhd_dte_flags(entry) \ |
| 521 | + list_for_each_entry((entry), &amd_ivhd_dev_flags_list, list) |
| 522 | + |
519 | 523 | struct amd_iommu; |
520 | 524 | struct iommu_domain; |
521 | 525 | struct irq_domain; |
@@ -837,6 +841,7 @@ struct devid_map { |
837 | 841 | struct iommu_dev_data { |
838 | 842 | /*Protect against attach/detach races */ |
839 | 843 | struct mutex mutex; |
| 844 | + spinlock_t dte_lock; /* DTE lock for 256-bit access */ |
840 | 845 |
|
841 | 846 | struct list_head list; /* For domain->dev_list */ |
842 | 847 | struct llist_node dev_data_list; /* For global dev_data_list */ |
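
The new dte_lock serializes writers of a single 256-bit DTE, which cannot be updated with one atomic store. A minimal sketch of a writer taking the lock around a split update, assuming the data128[] view added to struct dev_table_entry later in this patch; the function name is hypothetical:

static void sketch_update_dte(struct iommu_dev_data *dev_data,
			      struct dev_table_entry *hw_dte,
			      const struct dev_table_entry *new_dte)
{
	unsigned long flags;

	/* Serialize against other writers of this device's DTE. */
	spin_lock_irqsave(&dev_data->dte_lock, flags);
	hw_dte->data128[0] = new_dte->data128[0];	/* DTE[127:0]   */
	hw_dte->data128[1] = new_dte->data128[1];	/* DTE[255:128] */
	spin_unlock_irqrestore(&dev_data->dte_lock, flags);
}

The lock would also need a spin_lock_init() wherever iommu_dev_data is allocated.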
@@ -881,7 +886,21 @@ extern struct list_head amd_iommu_list; |
881 | 886 | * Structure defining one entry in the device table |
882 | 887 | */ |
883 | 888 | struct dev_table_entry { |
884 | | - u64 data[4]; |
| 889 | + union { |
| 890 | + u64 data[4]; |
| 891 | + u128 data128[2]; |
| 892 | + }; |
| 893 | +}; |
| 894 | + |
| 895 | +/* |
| 896 | + * Structure to store persistent DTE flags from IVHD |
| 897 | + */ |
| 898 | +struct ivhd_dte_flags { |
| 899 | + struct list_head list; |
| 900 | + u16 segid; |
| 901 | + u16 devid_first; |
| 902 | + u16 devid_last; |
| 903 | + struct dev_table_entry dte; |
885 | 904 | }; |
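
struct ivhd_dte_flags records, per PCI segment and device-ID range, the persistent DTE settings requested by IVHD entries so they can be re-applied whenever a DTE is rebuilt. A minimal sketch of a lookup over the global list using the for_each_ivhd_dte_flags() iterator added above; the function name is hypothetical, and amd_ivhd_dev_flags_list is the list head the iterator assumes:

/* Hypothetical lookup: find persistent IVHD-provided DTE flags covering a
 * given segment/device ID, if any were recorded at init time. */
static struct ivhd_dte_flags *sketch_find_ivhd_dte_flags(u16 segid, u16 devid)
{
	struct ivhd_dte_flags *e;

	for_each_ivhd_dte_flags(e) {
		if (e->segid == segid &&
		    e->devid_first <= devid && devid <= e->devid_last)
			return e;
	}
	return NULL;
}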
886 | 905 |
|
887 | 906 | /* |
|