Skip to content

Commit 8b3f787

Browse files
ssuthiku-amd authored and joergroedel committed
iommu/amd: Introduce helper function to update 256-bit DTE
The current implementation does not follow 128-bit write requirement to update DTE as specified in the AMD I/O Virtualization Techonology (IOMMU) Specification. Therefore, modify the struct dev_table_entry to contain union of u128 data array, and introduce a helper functions update_dte256() to update DTE using two 128-bit cmpxchg operations to update 256-bit DTE with the modified structure, and take into account the DTE[V, GV] bits when programming the DTE to ensure proper order of DTE programming and flushing. In addition, introduce a per-DTE spin_lock struct dev_data.dte_lock to provide synchronization when updating the DTE to prevent cmpxchg128 failure. Suggested-by: Jason Gunthorpe <[email protected]> Suggested-by: Uros Bizjak <[email protected]> Reviewed-by: Jason Gunthorpe <[email protected]> Reviewed-by: Uros Bizjak <[email protected]> Signed-off-by: Suravee Suthikulpanit <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Joerg Roedel <[email protected]>
1 parent 7bea695 commit 8b3f787

File tree

2 files changed

+132
-1
lines changed

2 files changed

+132
-1
lines changed

drivers/iommu/amd/amd_iommu_types.h

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -427,9 +427,13 @@
427427
#define DTE_GCR3_SHIFT_C	43

#define DTE_GPT_LEVEL_SHIFT	54
/* Guest page table level field: DTE[55:54] */
#define DTE_GPT_LEVEL_MASK	GENMASK_ULL(55, 54)

#define GCR3_VALID		0x01ULL

/*
 * Interrupt-remapping portion of qword 2 of the DTE:
 * DTE[128:179] | DTE[184:191], i.e. everything in data[2]
 * except bits 55:52 (which belong to DMA translation).
 */
#define DTE_DATA2_INTR_MASK	~GENMASK_ULL(55, 52)

#define IOMMU_PAGE_MASK		(((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte)	((pte) & IOMMU_PTE_PR)
#define IOMMU_PTE_DIRTY(pte)	((pte) & IOMMU_PTE_HD)
@@ -842,6 +846,7 @@ struct devid_map {
842846
struct iommu_dev_data {
843847
/*Protect against attach/detach races */
844848
struct mutex mutex;
849+
spinlock_t dte_lock; /* DTE lock for 256-bit access */
845850

846851
struct list_head list; /* For domain->dev_list */
847852
struct llist_node dev_data_list; /* For global dev_data_list */
@@ -886,7 +891,10 @@ extern struct list_head amd_iommu_list;
886891
* Structure defining one entry in the device table
887892
*/
888893
struct dev_table_entry {
889-
u64 data[4];
894+
union {
895+
u64 data[4];
896+
u128 data128[2];
897+
};
890898
};
891899

892900
/*

drivers/iommu/amd/iommu.c

Lines changed: 123 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -83,12 +83,125 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
8383
static void set_dte_entry(struct amd_iommu *iommu,
8484
struct iommu_dev_data *dev_data);
8585

86+
static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid);
87+
8688
/****************************************************************************
8789
*
8890
* Helper functions
8991
*
9092
****************************************************************************/
9193

94+
/* Atomically store a 128-bit value so the IOMMU never sees a torn half-DTE. */
static __always_inline void amd_iommu_atomic128_set(__int128 *ptr, __int128 val)
{
	/*
	 * Note:
	 * We use arch_cmpxchg128_local() because:
	 * - Need cmpxchg16b instruction mainly for 128-bit store to DTE
	 *   (not necessary for cmpxchg since this function is already
	 *   protected by a spin_lock for this DTE).
	 * - Neither need LOCK_PREFIX nor try loop because of the spin_lock.
	 */
	arch_cmpxchg128_local(ptr, *ptr, val);
}
106+
107+
static void write_dte_upper128(struct dev_table_entry *ptr, struct dev_table_entry *new)
108+
{
109+
struct dev_table_entry old;
110+
111+
old.data128[1] = ptr->data128[1];
112+
/*
113+
* Preserve DTE_DATA2_INTR_MASK. This needs to be
114+
* done here since it requires to be inside
115+
* spin_lock(&dev_data->dte_lock) context.
116+
*/
117+
new->data[2] &= ~DTE_DATA2_INTR_MASK;
118+
new->data[2] |= old.data[2] & DTE_DATA2_INTR_MASK;
119+
120+
amd_iommu_atomic128_set(&ptr->data128[1], new->data128[1]);
121+
}
122+
123+
static void write_dte_lower128(struct dev_table_entry *ptr, struct dev_table_entry *new)
124+
{
125+
amd_iommu_atomic128_set(&ptr->data128[0], new->data128[0]);
126+
}
127+
128+
/*
129+
* Note:
130+
* IOMMU reads the entire Device Table entry in a single 256-bit transaction
131+
* but the driver is programming DTE using 2 128-bit cmpxchg. So, the driver
132+
* need to ensure the following:
133+
* - DTE[V|GV] bit is being written last when setting.
134+
* - DTE[V|GV] bit is being written first when clearing.
135+
*
136+
* This function is used only by code, which updates DMA translation part of the DTE.
137+
* So, only consider control bits related to DMA when updating the entry.
138+
*/
139+
static void update_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
140+
struct dev_table_entry *new)
141+
{
142+
unsigned long flags;
143+
struct dev_table_entry *dev_table = get_dev_table(iommu);
144+
struct dev_table_entry *ptr = &dev_table[dev_data->devid];
145+
146+
spin_lock_irqsave(&dev_data->dte_lock, flags);
147+
148+
if (!(ptr->data[0] & DTE_FLAG_V)) {
149+
/* Existing DTE is not valid. */
150+
write_dte_upper128(ptr, new);
151+
write_dte_lower128(ptr, new);
152+
iommu_flush_dte_sync(iommu, dev_data->devid);
153+
} else if (!(new->data[0] & DTE_FLAG_V)) {
154+
/* Existing DTE is valid. New DTE is not valid. */
155+
write_dte_lower128(ptr, new);
156+
write_dte_upper128(ptr, new);
157+
iommu_flush_dte_sync(iommu, dev_data->devid);
158+
} else if (!FIELD_GET(DTE_FLAG_GV, ptr->data[0])) {
159+
/*
160+
* Both DTEs are valid.
161+
* Existing DTE has no guest page table.
162+
*/
163+
write_dte_upper128(ptr, new);
164+
write_dte_lower128(ptr, new);
165+
iommu_flush_dte_sync(iommu, dev_data->devid);
166+
} else if (!FIELD_GET(DTE_FLAG_GV, new->data[0])) {
167+
/*
168+
* Both DTEs are valid.
169+
* Existing DTE has guest page table,
170+
* new DTE has no guest page table,
171+
*/
172+
write_dte_lower128(ptr, new);
173+
write_dte_upper128(ptr, new);
174+
iommu_flush_dte_sync(iommu, dev_data->devid);
175+
} else if (FIELD_GET(DTE_GPT_LEVEL_MASK, ptr->data[2]) !=
176+
FIELD_GET(DTE_GPT_LEVEL_MASK, new->data[2])) {
177+
/*
178+
* Both DTEs are valid and have guest page table,
179+
* but have different number of levels. So, we need
180+
* to upadte both upper and lower 128-bit value, which
181+
* require disabling and flushing.
182+
*/
183+
struct dev_table_entry clear = {};
184+
185+
/* First disable DTE */
186+
write_dte_lower128(ptr, &clear);
187+
iommu_flush_dte_sync(iommu, dev_data->devid);
188+
189+
/* Then update DTE */
190+
write_dte_upper128(ptr, new);
191+
write_dte_lower128(ptr, new);
192+
iommu_flush_dte_sync(iommu, dev_data->devid);
193+
} else {
194+
/*
195+
* Both DTEs are valid and have guest page table,
196+
* and same number of levels. We just need to only
197+
* update the lower 128-bit. So no need to disable DTE.
198+
*/
199+
write_dte_lower128(ptr, new);
200+
}
201+
202+
spin_unlock_irqrestore(&dev_data->dte_lock, flags);
203+
}
204+
92205
static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
93206
{
94207
return (pdom && (pdom->pd_mode == PD_MODE_V2));
@@ -209,6 +322,7 @@ static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
209322
return NULL;
210323

211324
mutex_init(&dev_data->mutex);
325+
spin_lock_init(&dev_data->dte_lock);
212326
dev_data->devid = devid;
213327
ratelimit_default_init(&dev_data->rs);
214328

@@ -1261,6 +1375,15 @@ static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
12611375
return iommu_queue_command(iommu, &cmd);
12621376
}
12631377

1378+
/*
 * Queue a DTE invalidation for @devid and, if queuing succeeded, wait for
 * the IOMMU to complete it so the hardware no longer caches the old entry.
 */
static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid)
{
	int ret;

	ret = iommu_flush_dte(iommu, devid);
	if (!ret)
		iommu_completion_wait(iommu);
}
1386+
12641387
static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
12651388
{
12661389
u32 devid;

0 commit comments

Comments
 (0)