Skip to content

Commit 796aec4

Browse files
committed
Merge tag 'idxd-for-linus-may2024' of git bundle from Arjan
Pull DSA and IAA accelerator mis-alignment fix from Arjan van de Ven: "The DSA (memory copy/zero/etc) and IAA (compression) accelerators in the Sapphire Rapids and Emerald Rapids SOCs turn out to have a bug that has security implications. Both of these accelerators work by the application submitting a 64 byte command to the device; this command contains an opcode as well as the virtual address of the return value that the device will update on completion... and a set of opcode specific values. In a typical scenario a ring 3 application mmaps the device file and uses the ENQCMD or MOVDIR64 instructions (which are variations of a 64 byte atomic write) on this mmap'd memory region to directly submit commands to the device hardware. The return value, as specified in the command, is supposed to be 32 (or 64) bytes aligned in memory, and generally the hardware checks and enforces this alignment. However in testing it has been found that there are conditions (controlled by the submitter) where this enforcement does not happen... which makes it possible for the return value to span a page boundary. And this is where it goes wrong - the accelerators will perform the virtual to physical address lookup on the first of the two pages, but end up continuing to write to the next consecutive physical (host) page rather than the consecutive virtual page. In addition, the device will end up in a hung state on such an unaligned write of the return value. This patch series has the proposed software side solution consisting of three parts: - Don't allow these two PCI devices to be assigned to VM guests (we cannot trust a VM guest to behave correctly and not cause this condition) - Don't allow ring 3 applications to set up the mmap unless they have CAP_SYS_RAWIO permissions. 
This makes it no longer possible for non-root applications to directly submit commands to the accelerator - Add a write() method to the device so that an application can submit its commands to the kernel driver, which performs the needed sanity checks before submitting it to the hardware. This switch from mmap to write is an incompatible interface change to non-root userspace, but we have not found a way to avoid this. All software we know of uses a small set of accessor libraries for these accelerators, for which libqpl and libdml (on github) are the most common. As part of the security release, updated versions of these libraries will be released that transparently fall back to write(). Intel has assigned CVE-2024-21823 to this hardware issue" * tag 'idxd-for-linus-may2024' of git bundle from Arjan: dmaengine: idxd: add a write() method for applications to submit work dmaengine: idxd: add a new security check to deal with a hardware erratum VFIO: Add the SPR_DSA and SPR_IAX devices to the denylist
2 parents a5131c3 + 6827738 commit 796aec4

File tree

7 files changed

+113
-5
lines changed

7 files changed

+113
-5
lines changed

drivers/dma/idxd/cdev.c

Lines changed: 77 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -400,6 +400,18 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
400400
int rc;
401401

402402
dev_dbg(&pdev->dev, "%s called\n", __func__);
403+
404+
/*
405+
* Due to an erratum in some of the devices supported by the driver,
406+
* direct user submission to the device can be unsafe.
407+
* (See the INTEL-SA-01084 security advisory)
408+
*
409+
* For the devices that exhibit this behavior, require that the user
410+
* has CAP_SYS_RAWIO capabilities.
411+
*/
412+
if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO))
413+
return -EPERM;
414+
403415
rc = check_vma(wq, vma, __func__);
404416
if (rc < 0)
405417
return rc;
@@ -414,6 +426,70 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
414426
vma->vm_page_prot);
415427
}
416428

429+
/*
 * idxd_submit_user_descriptor - validate and submit one user work descriptor
 * @ctx:   per-open-file user context (carries the wq and the caller's PASID)
 * @udesc: userspace pointer to one 64-byte struct dsa_hw_desc
 *
 * Copies the descriptor from userspace, rejects submissions the hardware
 * cannot safely accept (see INTEL-SA-01084), then writes it to the device
 * portal.
 *
 * Return: 0 on success, -EFAULT on a faulting user pointer, -EINVAL for a
 * disallowed opcode or misaligned completion address, or a negative errno
 * propagated from idxd_enqcmds() on a shared-queue submission failure.
 */
static int idxd_submit_user_descriptor(struct idxd_user_context *ctx,
				       struct dsa_hw_desc __user *udesc)
{
	struct idxd_wq *wq = ctx->wq;
	struct idxd_dev *idxd_dev = &wq->idxd->idxd_dev;
	/* DSA mandates 32-byte completion-record alignment; IAA mandates 64. */
	const uint64_t comp_addr_align = is_dsa_dev(idxd_dev) ? 0x20 : 0x40;
	void __iomem *portal = idxd_wq_portal_addr(wq);
	/* 64-byte aligned local copy so it can be pushed with one atomic 512-bit write. */
	struct dsa_hw_desc descriptor __aligned(64);
	int rc;

	rc = copy_from_user(&descriptor, udesc, sizeof(descriptor));
	if (rc)
		return -EFAULT;

	/*
	 * DSA devices are capable of indirect ("batch") command submission.
	 * On devices where direct user submissions are not safe, we cannot
	 * allow this since there is no good way for us to verify these
	 * indirect commands.
	 */
	if (is_dsa_dev(idxd_dev) && descriptor.opcode == DSA_OPCODE_BATCH &&
	    !wq->idxd->user_submission_safe)
		return -EINVAL;
	/*
	 * As per the programming specification, the completion address must be
	 * aligned to 32 or 64 bytes. If this is violated the hardware
	 * engine can get very confused (security issue).
	 */
	if (!IS_ALIGNED(descriptor.completion_addr, comp_addr_align))
		return -EINVAL;

	if (wq_dedicated(wq))
		/* Dedicated wq: direct 512-bit MMIO submission, cannot be rejected. */
		iosubmit_cmds512(portal, &descriptor, 1);
	else {
		/* Shared wq: stamp the caller's PASID and use ENQCMDS, which can retry/fail. */
		descriptor.priv = 0;
		descriptor.pasid = ctx->pasid;
		rc = idxd_enqcmds(wq, portal, &descriptor);
		if (rc < 0)
			return rc;
	}

	return 0;
}
472+
473+
/*
 * idxd_cdev_write - write() handler for the idxd char device
 * @filp:   open device file; private_data holds the idxd_user_context
 * @buf:    userspace buffer containing an array of struct dsa_hw_desc
 * @len:    buffer length in bytes
 * @unused: file offset, ignored (the device is not seekable)
 *
 * Submission path for devices where direct mmap'd user submission is not
 * safe: each descriptor is validated by the kernel before being handed to
 * the hardware. Any trailing bytes that do not form a whole descriptor are
 * silently ignored.
 *
 * Return: number of bytes consumed (a multiple of the descriptor size); if
 * the very first descriptor fails, the negative errno from the submission
 * instead. A short count is returned when a later descriptor fails.
 */
static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t len,
			       loff_t *unused)
{
	struct dsa_hw_desc __user *udesc = (struct dsa_hw_desc __user *)buf;
	struct idxd_user_context *ctx = filp->private_data;
	ssize_t written = 0;
	int i;

	for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) {
		int rc = idxd_submit_user_descriptor(ctx, udesc + i);

		if (rc)
			/* Report progress made so far; only fail outright on the first one. */
			return written ? written : rc;

		written += sizeof(struct dsa_hw_desc);
	}

	return written;
}
492+
417493
static __poll_t idxd_cdev_poll(struct file *filp,
418494
struct poll_table_struct *wait)
419495
{
@@ -436,6 +512,7 @@ static const struct file_operations idxd_cdev_fops = {
436512
.open = idxd_cdev_open,
437513
.release = idxd_cdev_release,
438514
.mmap = idxd_cdev_mmap,
515+
.write = idxd_cdev_write,
439516
.poll = idxd_cdev_poll,
440517
};
441518

drivers/dma/idxd/idxd.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -288,6 +288,7 @@ struct idxd_driver_data {
288288
int evl_cr_off;
289289
int cr_status_off;
290290
int cr_result_off;
291+
bool user_submission_safe;
291292
load_device_defaults_fn_t load_device_defaults;
292293
};
293294

@@ -374,6 +375,8 @@ struct idxd_device {
374375

375376
struct dentry *dbgfs_dir;
376377
struct dentry *dbgfs_evl_file;
378+
379+
bool user_submission_safe;
377380
};
378381

379382
static inline unsigned int evl_ent_size(struct idxd_device *idxd)

drivers/dma/idxd/init.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,7 @@ static struct idxd_driver_data idxd_driver_data[] = {
4747
.align = 32,
4848
.dev_type = &dsa_device_type,
4949
.evl_cr_off = offsetof(struct dsa_evl_entry, cr),
50+
.user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
5051
.cr_status_off = offsetof(struct dsa_completion_record, status),
5152
.cr_result_off = offsetof(struct dsa_completion_record, result),
5253
},
@@ -57,6 +58,7 @@ static struct idxd_driver_data idxd_driver_data[] = {
5758
.align = 64,
5859
.dev_type = &iax_device_type,
5960
.evl_cr_off = offsetof(struct iax_evl_entry, cr),
61+
.user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
6062
.cr_status_off = offsetof(struct iax_completion_record, status),
6163
.cr_result_off = offsetof(struct iax_completion_record, error_code),
6264
.load_device_defaults = idxd_load_iaa_device_defaults,
@@ -774,6 +776,8 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
774776
dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
775777
idxd->hw.version);
776778

779+
idxd->user_submission_safe = data->user_submission_safe;
780+
777781
return 0;
778782

779783
err_dev_register:

drivers/dma/idxd/registers.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,9 +6,6 @@
66
#include <uapi/linux/idxd.h>
77

88
/* PCI Config */
9-
#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25
10-
#define PCI_DEVICE_ID_INTEL_IAX_SPR0 0x0cfe
11-
129
#define DEVICE_VERSION_1 0x100
1310
#define DEVICE_VERSION_2 0x200
1411

drivers/dma/idxd/sysfs.c

Lines changed: 25 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1197,12 +1197,35 @@ static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attrib
11971197
static struct device_attribute dev_attr_wq_enqcmds_retries =
11981198
__ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store);
11991199

1200+
/*
 * op_cap_show_common - format an operation-capability bitmap for sysfs
 * @dev:        config device, used to reach the owning idxd_device
 * @buf:        sysfs output buffer (PAGE_SIZE)
 * @opcap_bmap: capability bitmap, IDXD_MAX_OPCAP_BITS wide
 *
 * Emits the bitmap as comma-separated 64-bit groups, most-significant
 * group first, terminated with a newline. Shared by the per-device
 * op_cap and per-wq op_config attributes.
 *
 * Return: number of bytes written into @buf.
 */
static ssize_t op_cap_show_common(struct device *dev, char *buf, unsigned long *opcap_bmap)
{
	ssize_t pos;
	int i;

	pos = 0;
	for (i = IDXD_MAX_OPCAP_BITS/64 - 1; i >= 0; i--) {
		/* Work on a copy so the device's stored capability mask is untouched. */
		unsigned long val = opcap_bmap[i];

		/* On systems where direct user submissions are not safe, we need to clear out
		 * the BATCH capability from the capability mask in sysfs since we cannot support
		 * that command on such systems.
		 */
		if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe)
			clear_bit(DSA_OPCODE_BATCH % 64, &val);

		pos += sysfs_emit_at(buf, pos, "%*pb", 64, &val);
		pos += sysfs_emit_at(buf, pos, "%c", i == 0 ? '\n' : ',');
	}

	return pos;
}
1222+
12001223
static ssize_t wq_op_config_show(struct device *dev,
12011224
struct device_attribute *attr, char *buf)
12021225
{
12031226
struct idxd_wq *wq = confdev_to_wq(dev);
12041227

1205-
return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, wq->opcap_bmap);
1228+
return op_cap_show_common(dev, buf, wq->opcap_bmap);
12061229
}
12071230

12081231
static int idxd_verify_supported_opcap(struct idxd_device *idxd, unsigned long *opmask)
@@ -1455,7 +1478,7 @@ static ssize_t op_cap_show(struct device *dev,
14551478
{
14561479
struct idxd_device *idxd = confdev_to_idxd(dev);
14571480

1458-
return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, idxd->opcap_bmap);
1481+
return op_cap_show_common(dev, buf, idxd->opcap_bmap);
14591482
}
14601483
static DEVICE_ATTR_RO(op_cap);
14611484

drivers/vfio/pci/vfio_pci.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,8 @@ static bool vfio_pci_dev_in_denylist(struct pci_dev *pdev)
7171
case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
7272
case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
7373
case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
74+
case PCI_DEVICE_ID_INTEL_DSA_SPR0:
75+
case PCI_DEVICE_ID_INTEL_IAX_SPR0:
7476
return true;
7577
default:
7678
return false;

include/linux/pci_ids.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2687,8 +2687,10 @@
26872687
#define PCI_DEVICE_ID_INTEL_I960 0x0960
26882688
#define PCI_DEVICE_ID_INTEL_I960RM 0x0962
26892689
#define PCI_DEVICE_ID_INTEL_HDA_HSW_0 0x0a0c
2690+
#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25
26902691
#define PCI_DEVICE_ID_INTEL_HDA_HSW_2 0x0c0c
26912692
#define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60
2693+
#define PCI_DEVICE_ID_INTEL_IAX_SPR0 0x0cfe
26922694
#define PCI_DEVICE_ID_INTEL_HDA_HSW_3 0x0d0c
26932695
#define PCI_DEVICE_ID_INTEL_HDA_BYT 0x0f04
26942696
#define PCI_DEVICE_ID_INTEL_SST_BYT 0x0f28

0 commit comments

Comments
 (0)