
Commit 403a2e2

davejiang authored and vinodkoul committed
dmaengine: idxd: change MSIX allocation based on per wq activation
Change the driver so that the WQ interrupt is requested only when the wq is being enabled. This new scheme sets things up so that request_threaded_irq() is only called when a kernel wq type is being enabled. It also prepares for future interrupt requests where a different interrupt handler, such as a wq occupancy interrupt, can be set up instead of the wq completion interrupt.

Not calling request_irq() until the WQ actually needs an irq also prevents wasting CPU irq vectors on x86 systems, where they are a limited resource.

idxd_flush_pending_descs() is moved to device.c since descriptor flushing is now part of wq disable rather than shutdown().

Signed-off-by: Dave Jiang <[email protected]>
Link: https://lore.kernel.org/r/163942149487.2412839.6691222855803875848.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <[email protected]>
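For orientation, the per-wq irq lifecycle this commit introduces looks roughly as follows. This is a condensed sketch pieced together from the dma.c and device.c hunks below, not a literal copy of the driver code: the wq->wq_lock locking and the err/err_irq unwind labels are elided here.

	/* Enable path (idxd_dmaengine_drv_probe): the vector is requested only
	 * when a kernel wq type is enabled, not for every MSIX entry at probe.
	 */
	wq->type = IDXD_WQT_KERNEL;
	rc = idxd_wq_request_irq(wq);	/* pci_irq_vector() + request_threaded_irq() */
	if (rc < 0)
		return rc;		/* the real code unwinds via goto err_irq */
	rc = __drv_enable_wq(wq);

	/* Disable path (idxd_dmaengine_drv_remove): disable the wq first, then
	 * release the vector and flush descriptors still pending on the
	 * interrupt entry.
	 */
	__drv_disable_wq(wq);
	idxd_wq_free_irq(wq);		/* synchronize_irq(), free_irq(), idxd_flush_pending_descs() */
	wq->type = IDXD_WQT_NONE;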
1 parent 23a50c8 commit 403a2e2

File tree

6 files changed (+132, -185 lines)


drivers/dma/idxd/device.c

Lines changed: 100 additions & 61 deletions
@@ -19,36 +19,6 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
 static void idxd_wq_disable_cleanup(struct idxd_wq *wq);
 
 /* Interrupt control bits */
-void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
-{
-	struct idxd_irq_entry *ie;
-	struct irq_data *data;
-
-	ie = idxd_get_ie(idxd, vec_id);
-	data = irq_get_irq_data(ie->vector);
-	pci_msi_mask_irq(data);
-}
-
-void idxd_mask_msix_vectors(struct idxd_device *idxd)
-{
-	struct pci_dev *pdev = idxd->pdev;
-	int msixcnt = pci_msix_vec_count(pdev);
-	int i;
-
-	for (i = 0; i < msixcnt; i++)
-		idxd_mask_msix_vector(idxd, i);
-}
-
-void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
-{
-	struct idxd_irq_entry *ie;
-	struct irq_data *data;
-
-	ie = idxd_get_ie(idxd, vec_id);
-	data = irq_get_irq_data(ie->vector);
-	pci_msi_unmask_irq(data);
-}
-
 void idxd_unmask_error_interrupts(struct idxd_device *idxd)
 {
 	union genctrl_reg genctrl;
@@ -593,7 +563,6 @@ void idxd_device_reset(struct idxd_device *idxd)
 	idxd_device_clear_state(idxd);
 	idxd->state = IDXD_DEV_DISABLED;
 	idxd_unmask_error_interrupts(idxd);
-	idxd_msix_perm_setup(idxd);
 	spin_unlock(&idxd->dev_lock);
 }
 
@@ -732,36 +701,6 @@ void idxd_device_clear_state(struct idxd_device *idxd)
 	idxd_device_wqs_clear_state(idxd);
 }
 
-void idxd_msix_perm_setup(struct idxd_device *idxd)
-{
-	union msix_perm mperm;
-	int i, msixcnt;
-
-	msixcnt = pci_msix_vec_count(idxd->pdev);
-	if (msixcnt < 0)
-		return;
-
-	mperm.bits = 0;
-	mperm.pasid = idxd->pasid;
-	mperm.pasid_en = device_pasid_enabled(idxd);
-	for (i = 1; i < msixcnt; i++)
-		iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
-}
-
-void idxd_msix_perm_clear(struct idxd_device *idxd)
-{
-	union msix_perm mperm;
-	int i, msixcnt;
-
-	msixcnt = pci_msix_vec_count(idxd->pdev);
-	if (msixcnt < 0)
-		return;
-
-	mperm.bits = 0;
-	for (i = 1; i < msixcnt; i++)
-		iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
-}
-
 static void idxd_group_config_write(struct idxd_group *group)
 {
 	struct idxd_device *idxd = group->idxd;
@@ -1158,6 +1097,106 @@ int idxd_device_load_config(struct idxd_device *idxd)
 	return 0;
 }
 
+static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
+{
+	struct idxd_desc *desc, *itr;
+	struct llist_node *head;
+	LIST_HEAD(flist);
+	enum idxd_complete_type ctype;
+
+	spin_lock(&ie->list_lock);
+	head = llist_del_all(&ie->pending_llist);
+	if (head) {
+		llist_for_each_entry_safe(desc, itr, head, llnode)
+			list_add_tail(&desc->list, &ie->work_list);
+	}
+
+	list_for_each_entry_safe(desc, itr, &ie->work_list, list)
+		list_move_tail(&desc->list, &flist);
+	spin_unlock(&ie->list_lock);
+
+	list_for_each_entry_safe(desc, itr, &flist, list) {
+		list_del(&desc->list);
+		ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
+		idxd_dma_complete_txd(desc, ctype, true);
+	}
+}
+
+static void idxd_device_set_perm_entry(struct idxd_device *idxd,
+				       struct idxd_irq_entry *ie)
+{
+	union msix_perm mperm;
+
+	if (ie->pasid == INVALID_IOASID)
+		return;
+
+	mperm.bits = 0;
+	mperm.pasid = ie->pasid;
+	mperm.pasid_en = 1;
+	iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
+}
+
+static void idxd_device_clear_perm_entry(struct idxd_device *idxd,
+					 struct idxd_irq_entry *ie)
+{
+	iowrite32(0, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
+}
+
+void idxd_wq_free_irq(struct idxd_wq *wq)
+{
+	struct idxd_device *idxd = wq->idxd;
+	struct idxd_irq_entry *ie = &wq->ie;
+
+	synchronize_irq(ie->vector);
+	free_irq(ie->vector, ie);
+	idxd_flush_pending_descs(ie);
+	if (idxd->request_int_handles)
+		idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
+	idxd_device_clear_perm_entry(idxd, ie);
+	ie->vector = -1;
+	ie->int_handle = INVALID_INT_HANDLE;
+	ie->pasid = INVALID_IOASID;
+}
+
+int idxd_wq_request_irq(struct idxd_wq *wq)
+{
+	struct idxd_device *idxd = wq->idxd;
+	struct pci_dev *pdev = idxd->pdev;
+	struct device *dev = &pdev->dev;
+	struct idxd_irq_entry *ie;
+	int rc;
+
+	ie = &wq->ie;
+	ie->vector = pci_irq_vector(pdev, ie->id);
+	ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : INVALID_IOASID;
+	idxd_device_set_perm_entry(idxd, ie);
+
+	rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie);
+	if (rc < 0) {
+		dev_err(dev, "Failed to request irq %d.\n", ie->vector);
+		goto err_irq;
+	}
+
+	if (idxd->request_int_handles) {
+		rc = idxd_device_request_int_handle(idxd, ie->id, &ie->int_handle,
+						    IDXD_IRQ_MSIX);
+		if (rc < 0)
+			goto err_int_handle;
+	} else {
+		ie->int_handle = ie->id;
+	}
+
+	return 0;
+
+err_int_handle:
+	ie->int_handle = INVALID_INT_HANDLE;
+	free_irq(ie->vector, ie);
+err_irq:
+	idxd_device_clear_perm_entry(idxd, ie);
+	ie->pasid = INVALID_IOASID;
+	return rc;
+}
+
 int __drv_enable_wq(struct idxd_wq *wq)
 {
 	struct idxd_device *idxd = wq->idxd;

drivers/dma/idxd/dma.c

Lines changed: 12 additions & 0 deletions
@@ -289,6 +289,14 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
 
 	mutex_lock(&wq->wq_lock);
 	wq->type = IDXD_WQT_KERNEL;
+
+	rc = idxd_wq_request_irq(wq);
+	if (rc < 0) {
+		idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
+		dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc);
+		goto err_irq;
+	}
+
 	rc = __drv_enable_wq(wq);
 	if (rc < 0) {
 		dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc);
@@ -329,6 +337,8 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
 err_res_alloc:
 	__drv_disable_wq(wq);
 err:
+	idxd_wq_free_irq(wq);
+err_irq:
 	wq->type = IDXD_WQT_NONE;
 	mutex_unlock(&wq->wq_lock);
 	return rc;
@@ -344,6 +354,8 @@ static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
 	idxd_wq_free_resources(wq);
 	__drv_disable_wq(wq);
 	percpu_ref_exit(&wq->wq_active);
+	idxd_wq_free_irq(wq);
+	wq->type = IDXD_WQT_NONE;
 	mutex_unlock(&wq->wq_lock);
 }
 

drivers/dma/idxd/idxd.h

Lines changed: 2 additions & 5 deletions
@@ -548,15 +548,10 @@ void idxd_wqs_quiesce(struct idxd_device *idxd);
 bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc);
 
 /* device interrupt control */
-void idxd_msix_perm_setup(struct idxd_device *idxd);
-void idxd_msix_perm_clear(struct idxd_device *idxd);
 irqreturn_t idxd_misc_thread(int vec, void *data);
 irqreturn_t idxd_wq_thread(int irq, void *data);
 void idxd_mask_error_interrupts(struct idxd_device *idxd);
 void idxd_unmask_error_interrupts(struct idxd_device *idxd);
-void idxd_mask_msix_vectors(struct idxd_device *idxd);
-void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
-void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
 
 /* device control */
 int idxd_register_idxd_drv(void);
@@ -595,6 +590,8 @@ int idxd_wq_disable_pasid(struct idxd_wq *wq);
 void __idxd_wq_quiesce(struct idxd_wq *wq);
 void idxd_wq_quiesce(struct idxd_wq *wq);
 int idxd_wq_init_percpu_ref(struct idxd_wq *wq);
+void idxd_wq_free_irq(struct idxd_wq *wq);
+int idxd_wq_request_irq(struct idxd_wq *wq);
 
 /* submission */
 int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
