Commit ed6889d

Merge tag 'dmaengine-fix-5.7-rc4' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine fixes from Vinod Koul:

 "Core:
   - Documentation typo fixes
   - fix the channel indexes
   - dmatest: fixes for process hang and iterations

  Drivers:
   - hisilicon: build error fix without PCI_MSI
   - ti-k3: deadlock fix
   - uniphier-xdmac: fix for reg region
   - pch: fix data race
   - tegra: fix clock state"

* tag 'dmaengine-fix-5.7-rc4' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine: dmatest: Fix process hang when reading 'wait' parameter
  dmaengine: dmatest: Fix iteration non-stop logic
  dmaengine: tegra-apb: Ensure that clock is enabled during of DMA synchronization
  dmaengine: fix channel index enumeration
  dmaengine: mmp_tdma: Reset channel error on release
  dmaengine: mmp_tdma: Do not ignore slave config validation errors
  dmaengine: pch_dma.c: Avoid data race between probe and irq handler
  dt-bindings: dma: uniphier-xdmac: switch to single reg region
  include/linux/dmaengine: Typos fixes in API documentation
  dmaengine: xilinx_dma: Add missing check for empty list
  dmaengine: ti: k3-psil: fix deadlock on error path
  dmaengine: hisilicon: Fix build error without PCI_MSI
2 parents 690e2ab + aa72f1d commit ed6889d


10 files changed: +65, -60 lines


Documentation/devicetree/bindings/dma/socionext,uniphier-xdmac.yaml

Lines changed: 3 additions & 4 deletions
@@ -22,9 +22,7 @@ properties:
     const: socionext,uniphier-xdmac
 
   reg:
-    items:
-      - description: XDMAC base register region (offset and length)
-      - description: XDMAC extension register region (offset and length)
+    maxItems: 1
 
   interrupts:
     maxItems: 1
@@ -49,12 +47,13 @@ required:
   - reg
   - interrupts
   - "#dma-cells"
+  - dma-channels
 
 examples:
   - |
     xdmac: dma-controller@5fc10000 {
         compatible = "socionext,uniphier-xdmac";
-        reg = <0x5fc10000 0x1000>, <0x5fc20000 0x800>;
+        reg = <0x5fc10000 0x5300>;
         interrupts = <0 188 4>;
         #dma-cells = <2>;
         dma-channels = <16>;

drivers/dma/Kconfig

Lines changed: 2 additions & 1 deletion
@@ -241,7 +241,8 @@ config FSL_RAID
 
 config HISI_DMA
 	tristate "HiSilicon DMA Engine support"
-	depends on ARM64 || (COMPILE_TEST && PCI_MSI)
+	depends on ARM64 || COMPILE_TEST
+	depends on PCI_MSI
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 	help

drivers/dma/dmaengine.c

Lines changed: 26 additions & 34 deletions
@@ -232,10 +232,6 @@ static void chan_dev_release(struct device *dev)
 	struct dma_chan_dev *chan_dev;
 
 	chan_dev = container_of(dev, typeof(*chan_dev), device);
-	if (atomic_dec_and_test(chan_dev->idr_ref)) {
-		ida_free(&dma_ida, chan_dev->dev_id);
-		kfree(chan_dev->idr_ref);
-	}
 	kfree(chan_dev);
 }
 
@@ -1043,27 +1039,9 @@ static int get_dma_id(struct dma_device *device)
 }
 
 static int __dma_async_device_channel_register(struct dma_device *device,
-					       struct dma_chan *chan,
-					       int chan_id)
+					       struct dma_chan *chan)
 {
 	int rc = 0;
-	int chancnt = device->chancnt;
-	atomic_t *idr_ref;
-	struct dma_chan *tchan;
-
-	tchan = list_first_entry_or_null(&device->channels,
-					 struct dma_chan, device_node);
-	if (!tchan)
-		return -ENODEV;
-
-	if (tchan->dev) {
-		idr_ref = tchan->dev->idr_ref;
-	} else {
-		idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
-		if (!idr_ref)
-			return -ENOMEM;
-		atomic_set(idr_ref, 0);
-	}
 
 	chan->local = alloc_percpu(typeof(*chan->local));
 	if (!chan->local)
@@ -1079,29 +1057,36 @@ static int __dma_async_device_channel_register(struct dma_device *device,
 	 * When the chan_id is a negative value, we are dynamically adding
 	 * the channel. Otherwise we are static enumerating.
 	 */
-	chan->chan_id = chan_id < 0 ? chancnt : chan_id;
+	mutex_lock(&device->chan_mutex);
+	chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
+	mutex_unlock(&device->chan_mutex);
+	if (chan->chan_id < 0) {
+		pr_err("%s: unable to alloc ida for chan: %d\n",
+		       __func__, chan->chan_id);
+		goto err_out;
+	}
+
 	chan->dev->device.class = &dma_devclass;
 	chan->dev->device.parent = device->dev;
 	chan->dev->chan = chan;
-	chan->dev->idr_ref = idr_ref;
 	chan->dev->dev_id = device->dev_id;
-	atomic_inc(idr_ref);
 	dev_set_name(&chan->dev->device, "dma%dchan%d",
 		     device->dev_id, chan->chan_id);
-
 	rc = device_register(&chan->dev->device);
 	if (rc)
-		goto err_out;
+		goto err_out_ida;
 	chan->client_count = 0;
-	device->chancnt = chan->chan_id + 1;
+	device->chancnt++;
 
 	return 0;
 
+ err_out_ida:
+	mutex_lock(&device->chan_mutex);
+	ida_free(&device->chan_ida, chan->chan_id);
+	mutex_unlock(&device->chan_mutex);
 err_out:
 	free_percpu(chan->local);
 	kfree(chan->dev);
-	if (atomic_dec_return(idr_ref) == 0)
-		kfree(idr_ref);
 	return rc;
 }
 
@@ -1110,7 +1095,7 @@ int dma_async_device_channel_register(struct dma_device *device,
 {
 	int rc;
 
-	rc = __dma_async_device_channel_register(device, chan, -1);
+	rc = __dma_async_device_channel_register(device, chan);
 	if (rc < 0)
 		return rc;
 
@@ -1130,6 +1115,9 @@ static void __dma_async_device_channel_unregister(struct dma_device *device,
 	device->chancnt--;
 	chan->dev->chan = NULL;
 	mutex_unlock(&dma_list_mutex);
+	mutex_lock(&device->chan_mutex);
+	ida_free(&device->chan_ida, chan->chan_id);
+	mutex_unlock(&device->chan_mutex);
 	device_unregister(&chan->dev->device);
 	free_percpu(chan->local);
 }
@@ -1152,7 +1140,7 @@ EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
  */
 int dma_async_device_register(struct dma_device *device)
 {
-	int rc, i = 0;
+	int rc;
 	struct dma_chan* chan;
 
 	if (!device)
@@ -1257,9 +1245,12 @@ int dma_async_device_register(struct dma_device *device)
 	if (rc != 0)
 		return rc;
 
+	mutex_init(&device->chan_mutex);
+	ida_init(&device->chan_ida);
+
 	/* represent channels in sysfs. Probably want devs too */
 	list_for_each_entry(chan, &device->channels, device_node) {
-		rc = __dma_async_device_channel_register(device, chan, i++);
+		rc = __dma_async_device_channel_register(device, chan);
 		if (rc < 0)
 			goto err_out;
 	}
@@ -1334,6 +1325,7 @@ void dma_async_device_unregister(struct dma_device *device)
 	 */
 	dma_cap_set(DMA_PRIVATE, device->cap_mask);
 	dma_channel_rebalance();
+	ida_free(&dma_ida, device->dev_id);
 	dma_device_put(device);
 	mutex_unlock(&dma_list_mutex);
 }
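
The channel-index fix above replaces the old chancnt/idr_ref bookkeeping with a per-device IDA protected by a mutex, so channel IDs are handed out and returned individually and cannot collide when channels are registered or unregistered dynamically. Below is a minimal standalone sketch of that allocation pattern; the demo_* names and struct layout are hypothetical simplifications, not the real dmaengine structures.

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/gfp.h>

struct demo_dma_device {
	struct ida chan_ida;		/* smallest-free-ID allocator for channels */
	struct mutex chan_mutex;	/* serializes ida_alloc()/ida_free() */
};

static void demo_device_init(struct demo_dma_device *dev)
{
	mutex_init(&dev->chan_mutex);
	ida_init(&dev->chan_ida);
}

/* Returns the smallest available channel ID, or a negative errno. */
static int demo_chan_id_get(struct demo_dma_device *dev)
{
	int id;

	mutex_lock(&dev->chan_mutex);
	id = ida_alloc(&dev->chan_ida, GFP_KERNEL);
	mutex_unlock(&dev->chan_mutex);
	return id;
}

/* Releases the ID so a later registration can reuse it. */
static void demo_chan_id_put(struct demo_dma_device *dev, int id)
{
	mutex_lock(&dev->chan_mutex);
	ida_free(&dev->chan_ida, id);
	mutex_unlock(&dev->chan_mutex);
}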

drivers/dma/dmatest.c

Lines changed: 3 additions & 3 deletions
@@ -240,7 +240,7 @@ static bool is_threaded_test_run(struct dmatest_info *info)
 		struct dmatest_thread *thread;
 
 		list_for_each_entry(thread, &dtc->threads, node) {
-			if (!thread->done)
+			if (!thread->done && !thread->pending)
 				return true;
 		}
 	}
@@ -662,8 +662,8 @@ static int dmatest_func(void *data)
 	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 
 	ktime = ktime_get();
-	while (!kthread_should_stop()
-	       && !(params->iterations && total_tests >= params->iterations)) {
+	while (!(kthread_should_stop() ||
+	       (params->iterations && total_tests >= params->iterations))) {
 		struct dma_async_tx_descriptor *tx = NULL;
 		struct dmaengine_unmap_data *um;
 		dma_addr_t *dsts;
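
Both dmatest changes concern how a test kthread decides it is finished: a thread that is still pending must not count as running, and the main loop must exit as soon as either a stop is requested or the configured iteration budget is reached. A rough standalone sketch of that loop shape follows; the demo_* names are hypothetical and this is not the dmatest code itself.

#include <linux/kthread.h>

/* One test pass; dmatest would prepare, submit and verify a transfer here. */
static int demo_one_iteration(void *data)
{
	return 0;
}

static int demo_test_func(void *data)
{
	unsigned int iterations = 100;	/* 0 would mean "run until stopped" */
	unsigned int total_tests = 0;

	/* Stop when asked to stop OR when the iteration budget is used up. */
	while (!(kthread_should_stop() ||
		 (iterations && total_tests >= iterations))) {
		if (demo_one_iteration(data))
			break;
		total_tests++;
	}

	/*
	 * A thread paired with kthread_stop() would normally also wait here
	 * until kthread_should_stop() returns true before exiting.
	 */
	return 0;
}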

drivers/dma/mmp_tdma.c

Lines changed: 4 additions & 1 deletion
@@ -363,6 +363,8 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
 		gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
 				size);
 	tdmac->desc_arr = NULL;
+	if (tdmac->status == DMA_ERROR)
+		tdmac->status = DMA_COMPLETE;
 
 	return;
 }
@@ -443,7 +445,8 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
 	if (!desc)
 		goto err_out;
 
-	mmp_tdma_config_write(chan, direction, &tdmac->slave_config);
+	if (mmp_tdma_config_write(chan, direction, &tdmac->slave_config))
+		goto err_out;
 
 	while (buf < buf_len) {
 		desc = &tdmac->desc_arr[i];

drivers/dma/pch_dma.c

Lines changed: 1 addition & 1 deletion
@@ -865,6 +865,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
 	}
 
 	pci_set_master(pdev);
+	pd->dma.dev = &pdev->dev;
 
 	err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
 	if (err) {
@@ -880,7 +881,6 @@ static int pch_dma_probe(struct pci_dev *pdev,
 		goto err_free_irq;
 	}
 
-	pd->dma.dev = &pdev->dev;
 
 	INIT_LIST_HEAD(&pd->dma.channels);
 
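
The pch_dma change is an ordering fix: every field the interrupt handler may dereference has to be initialized before request_irq(), because a shared IRQ can fire as soon as the handler is registered. A generic sketch of the rule follows, with hypothetical demo_* names rather than the pch_dma driver itself.

#include <linux/interrupt.h>
#include <linux/device.h>

struct demo_dev {
	void __iomem *regs;
	struct device *dev;	/* used by the IRQ handler for logging */
	int irq;
};

static irqreturn_t demo_irq(int irq, void *arg)
{
	struct demo_dev *dd = arg;

	dev_dbg(dd->dev, "interrupt\n");	/* would oops if dd->dev were still NULL */
	return IRQ_HANDLED;
}

static int demo_setup_irq(struct demo_dev *dd, struct device *parent)
{
	/* Initialize everything demo_irq() touches before the handler can run. */
	dd->dev = parent;

	return request_irq(dd->irq, demo_irq, IRQF_SHARED, "demo", dd);
}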

drivers/dma/tegra20-apb-dma.c

Lines changed: 9 additions & 0 deletions
@@ -816,6 +816,13 @@ static bool tegra_dma_eoc_interrupt_deasserted(struct tegra_dma_channel *tdc)
 static void tegra_dma_synchronize(struct dma_chan *dc)
 {
 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+	int err;
+
+	err = pm_runtime_get_sync(tdc->tdma->dev);
+	if (err < 0) {
+		dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
+		return;
+	}
 
 	/*
 	 * CPU, which handles interrupt, could be busy in
@@ -825,6 +832,8 @@ static void tegra_dma_synchronize(struct dma_chan *dc)
 	wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc));
 
 	tasklet_kill(&tdc->tasklet);
+
+	pm_runtime_put(tdc->tdma->dev);
 }
 
 static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
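
The tegra-apb fix brackets the synchronization path with a runtime-PM reference so the clock stays enabled while waiting for the EOC interrupt to deassert. The general pattern is the usual get/put pairing; the sketch below uses hypothetical names, and its error path also drops the reference taken by pm_runtime_get_sync(), a common balancing step that is an addition in this sketch rather than part of the patch above.

#include <linux/pm_runtime.h>
#include <linux/device.h>

static void demo_do_work_with_hw(struct device *dev)
{
	int err;

	/* Take a runtime-PM reference; resumes (and clocks) the device if needed. */
	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		pm_runtime_put_noidle(dev);	/* balance the usage count on failure */
		dev_err(dev, "runtime resume failed: %d\n", err);
		return;
	}

	/* ... touch hardware registers here; the device is guaranteed to be powered ... */

	pm_runtime_put(dev);	/* drop the reference; the device may suspend again */
}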

drivers/dma/ti/k3-psil.c

Lines changed: 1 addition & 0 deletions
@@ -27,6 +27,7 @@ struct psil_endpoint_config *psil_get_ep_config(u32 thread_id)
 		soc_ep_map = &j721e_ep_map;
 	} else {
 		pr_err("PSIL: No compatible machine found for map\n");
+		mutex_unlock(&ep_map_mutex);
 		return ERR_PTR(-ENOTSUPP);
 	}
 	pr_debug("%s: Using map for %s\n", __func__, soc_ep_map->name);
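
The k3-psil fix is the classic locked-error-path bug: the function returned from inside a region guarded by ep_map_mutex without dropping the lock, so the next caller deadlocks. A tiny illustration of the pattern and its fix, with hypothetical demo_* names:

#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/errno.h>

static DEFINE_MUTEX(demo_map_mutex);

static void *demo_lookup(int id)
{
	return NULL;	/* stand-in for the real endpoint-map search */
}

static void *demo_get_config(int id)
{
	void *cfg;

	mutex_lock(&demo_map_mutex);
	cfg = demo_lookup(id);
	if (!cfg) {
		/* Drop the lock on the error path too, or the next caller blocks forever. */
		mutex_unlock(&demo_map_mutex);
		return ERR_PTR(-ENOTSUPP);
	}
	mutex_unlock(&demo_map_mutex);
	return cfg;
}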

drivers/dma/xilinx/xilinx_dma.c

Lines changed: 10 additions & 10 deletions
@@ -1230,16 +1230,16 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
 		return ret;
 
 	spin_lock_irqsave(&chan->lock, flags);
-
-	desc = list_last_entry(&chan->active_list,
-			       struct xilinx_dma_tx_descriptor, node);
-	/*
-	 * VDMA and simple mode do not support residue reporting, so the
-	 * residue field will always be 0.
-	 */
-	if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
-		residue = xilinx_dma_get_residue(chan, desc);
-
+	if (!list_empty(&chan->active_list)) {
+		desc = list_last_entry(&chan->active_list,
+				       struct xilinx_dma_tx_descriptor, node);
+		/*
+		 * VDMA and simple mode do not support residue reporting, so the
+		 * residue field will always be 0.
+		 */
+		if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
+			residue = xilinx_dma_get_residue(chan, desc);
+	}
 	spin_unlock_irqrestore(&chan->lock, flags);
 
 	dma_set_residue(txstate, residue);
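
The xilinx_dma fix guards list_last_entry() with list_empty(): list_last_entry() is only defined when the list has at least one element, otherwise it hands back a bogus pointer computed from the list head itself. A small sketch of the safe access pattern, with hypothetical types:

#include <linux/list.h>

struct demo_desc {
	struct list_head node;
	unsigned int residue;
};

static unsigned int demo_last_residue(struct list_head *active_list)
{
	struct demo_desc *desc;
	unsigned int residue = 0;

	/* list_last_entry() on an empty list would alias the head, so check first. */
	if (!list_empty(active_list)) {
		desc = list_last_entry(active_list, struct demo_desc, node);
		residue = desc->residue;
	}
	return residue;
}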

include/linux/dmaengine.h

Lines changed: 6 additions & 6 deletions
@@ -83,9 +83,9 @@ enum dma_transfer_direction {
 /**
  * Interleaved Transfer Request
  * ----------------------------
- * A chunk is collection of contiguous bytes to be transfered.
+ * A chunk is collection of contiguous bytes to be transferred.
  * The gap(in bytes) between two chunks is called inter-chunk-gap(ICG).
- * ICGs may or maynot change between chunks.
+ * ICGs may or may not change between chunks.
  * A FRAME is the smallest series of contiguous {chunk,icg} pairs,
  * that when repeated an integral number of times, specifies the transfer.
 * A transfer template is specification of a Frame, the number of times
@@ -341,13 +341,11 @@ struct dma_chan {
  * @chan: driver channel device
  * @device: sysfs device
  * @dev_id: parent dma_device dev_id
- * @idr_ref: reference count to gate release of dma_device dev_id
  */
 struct dma_chan_dev {
 	struct dma_chan *chan;
 	struct device device;
 	int dev_id;
-	atomic_t *idr_ref;
 };
 
 /**
@@ -835,6 +833,8 @@ struct dma_device {
 	int dev_id;
 	struct device *dev;
 	struct module *owner;
+	struct ida chan_ida;
+	struct mutex chan_mutex; /* to protect chan_ida */
 
 	u32 src_addr_widths;
 	u32 dst_addr_widths;
@@ -1069,7 +1069,7 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan)
 * dmaengine_synchronize() needs to be called before it is safe to free
 * any memory that is accessed by previously submitted descriptors or before
 * freeing any resources accessed from within the completion callback of any
- * perviously submitted descriptors.
+ * previously submitted descriptors.
 *
 * This function can be called from atomic context as well as from within a
 * complete callback of a descriptor submitted on the same channel.
@@ -1091,7 +1091,7 @@ static inline int dmaengine_terminate_async(struct dma_chan *chan)
 *
 * Synchronizes to the DMA channel termination to the current context. When this
 * function returns it is guaranteed that all transfers for previously issued
- * descriptors have stopped and and it is safe to free the memory assoicated
+ * descriptors have stopped and it is safe to free the memory associated
 * with them. Furthermore it is guaranteed that all complete callback functions
 * for a previously submitted descriptor have finished running and it is safe to
 * free resources accessed from within the complete callbacks.
