
Commit 58ac1b3

arndb authored and gregkh committed
ARM: PL011: Fix DMA support
Since there is no guarantee that the memory returned by
dma_alloc_coherent() is associated with a 'struct page', using the
architecture-specific phys_to_page() is wrong, but using virt_to_page()
would be as well.

Stop using sg lists altogether and just use the *_single() functions
instead. This also simplifies the code a bit since the scatterlists in
this driver always have only one entry anyway.

gc: Add a commit log from the initial thread:
https://lore.kernel.org/lkml/[email protected]/
Use consistent names for dma buffers

Fixes: cb06ff1 ("ARM: PL011: Add support for Rx DMA buffer polling.")
Signed-off-by: Arnd Bergmann <[email protected]>
Tested-by: Gregory CLEMENT <[email protected]>
Signed-off-by: Gregory CLEMENT <[email protected]>
Cc: stable <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]>
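For context, the change swaps the dmaengine slave-sg calls for their single-buffer equivalents. Below is a minimal sketch of that idiom, assuming a hypothetical helper my_dma_tx() with placeholder names; it is not the patched driver code, just the dma_map_single() / dmaengine_prep_slave_single() pattern the TX path now follows. (The RX side skips the mapping step entirely and hands the dma_alloc_coherent() address straight to dmaengine_prep_slave_single(), so no struct page is ever needed.)

/* Hypothetical sketch of the single-buffer dmaengine TX idiom. */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static int my_dma_tx(struct dma_chan *chan, void *buf, size_t len,
		     dma_async_tx_callback done, void *arg)
{
	struct device *dev = chan->device->dev;
	struct dma_async_tx_descriptor *desc;
	dma_addr_t dma;

	/* One streaming mapping replaces the one-entry scatterlist. */
	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -EBUSY;

	desc = dmaengine_prep_slave_single(chan, dma, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		/* Unmap with the same address, length and direction. */
		dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
		return -EBUSY;
	}

	desc->callback = done;
	desc->callback_param = arg;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}

In the driver itself the unmap for a completed transfer happens in the DMA completion callback, since the mapping must stay live until the hardware has finished with the buffer.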
1 parent 08ce9a1 commit 58ac1b3

File tree

1 file changed: +54 −58 lines changed

drivers/tty/serial/amba-pl011.c

Lines changed: 54 additions & 58 deletions
@@ -218,17 +218,18 @@ static struct vendor_data vendor_st = {
 
 /* Deals with DMA transactions */
 
-struct pl011_sgbuf {
-	struct scatterlist sg;
-	char *buf;
+struct pl011_dmabuf {
+	dma_addr_t		dma;
+	size_t			len;
+	char			*buf;
 };
 
 struct pl011_dmarx_data {
 	struct dma_chan		*chan;
 	struct completion	complete;
 	bool			use_buf_b;
-	struct pl011_sgbuf	sgbuf_a;
-	struct pl011_sgbuf	sgbuf_b;
+	struct pl011_dmabuf	dbuf_a;
+	struct pl011_dmabuf	dbuf_b;
 	dma_cookie_t		cookie;
 	bool			running;
 	struct timer_list	timer;
@@ -241,7 +242,8 @@ struct pl011_dmarx_data {
 
 struct pl011_dmatx_data {
 	struct dma_chan		*chan;
-	struct scatterlist	sg;
+	dma_addr_t		dma;
+	size_t			len;
 	char			*buf;
 	bool			queued;
 };
@@ -366,32 +368,24 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
 
 #define PL011_DMA_BUFFER_SIZE PAGE_SIZE
 
-static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
+static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db,
 	enum dma_data_direction dir)
 {
-	dma_addr_t dma_addr;
-
-	sg->buf = dma_alloc_coherent(chan->device->dev,
-		PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
-	if (!sg->buf)
+	db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE,
+				     &db->dma, GFP_KERNEL);
+	if (!db->buf)
 		return -ENOMEM;
-
-	sg_init_table(&sg->sg, 1);
-	sg_set_page(&sg->sg, phys_to_page(dma_addr),
-		PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
-	sg_dma_address(&sg->sg) = dma_addr;
-	sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;
+	db->len = PL011_DMA_BUFFER_SIZE;
 
 	return 0;
 }
 
-static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
+static void pl011_dmabuf_free(struct dma_chan *chan, struct pl011_dmabuf *db,
 	enum dma_data_direction dir)
 {
-	if (sg->buf) {
+	if (db->buf) {
 		dma_free_coherent(chan->device->dev,
-			PL011_DMA_BUFFER_SIZE, sg->buf,
-			sg_dma_address(&sg->sg));
+				  PL011_DMA_BUFFER_SIZE, db->buf, db->dma);
 	}
 }
 
@@ -552,8 +546,8 @@ static void pl011_dma_tx_callback(void *data)
 
 	uart_port_lock_irqsave(&uap->port, &flags);
 	if (uap->dmatx.queued)
-		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
-			     DMA_TO_DEVICE);
+		dma_unmap_single(dmatx->chan->device->dev, dmatx->dma,
+				 dmatx->len, DMA_TO_DEVICE);
 
 	dmacr = uap->dmacr;
 	uap->dmacr = dmacr & ~UART011_TXDMAE;
@@ -639,18 +633,19 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
 		memcpy(&dmatx->buf[first], &xmit->buf[0], second);
 	}
 
-	dmatx->sg.length = count;
-
-	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
+	dmatx->len = count;
+	dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count,
+				    DMA_TO_DEVICE);
+	if (dmatx->dma == DMA_MAPPING_ERROR) {
 		uap->dmatx.queued = false;
 		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
 		return -EBUSY;
 	}
 
-	desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
+	desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV,
 				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc) {
-		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
+		dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE);
 		uap->dmatx.queued = false;
 		/*
 		 * If DMA cannot be used right now, we complete this
@@ -813,8 +808,8 @@ __acquires(&uap->port.lock)
 	dmaengine_terminate_async(uap->dmatx.chan);
 
 	if (uap->dmatx.queued) {
-		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
-			     DMA_TO_DEVICE);
+		dma_unmap_single(uap->dmatx.chan->device->dev, uap->dmatx.dma,
+				 uap->dmatx.len, DMA_TO_DEVICE);
 		uap->dmatx.queued = false;
 		uap->dmacr &= ~UART011_TXDMAE;
 		pl011_write(uap->dmacr, uap, REG_DMACR);
@@ -828,15 +823,15 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
 	struct dma_chan *rxchan = uap->dmarx.chan;
 	struct pl011_dmarx_data *dmarx = &uap->dmarx;
 	struct dma_async_tx_descriptor *desc;
-	struct pl011_sgbuf *sgbuf;
+	struct pl011_dmabuf *dbuf;
 
 	if (!rxchan)
 		return -EIO;
 
 	/* Start the RX DMA job */
-	sgbuf = uap->dmarx.use_buf_b ?
-		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
-	desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
+	dbuf = uap->dmarx.use_buf_b ?
+		&uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
+	desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len,
 				       DMA_DEV_TO_MEM,
 				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	/*
@@ -876,8 +871,8 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
 			       bool readfifo)
 {
 	struct tty_port *port = &uap->port.state->port;
-	struct pl011_sgbuf *sgbuf = use_buf_b ?
-		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+	struct pl011_dmabuf *dbuf = use_buf_b ?
+		&uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
 	int dma_count = 0;
 	u32 fifotaken = 0; /* only used for vdbg() */
 
@@ -886,7 +881,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
 
 	if (uap->dmarx.poll_rate) {
 		/* The data can be taken by polling */
-		dmataken = sgbuf->sg.length - dmarx->last_residue;
+		dmataken = dbuf->len - dmarx->last_residue;
 		/* Recalculate the pending size */
 		if (pending >= dmataken)
 			pending -= dmataken;
@@ -900,7 +895,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
 	 * Note that tty_insert_flip_buf() tries to take as many chars
 	 * as it can.
 	 */
-	dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
+	dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
 					   pending);
 
 	uap->port.icount.rx += dma_count;
@@ -911,7 +906,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
 
 	/* Reset the last_residue for Rx DMA poll */
 	if (uap->dmarx.poll_rate)
-		dmarx->last_residue = sgbuf->sg.length;
+		dmarx->last_residue = dbuf->len;
 
 	/*
 	 * Only continue with trying to read the FIFO if all DMA chars have
@@ -946,8 +941,8 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap)
 {
 	struct pl011_dmarx_data *dmarx = &uap->dmarx;
 	struct dma_chan *rxchan = dmarx->chan;
-	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
-		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
+	struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
+		&dmarx->dbuf_b : &dmarx->dbuf_a;
 	size_t pending;
 	struct dma_tx_state state;
 	enum dma_status dmastat;
@@ -969,7 +964,7 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap)
 	pl011_write(uap->dmacr, uap, REG_DMACR);
 	uap->dmarx.running = false;
 
-	pending = sgbuf->sg.length - state.residue;
+	pending = dbuf->len - state.residue;
 	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
 	/* Then we terminate the transfer - we now know our residue */
 	dmaengine_terminate_all(rxchan);
@@ -996,8 +991,8 @@ static void pl011_dma_rx_callback(void *data)
 	struct pl011_dmarx_data *dmarx = &uap->dmarx;
 	struct dma_chan *rxchan = dmarx->chan;
 	bool lastbuf = dmarx->use_buf_b;
-	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
-		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
+	struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
+		&dmarx->dbuf_b : &dmarx->dbuf_a;
 	size_t pending;
 	struct dma_tx_state state;
 	int ret;
@@ -1015,7 +1010,7 @@ static void pl011_dma_rx_callback(void *data)
 	 * the DMA irq handler. So we check the residue here.
 	 */
 	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
-	pending = sgbuf->sg.length - state.residue;
+	pending = dbuf->len - state.residue;
 	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
 	/* Then we terminate the transfer - we now know our residue */
 	dmaengine_terminate_all(rxchan);
@@ -1067,16 +1062,16 @@ static void pl011_dma_rx_poll(struct timer_list *t)
 	unsigned long flags;
 	unsigned int dmataken = 0;
 	unsigned int size = 0;
-	struct pl011_sgbuf *sgbuf;
+	struct pl011_dmabuf *dbuf;
 	int dma_count;
 	struct dma_tx_state state;
 
-	sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+	dbuf = dmarx->use_buf_b ? &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
 	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
 	if (likely(state.residue < dmarx->last_residue)) {
-		dmataken = sgbuf->sg.length - dmarx->last_residue;
+		dmataken = dbuf->len - dmarx->last_residue;
 		size = dmarx->last_residue - state.residue;
-		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
+		dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
 						   size);
 		if (dma_count == size)
 			dmarx->last_residue = state.residue;
@@ -1123,7 +1118,7 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
 		return;
 	}
 
-	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
+	uap->dmatx.len = PL011_DMA_BUFFER_SIZE;
 
 	/* The DMA buffer is now the FIFO the TTY subsystem can use */
 	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
@@ -1133,20 +1128,20 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
 		goto skip_rx;
 
 	/* Allocate and map DMA RX buffers */
-	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+	ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a,
 			       DMA_FROM_DEVICE);
 	if (ret) {
 		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
 			"RX buffer A", ret);
 		goto skip_rx;
 	}
 
-	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
+	ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b,
 			       DMA_FROM_DEVICE);
 	if (ret) {
 		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
 			"RX buffer B", ret);
-		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+		pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a,
 				 DMA_FROM_DEVICE);
 		goto skip_rx;
 	}
@@ -1200,8 +1195,9 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
 	/* In theory, this should already be done by pl011_dma_flush_buffer */
 	dmaengine_terminate_all(uap->dmatx.chan);
 	if (uap->dmatx.queued) {
-		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
-			     DMA_TO_DEVICE);
+		dma_unmap_single(uap->dmatx.chan->device->dev,
+				 uap->dmatx.dma, uap->dmatx.len,
+				 DMA_TO_DEVICE);
 		uap->dmatx.queued = false;
 	}
 
@@ -1212,8 +1208,8 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
 	if (uap->using_rx_dma) {
 		dmaengine_terminate_all(uap->dmarx.chan);
 		/* Clean up the RX DMA */
-		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
-		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
+		pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE);
+		pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE);
 		if (uap->dmarx.poll_rate)
 			del_timer_sync(&uap->dmarx.timer);
 		uap->using_rx_dma = false;
