@@ -218,17 +218,18 @@ static struct vendor_data vendor_st = {
 
 /* Deals with DMA transactions */
 
-struct pl011_sgbuf {
-	struct scatterlist sg;
-	char *buf;
+struct pl011_dmabuf {
+	dma_addr_t		dma;
+	size_t			len;
+	char			*buf;
 };
 
 struct pl011_dmarx_data {
 	struct dma_chan		*chan;
 	struct completion	complete;
 	bool			use_buf_b;
-	struct pl011_sgbuf	sgbuf_a;
-	struct pl011_sgbuf	sgbuf_b;
+	struct pl011_dmabuf	dbuf_a;
+	struct pl011_dmabuf	dbuf_b;
 	dma_cookie_t		cookie;
 	bool			running;
 	struct timer_list	timer;
@@ -241,7 +242,8 @@ struct pl011_dmarx_data {
 
 struct pl011_dmatx_data {
 	struct dma_chan		*chan;
-	struct scatterlist	sg;
+	dma_addr_t		dma;
+	size_t			len;
 	char			*buf;
 	bool			queued;
 };
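Both structs now carry the device address (dma_addr_t) and the mapped length directly instead of a single-entry scatterlist. For a physically contiguous buffer this is a pure simplification: a one-entry scatterlist only ever encoded those same two values. A minimal sketch of that equivalence, assuming only the standard <linux/scatterlist.h> accessors (the function name is illustrative, not part of the driver):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Everything a single-entry scatterlist carries for one contiguous,
 * already-mapped buffer reduces to this (address, length) pair. */
static void dma_pair_from_sg(struct scatterlist *sg,
			     dma_addr_t *dma, size_t *len)
{
	*dma = sg_dma_address(sg);	/* device-visible bus address */
	*len = sg_dma_len(sg);		/* length of the mapped region */
}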
@@ -366,32 +368,24 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
 
 #define PL011_DMA_BUFFER_SIZE PAGE_SIZE
 
-static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
+static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db,
 	enum dma_data_direction dir)
 {
-	dma_addr_t dma_addr;
-
-	sg->buf = dma_alloc_coherent(chan->device->dev,
-		PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
-	if (!sg->buf)
+	db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE,
+				     &db->dma, GFP_KERNEL);
+	if (!db->buf)
 		return -ENOMEM;
-
-	sg_init_table(&sg->sg, 1);
-	sg_set_page(&sg->sg, phys_to_page(dma_addr),
-		PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
-	sg_dma_address(&sg->sg) = dma_addr;
-	sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;
+	db->len = PL011_DMA_BUFFER_SIZE;
 
 	return 0;
 }
 
-static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
+static void pl011_dmabuf_free(struct dma_chan *chan, struct pl011_dmabuf *db,
 	enum dma_data_direction dir)
 {
-	if (sg->buf) {
+	if (db->buf) {
 		dma_free_coherent(chan->device->dev,
-			PL011_DMA_BUFFER_SIZE, sg->buf,
-			sg_dma_address(&sg->sg));
+			PL011_DMA_BUFFER_SIZE, db->buf, db->dma);
 	}
 }
 
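dma_alloc_coherent() already returns both the CPU pointer and the device address, so the old phys_to_page()/sg_set_page() round trip added nothing for a single contiguous buffer. A minimal sketch of the allocate/free pairing, assuming a generic struct device pointer (helper names illustrative):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *coherent_buf_alloc(struct device *dev, size_t len,
				dma_addr_t *dma)
{
	/* Fills *dma with the device address; returns NULL on failure. */
	return dma_alloc_coherent(dev, len, dma, GFP_KERNEL);
}

static void coherent_buf_free(struct device *dev, size_t len,
			      void *cpu, dma_addr_t dma)
{
	/* Must receive the same length and both addresses from the alloc. */
	dma_free_coherent(dev, len, cpu, dma);
}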
@@ -552,8 +546,8 @@ static void pl011_dma_tx_callback(void *data)
 
 	uart_port_lock_irqsave(&uap->port, &flags);
 	if (uap->dmatx.queued)
-		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
-			     DMA_TO_DEVICE);
+		dma_unmap_single(dmatx->chan->device->dev, dmatx->dma,
+				 dmatx->len, DMA_TO_DEVICE);
 
 	dmacr = uap->dmacr;
 	uap->dmacr = dmacr & ~UART011_TXDMAE;
@@ -639,18 +633,19 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
 		memcpy(&dmatx->buf[first], &xmit->buf[0], second);
 	}
 
-	dmatx->sg.length = count;
-
-	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
+	dmatx->len = count;
+	dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count,
+				    DMA_TO_DEVICE);
+	if (dmatx->dma == DMA_MAPPING_ERROR) {
 		uap->dmatx.queued = false;
 		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
 		return -EBUSY;
 	}
 
-	desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
+	desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV,
 				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc) {
-		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
+		dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE);
 		uap->dmatx.queued = false;
 		/*
 		 * If DMA cannot be used right now, we complete this
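The TX bounce buffer keeps its streaming mapping, now taken with dma_map_single(). On the failure check: the hunk above compares against DMA_MAPPING_ERROR directly; the more common kernel idiom is dma_mapping_error(), which on current kernels amounts to the same comparison plus debug hooks. A sketch of that pattern under a generic struct device (function name illustrative):

#include <linux/dma-mapping.h>

static int tx_buf_map(struct device *dev, void *buf, size_t len,
		      dma_addr_t *dma)
{
	*dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma))
		return -EBUSY;	/* caller falls back to interrupt-driven TX */
	return 0;
}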
@@ -813,8 +808,8 @@ __acquires(&uap->port.lock)
 	dmaengine_terminate_async(uap->dmatx.chan);
 
 	if (uap->dmatx.queued) {
-		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
-			     DMA_TO_DEVICE);
+		dma_unmap_single(uap->dmatx.chan->device->dev, uap->dmatx.dma,
+				 uap->dmatx.len, DMA_TO_DEVICE);
 		uap->dmatx.queued = false;
 		uap->dmacr &= ~UART011_TXDMAE;
 		pl011_write(uap->dmacr, uap, REG_DMACR);
@@ -828,15 +823,15 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
 	struct dma_chan *rxchan = uap->dmarx.chan;
 	struct pl011_dmarx_data *dmarx = &uap->dmarx;
 	struct dma_async_tx_descriptor *desc;
-	struct pl011_sgbuf *sgbuf;
+	struct pl011_dmabuf *dbuf;
 
 	if (!rxchan)
 		return -EIO;
 
 	/* Start the RX DMA job */
-	sgbuf = uap->dmarx.use_buf_b ?
-		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
-	desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
+	dbuf = uap->dmarx.use_buf_b ?
+		&uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
+	desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len,
 					DMA_DEV_TO_MEM,
 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	/*
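dmaengine_prep_slave_single() is the one-buffer counterpart of dmaengine_prep_slave_sg(): given the (address, length) pair it builds the same descriptor a single-entry scatterlist would have produced. A sketch of the full start sequence, with the completion-callback wiring omitted (function name illustrative):

#include <linux/dmaengine.h>

static int rx_job_start(struct dma_chan *chan, dma_addr_t dma, size_t len)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_single(chan, dma, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	dmaengine_submit(desc);		/* returns a dma_cookie_t for tx_status */
	dma_async_issue_pending(chan);	/* actually starts the transfer */
	return 0;
}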
@@ -876,8 +871,8 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
 			       bool readfifo)
 {
 	struct tty_port *port = &uap->port.state->port;
-	struct pl011_sgbuf *sgbuf = use_buf_b ?
-		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+	struct pl011_dmabuf *dbuf = use_buf_b ?
+		&uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
 	int dma_count = 0;
 	u32 fifotaken = 0; /* only used for vdbg() */
 
@@ -886,7 +881,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
 
 	if (uap->dmarx.poll_rate) {
 		/* The data can be taken by polling */
-		dmataken = sgbuf->sg.length - dmarx->last_residue;
+		dmataken = dbuf->len - dmarx->last_residue;
 		/* Recalculate the pending size */
 		if (pending >= dmataken)
 			pending -= dmataken;
@@ -900,7 +895,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
 	 * Note that tty_insert_flip_buf() tries to take as many chars
 	 * as it can.
 	 */
-	dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
+	dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
 					   pending);
 
 	uap->port.icount.rx += dma_count;
@@ -911,7 +906,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
 
 	/* Reset the last_residue for Rx DMA poll */
 	if (uap->dmarx.poll_rate)
-		dmarx->last_residue = sgbuf->sg.length;
+		dmarx->last_residue = dbuf->len;
 
 	/*
 	 * Only continue with trying to read the FIFO if all DMA chars have
@@ -946,8 +941,8 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap)
 {
 	struct pl011_dmarx_data *dmarx = &uap->dmarx;
 	struct dma_chan *rxchan = dmarx->chan;
-	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
-		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
+	struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
+		&dmarx->dbuf_b : &dmarx->dbuf_a;
 	size_t pending;
 	struct dma_tx_state state;
 	enum dma_status dmastat;
@@ -969,7 +964,7 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap)
 	pl011_write(uap->dmacr, uap, REG_DMACR);
 	uap->dmarx.running = false;
 
-	pending = sgbuf->sg.length - state.residue;
+	pending = dbuf->len - state.residue;
 	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
 	/* Then we terminate the transfer - we now know our residue */
 	dmaengine_terminate_all(rxchan);
@@ -996,8 +991,8 @@ static void pl011_dma_rx_callback(void *data)
 	struct pl011_dmarx_data *dmarx = &uap->dmarx;
 	struct dma_chan *rxchan = dmarx->chan;
 	bool lastbuf = dmarx->use_buf_b;
-	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
-		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
+	struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
+		&dmarx->dbuf_b : &dmarx->dbuf_a;
 	size_t pending;
 	struct dma_tx_state state;
 	int ret;
@@ -1015,7 +1010,7 @@ static void pl011_dma_rx_callback(void *data)
 	 * the DMA irq handler. So we check the residue here.
 	 */
 	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
-	pending = sgbuf->sg.length - state.residue;
+	pending = dbuf->len - state.residue;
 	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
 	/* Then we terminate the transfer - we now know our residue */
 	dmaengine_terminate_all(rxchan);
@@ -1067,16 +1062,16 @@ static void pl011_dma_rx_poll(struct timer_list *t)
 	unsigned long flags;
 	unsigned int dmataken = 0;
 	unsigned int size = 0;
-	struct pl011_sgbuf *sgbuf;
+	struct pl011_dmabuf *dbuf;
 	int dma_count;
 	struct dma_tx_state state;
 
-	sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+	dbuf = dmarx->use_buf_b ? &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
 	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
 	if (likely(state.residue < dmarx->last_residue)) {
-		dmataken = sgbuf->sg.length - dmarx->last_residue;
+		dmataken = dbuf->len - dmarx->last_residue;
 		size = dmarx->last_residue - state.residue;
-		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
+		dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
 						   size);
 		if (dma_count == size)
 			dmarx->last_residue = state.residue;
@@ -1123,7 +1118,7 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
 		return;
 	}
 
-	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
+	uap->dmatx.len = PL011_DMA_BUFFER_SIZE;
 
 	/* The DMA buffer is now the FIFO the TTY subsystem can use */
 	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
@@ -1133,20 +1128,20 @@
 		goto skip_rx;
 
 	/* Allocate and map DMA RX buffers */
-	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+	ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a,
 			       DMA_FROM_DEVICE);
 	if (ret) {
 		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
 			"RX buffer A", ret);
 		goto skip_rx;
 	}
 
-	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
+	ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b,
 			       DMA_FROM_DEVICE);
 	if (ret) {
 		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
 			"RX buffer B", ret);
-		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+		pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a,
 				 DMA_FROM_DEVICE);
 		goto skip_rx;
 	}
@@ -1200,8 +1195,9 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
 	/* In theory, this should already be done by pl011_dma_flush_buffer */
 	dmaengine_terminate_all(uap->dmatx.chan);
 	if (uap->dmatx.queued) {
-		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
-			     DMA_TO_DEVICE);
+		dma_unmap_single(uap->dmatx.chan->device->dev,
+				 uap->dmatx.dma, uap->dmatx.len,
+				 DMA_TO_DEVICE);
 		uap->dmatx.queued = false;
 	}
 
@@ -1212,8 +1208,8 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
 	if (uap->using_rx_dma) {
 		dmaengine_terminate_all(uap->dmarx.chan);
 		/* Clean up the RX DMA */
-		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
-		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
+		pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE);
+		pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE);
 		if (uap->dmarx.poll_rate)
 			del_timer_sync(&uap->dmarx.timer);
 		uap->using_rx_dma = false;
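Teardown mirrors setup: the streaming TX mapping is released with dma_unmap_single() using the same (address, length, direction) triple it was created with, which is why dmatx.dma and dmatx.len persist for the mapping's lifetime, while the coherent RX buffers go back through dma_free_coherent(). A condensed sketch of that symmetry (function name illustrative):

#include <linux/dma-mapping.h>

static void dma_paths_teardown(struct device *dev,
			       dma_addr_t tx_dma, size_t tx_len,
			       void *rx_cpu, dma_addr_t rx_dma, size_t rx_len)
{
	dma_unmap_single(dev, tx_dma, tx_len, DMA_TO_DEVICE);	/* TX bounce buffer */
	dma_free_coherent(dev, rx_len, rx_cpu, rx_dma);		/* one RX buffer */
}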