@@ -205,6 +205,52 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
 	mt76_dma_sync_idx(dev, q);
 }
 
+static int
+mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
+		    struct mt76_queue_buf *buf, void *data)
+{
+	struct mt76_desc *desc = &q->desc[q->head];
+	struct mt76_queue_entry *entry = &q->entry[q->head];
+	struct mt76_txwi_cache *txwi = NULL;
+	u32 buf1 = 0, ctrl;
+	int idx = q->head;
+	int rx_token;
+
+	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
+
+	if ((q->flags & MT_QFLAG_WED) &&
+	    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
+		txwi = mt76_get_rxwi(dev);
+		if (!txwi)
+			return -ENOMEM;
+
+		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
+		if (rx_token < 0) {
+			mt76_put_rxwi(dev, txwi);
+			return -ENOMEM;
+		}
+
+		buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
+		ctrl |= MT_DMA_CTL_TO_HOST;
+	}
+
+	WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
+	WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
+	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+	WRITE_ONCE(desc->info, 0);
+
+	entry->dma_addr[0] = buf->addr;
+	entry->dma_len[0] = buf->len;
+	entry->txwi = txwi;
+	entry->buf = data;
+	entry->wcid = 0xffff;
+	entry->skip_buf1 = true;
+	q->head = (q->head + 1) % q->ndesc;
+	q->queued++;
+
+	return idx;
+}
+
 static int
 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 		 struct mt76_queue_buf *buf, int nbufs, u32 info,
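
The new mt76_dma_add_rx_buf() helper fills a single rx descriptor: buf[0].len is packed into the ctrl word, and on WED rx queues the buffer is paired with an rxwi token carried in buf1. Below is a minimal userspace model of the FIELD_PREP()/FIELD_GET() packing it relies on; the mask placements are assumptions for illustration, not the hardware's real descriptor layout (that lives in the driver's dma.h).

/* Standalone model of the bit-field packing used above; builds with
 * any GCC/Clang-compatible C compiler. Mask positions are assumed
 * for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)    ((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v) (((uint32_t)(v) << __builtin_ctz(m)) & (m))
#define FIELD_GET(m, r)  (((r) & (m)) >> __builtin_ctz(m))

#define MT_DMA_CTL_SD_LEN0 GENMASK(29, 16) /* assumed placement */
#define MT_DMA_CTL_TOKEN   GENMASK(31, 16) /* assumed placement */

int main(void)
{
	/* Pack a buffer length into ctrl and a token into buf1,
	 * then read both back, mirroring the descriptor setup.
	 */
	uint32_t ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, 2048);
	uint32_t buf1 = FIELD_PREP(MT_DMA_CTL_TOKEN, 0x1234);

	printf("len0=%u token=0x%x\n",
	       FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl),
	       FIELD_GET(MT_DMA_CTL_TOKEN, buf1));
	return 0;
}
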
@@ -215,6 +261,11 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 	int i, idx = -1;
 	u32 ctrl, next;
 
+	if (txwi) {
+		q->entry[q->head].txwi = DMA_DUMMY_DATA;
+		q->entry[q->head].skip_buf0 = true;
+	}
+
 	for (i = 0; i < nbufs; i += 2, buf += 2) {
 		u32 buf0 = buf[0].addr, buf1 = 0;
 
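
With the WED rx branch split out, mt76_dma_add_buf() now tags the head entry for txwi-backed buffers once, up front, rather than inside the copy loop. Both add-buf paths share the same ring bookkeeping; the toy model below shows the head advance, wraparound, and queued accounting against a simplified stand-in for struct mt76_queue.

/* Toy ring model: each posted descriptor returns its slot index,
 * advances head modulo the ring size, and bumps the queued count.
 * The full check mirrors the refill loop's q->queued < q->ndesc - 1.
 */
#include <stdio.h>

struct ring {
	int head;   /* next descriptor slot to fill */
	int queued; /* descriptors currently owned by hardware */
	int ndesc;  /* ring size */
};

static int ring_push(struct ring *q)
{
	int idx = q->head;

	if (q->queued >= q->ndesc - 1)
		return -1; /* full: keep one slot free */

	q->head = (q->head + 1) % q->ndesc; /* wrap at the end */
	q->queued++;
	return idx;
}

int main(void)
{
	struct ring q = { .head = 510, .queued = 0, .ndesc = 512 };

	for (int i = 0; i < 4; i++)
		printf("idx=%d head=%d queued=%d\n",
		       ring_push(&q), q.head, q.queued);
	return 0;
}
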
@@ -224,51 +275,28 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 		desc = &q->desc[idx];
 		entry = &q->entry[idx];
 
-		if ((q->flags & MT_QFLAG_WED) &&
-		    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
-			struct mt76_txwi_cache *t = txwi;
-			int rx_token;
-
-			if (!t)
-				return -ENOMEM;
-
-			rx_token = mt76_rx_token_consume(dev, (void *)skb, t,
-							 buf[0].addr);
-			if (rx_token < 0)
-				return -ENOMEM;
-
-			buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
-			ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len) |
-			       MT_DMA_CTL_TO_HOST;
-		} else {
-			if (txwi) {
-				q->entry[next].txwi = DMA_DUMMY_DATA;
-				q->entry[next].skip_buf0 = true;
-			}
-
-			if (buf[0].skip_unmap)
-				entry->skip_buf0 = true;
-			entry->skip_buf1 = i == nbufs - 1;
-
-			entry->dma_addr[0] = buf[0].addr;
-			entry->dma_len[0] = buf[0].len;
-
-			ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
-			if (i < nbufs - 1) {
-				entry->dma_addr[1] = buf[1].addr;
-				entry->dma_len[1] = buf[1].len;
-				buf1 = buf[1].addr;
-				ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
-				if (buf[1].skip_unmap)
-					entry->skip_buf1 = true;
-			}
-
-			if (i == nbufs - 1)
-				ctrl |= MT_DMA_CTL_LAST_SEC0;
-			else if (i == nbufs - 2)
-				ctrl |= MT_DMA_CTL_LAST_SEC1;
+		if (buf[0].skip_unmap)
+			entry->skip_buf0 = true;
+		entry->skip_buf1 = i == nbufs - 1;
+
+		entry->dma_addr[0] = buf[0].addr;
+		entry->dma_len[0] = buf[0].len;
+
+		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
+		if (i < nbufs - 1) {
+			entry->dma_addr[1] = buf[1].addr;
+			entry->dma_len[1] = buf[1].len;
+			buf1 = buf[1].addr;
+			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
+			if (buf[1].skip_unmap)
+				entry->skip_buf1 = true;
 		}
 
+		if (i == nbufs - 1)
+			ctrl |= MT_DMA_CTL_LAST_SEC0;
+		else if (i == nbufs - 2)
+			ctrl |= MT_DMA_CTL_LAST_SEC1;
+
 		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
 		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
 		WRITE_ONCE(desc->info, cpu_to_le32(info));
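
After the de-indent, the loop body handles only the generic scatter-gather case: each descriptor carries up to two fragments (SD_LEN0/SD_LEN1), so the final fragment lands in slot 0 when nbufs is odd (LAST_SEC0) and in slot 1 when it is even (LAST_SEC1). A short sketch of that tail-marking logic, with assumed flag bits:

/* Models the last-segment marking from the loop above. The flag
 * values are illustrative assumptions, not the hardware's bits.
 */
#include <stdio.h>

#define LAST_SEC0 (1u << 30) /* assumed bit position */
#define LAST_SEC1 (1u << 14) /* assumed bit position */

int main(void)
{
	for (int nbufs = 1; nbufs <= 4; nbufs++) {
		for (int i = 0; i < nbufs; i += 2) {
			unsigned int ctrl = 0;

			if (i == nbufs - 1)
				ctrl |= LAST_SEC0; /* odd tail: buf0 last */
			else if (i == nbufs - 2)
				ctrl |= LAST_SEC1; /* even tail: buf1 last */
			printf("nbufs=%d desc=%d ctrl=%#x\n",
			       nbufs, i / 2, ctrl);
		}
	}
	return 0;
}
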
@@ -581,17 +609,9 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 	spin_lock_bh(&q->lock);
 
 	while (q->queued < q->ndesc - 1) {
-		struct mt76_txwi_cache *t = NULL;
 		struct mt76_queue_buf qbuf;
 		void *buf = NULL;
 
-		if ((q->flags & MT_QFLAG_WED) &&
-		    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
-			t = mt76_get_rxwi(dev);
-			if (!t)
-				break;
-		}
-
 		buf = page_frag_alloc(rx_page, q->buf_size, GFP_ATOMIC);
 		if (!buf)
 			break;
@@ -605,7 +625,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 		qbuf.addr = addr + offset;
 		qbuf.len = len - offset;
 		qbuf.skip_unmap = false;
-		if (mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, t) < 0) {
+		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
 			dma_unmap_single(dev->dma_dev, addr, len,
 					 DMA_FROM_DEVICE);
 			skb_free_frag(buf);
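
With rxwi allocation folded into mt76_dma_add_rx_buf(), the refill loop reduces to allocate, map, post, and unwind on failure. The hedged sketch below mirrors that shape in plain C; alloc_frag() and post_buf() are placeholders standing in for page_frag_alloc() and mt76_dma_add_rx_buf(), not real driver APIs.

/* Shape of the simplified refill loop. post_buf() fails once its
 * pretend ring fills up, triggering the same unwind path the patch
 * keeps: undo the allocation, then stop refilling.
 */
#include <stdio.h>
#include <stdlib.h>

static void *alloc_frag(size_t len)
{
	return malloc(len); /* stands in for page_frag_alloc() */
}

static int post_buf(void *buf)
{
	static int slots = 3; /* pretend the ring has 3 free slots */

	(void)buf;
	return slots-- > 0 ? 0 : -1;
}

int main(void)
{
	int filled = 0;

	for (;;) {
		void *buf = alloc_frag(2048);

		if (!buf)
			break;
		if (post_buf(buf) < 0) { /* ring full or no token */
			free(buf);       /* unwind the allocation */
			break;
		}
		filled++;
	}
	printf("filled %d rx buffers\n", filled);
	return 0;
}
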