Skip to content

Commit 86d7476

Browse files
sgoutham-marvell authored and davem330 committed
octeontx2-pf: TCP segmentation offload support
Adds TCP segmentation offload (TSO) support. The first version of the silicon does not support TSO offload in hardware, so for that silicon driver-level (software) TSO support is added. Signed-off-by: Sunil Goutham <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 85069e9 commit 86d7476

File tree

5 files changed

+273
-4
lines changed

5 files changed

+273
-4
lines changed

drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010

1111
#include <linux/interrupt.h>
1212
#include <linux/pci.h>
13+
#include <net/tso.h>
1314

1415
#include "otx2_reg.h"
1516
#include "otx2_common.h"
@@ -522,6 +523,11 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
522523
if (err)
523524
return err;
524525

526+
err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
527+
TSO_HEADER_SIZE);
528+
if (err)
529+
return err;
530+
525531
sq->sqe_base = sq->sqe->base;
526532
sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL);
527533
if (!sq->sg)
@@ -1211,6 +1217,8 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
12111217
pfvf->hw.sqb_size = rsp->sqb_size;
12121218
pfvf->hw.rx_chan_base = rsp->rx_chan_base;
12131219
pfvf->hw.tx_chan_base = rsp->tx_chan_base;
1220+
pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
1221+
pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
12141222
}
12151223

12161224
void mbox_handler_msix_offset(struct otx2_nic *pfvf,

drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -129,6 +129,11 @@ struct otx2_hw {
129129
u16 rq_skid;
130130
u8 cq_time_wait;
131131

132+
/* For TSO segmentation */
133+
u8 lso_tsov4_idx;
134+
u8 lso_tsov6_idx;
135+
u8 hw_tso;
136+
132137
/* MSI-X */
133138
u8 cint_cnt; /* CQ interrupt count */
134139
u16 npa_msixoff; /* Offset of NPA vectors */
@@ -189,11 +194,17 @@ static inline bool is_96xx_B0(struct pci_dev *pdev)
189194

190195
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
191196
{
197+
struct otx2_hw *hw = &pfvf->hw;
198+
192199
pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
193200
pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
194201
pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;
195202

203+
hw->hw_tso = true;
204+
196205
if (is_96xx_A0(pfvf->pdev)) {
206+
hw->hw_tso = false;
207+
197208
/* Time based irq coalescing is not supported */
198209
pfvf->hw.cq_qcount_wait = 0x0;
199210

drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -616,6 +616,7 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
616616
for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
617617
sq = &qset->sq[qidx];
618618
qmem_free(pf->dev, sq->sqe);
619+
qmem_free(pf->dev, sq->tso_hdrs);
619620
kfree(sq->sg);
620621
kfree(sq->sqb_ptrs);
621622
}
@@ -986,8 +987,9 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
986987
struct otx2_snd_queue *sq;
987988
struct netdev_queue *txq;
988989

989-
/* Check for minimum packet length */
990-
if (skb->len <= ETH_HLEN) {
990+
/* Check for minimum and maximum packet length */
991+
if (skb->len <= ETH_HLEN ||
992+
(!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) {
991993
dev_kfree_skb(skb);
992994
return NETDEV_TX_OK;
993995
}
@@ -1243,11 +1245,12 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
12431245

12441246
netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
12451247
NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
1246-
NETIF_F_SG);
1248+
NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6);
12471249
netdev->features |= netdev->hw_features;
12481250

12491251
netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
12501252

1253+
netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
12511254
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
12521255

12531256
netdev->netdev_ops = &otx2_netdev_ops;

drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c

Lines changed: 246 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010

1111
#include <linux/etherdevice.h>
1212
#include <net/ip.h>
13+
#include <net/tso.h>
1314

1415
#include "otx2_reg.h"
1516
#include "otx2_common.h"
@@ -428,6 +429,38 @@ static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
428429
return true;
429430
}
430431

432+
/* Add SQE extended header subdescriptor.
 *
 * Writes a NIX_SUBDC_EXT subdescriptor at @*offset inside the current
 * SQE and advances @*offset past it.  For GSO skbs it programs the HW
 * LSO fields (segment start, MPS, LSO format index) so the NIC can
 * segment the packet itself; for non-GSO skbs only the subdc code is
 * set and the subdescriptor is left otherwise untouched.
 */
static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			     struct sk_buff *skb, int *offset)
{
	struct nix_sqe_ext_s *ext;

	ext = (struct nix_sqe_ext_s *)(sq->sqe_base + *offset);
	ext->subdc = NIX_SUBDC_EXT;
	if (skb_shinfo(skb)->gso_size) {
		ext->lso = 1;
		/* lso_sb = total L2..L4 header bytes HW must replicate
		 * in front of every generated segment.
		 */
		ext->lso_sb = skb_transport_offset(skb) + tcp_hdrlen(skb);
		ext->lso_mps = skb_shinfo(skb)->gso_size;

		/* Only TSOv4 and TSOv6 GSO offloads are supported */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
			ext->lso_format = pfvf->hw.lso_tsov4_idx;

			/* HW adds payload size to 'ip_hdr->tot_len' while
			 * sending TSO segment, hence set payload length
			 * in IP header of the packet to just header length.
			 */
			ip_hdr(skb)->tot_len =
				htons(ext->lso_sb - skb_network_offset(skb));
		} else {
			ext->lso_format = pfvf->hw.lso_tsov6_idx;
			/* Same trick for v6: HW adds the per-segment payload
			 * size to payload_len, so pre-seed it with only the
			 * header bytes that follow the IPv6 header.
			 */
			ipv6_hdr(skb)->payload_len =
				htons(ext->lso_sb - skb_network_offset(skb));
		}
	}
	*offset += sizeof(*ext);
}
463+
431464
/* Add SQE header subdescriptor structure */
432465
static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
433466
struct nix_sqe_hdr_s *sqe_hdr,
@@ -475,6 +508,209 @@ static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
475508
}
476509
}
477510

511+
/* DMA-map all fragments of a TSO skb once, up front.
 *
 * @sqe:     SQE index whose sg_list records the mappings (the first
 *           SQE of the TSO burst; later segments reference it).
 * @hdr_len: L2..L4 header length — headers are sent from the
 *           pre-built tso_hdrs buffer, so only payload is mapped here.
 *
 * Returns 0 on success, -EINVAL with all prior mappings undone on a
 * DMA mapping failure.
 */
static int otx2_dma_map_tso_skb(struct otx2_nic *pfvf,
				struct otx2_snd_queue *sq,
				struct sk_buff *skb, int sqe, int hdr_len)
{
	int num_segs = skb_shinfo(skb)->nr_frags + 1;
	struct sg_list *sg = &sq->sg[sqe];
	u64 dma_addr;
	int seg, len;

	sg->num_segs = 0;

	/* Get payload length at skb->data */
	len = skb_headlen(skb) - hdr_len;

	for (seg = 0; seg < num_segs; seg++) {
		/* Skip skb->data, if there is no payload */
		if (!seg && !len)
			continue;
		dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
		if (dma_mapping_error(pfvf->dev, dma_addr))
			goto unmap;

		/* Save DMA mapping info for later unmapping */
		sg->dma_addr[sg->num_segs] = dma_addr;
		sg->size[sg->num_segs] = len;
		sg->num_segs++;
	}
	return 0;
unmap:
	otx2_dma_unmap_skb_frags(pfvf, sg);
	return -EINVAL;
}
543+
544+
/* Translate a TSO segment's CPU payload address into its DMA address.
 *
 * @seg:      fragment index from the TSO state machine; a negative value
 *            means the data lives in the skb linear area (skb->data).
 * @seg_addr: CPU address of the payload chunk (tso.data).
 * @sqe:      first SQE of the burst, where otx2_dma_map_tso_skb() stored
 *            the mappings.
 *
 * Works by computing the byte offset of @seg_addr within its fragment
 * (or linear area) and adding it to the saved DMA base.  When the
 * linear area carried payload it occupies sg->dma_addr[0], so frag
 * indices are shifted up by one in that case.
 */
static u64 otx2_tso_frag_dma_addr(struct otx2_snd_queue *sq,
				  struct sk_buff *skb, int seg,
				  u64 seg_addr, int hdr_len, int sqe)
{
	struct sg_list *sg = &sq->sg[sqe];
	const skb_frag_t *frag;
	int offset;

	if (seg < 0)
		return sg->dma_addr[0] + (seg_addr - (u64)skb->data);

	frag = &skb_shinfo(skb)->frags[seg];
	offset = seg_addr - (u64)skb_frag_address(frag);
	if (skb_headlen(skb) - hdr_len)
		seg++;
	return sg->dma_addr[seg] + offset;
}
561+
562+
/* Emit NIX_SUBDC_SG subdescriptors for one TSO segment's buffer list.
 *
 * Packs up to MAX_SEGS_PER_SG buffer addresses per SG subdescriptor,
 * starting a new one whenever the current fills up, and advances
 * @*offset past everything written.  The sg_lens alias writes the
 * per-buffer sizes into the SG header word via frag_num().
 */
static void otx2_sqe_tso_add_sg(struct otx2_snd_queue *sq,
				struct sg_list *list, int *offset)
{
	struct nix_sqe_sg_s *sg = NULL;
	u16 *sg_lens = NULL;
	u64 *iova = NULL;
	int seg;

	/* Add SG descriptors with buffer addresses */
	for (seg = 0; seg < list->num_segs; seg++) {
		if ((seg % MAX_SEGS_PER_SG) == 0) {
			sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
			sg->ld_type = NIX_SEND_LDTYPE_LDD;
			sg->subdc = NIX_SUBDC_SG;
			sg->segs = 0;
			sg_lens = (void *)sg;
			iova = (void *)sg + sizeof(*sg);
			/* Next subdc always starts at a 16-byte boundary,
			 * so whether this SG ends up holding 2 or 3 IOVAs,
			 * reserve a 16-byte-aligned amount of space for it.
			 */
			if ((list->num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
				*offset += sizeof(*sg) + (3 * sizeof(u64));
			else
				*offset += sizeof(*sg) + sizeof(u64);
		}
		sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = list->size[seg];
		*iova++ = list->dma_addr[seg];
		sg->segs++;
	}
}
592+
593+
/* Software TSO: segment a GSO skb into MSS-sized packets and queue one
 * SQE per segment.  Used when the silicon cannot offload this skb
 * (see is_hw_tso_supported()).
 *
 * Per segment: a fresh L2..L4 header is built into the per-SQE
 * tso_hdrs DMA buffer via the kernel's tso_* helpers, payload chunks
 * are resolved to DMA addresses from the mappings made once up front,
 * and header + payload are emitted as SEND_HDR + SG subdescriptors.
 * On DMA-map failure the skb is dropped silently (freed, no error
 * propagated to the caller).
 */
static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			       struct sk_buff *skb, u16 qidx)
{
	struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int tcp_data, seg_len, pkt_len, offset;
	struct nix_sqe_hdr_s *sqe_hdr;
	int first_sqe = sq->head;
	struct sg_list list;
	struct tso_t tso;

	/* Map SKB's fragments to DMA.
	 * It's done here to avoid mapping for every TSO segment's packet.
	 */
	if (otx2_dma_map_tso_skb(pfvf, sq, skb, first_sqe, hdr_len)) {
		dev_kfree_skb_any(skb);
		return;
	}

	/* Account the full skb once for BQL, not per segment */
	netdev_tx_sent_queue(txq, skb->len);

	tso_start(skb, &tso);
	tcp_data = skb->len - hdr_len;
	while (tcp_data > 0) {
		char *hdr;

		seg_len = min_t(int, skb_shinfo(skb)->gso_size, tcp_data);
		tcp_data -= seg_len;

		/* Set SQE's SEND_HDR */
		memset(sq->sqe_base, 0, sq->sqe_size);
		sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
		otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
		offset = sizeof(*sqe_hdr);

		/* Add TSO segment's pkt header */
		hdr = sq->tso_hdrs->base + (sq->head * TSO_HEADER_SIZE);
		tso_build_hdr(skb, hdr, &tso, seg_len, tcp_data == 0);
		list.dma_addr[0] =
			sq->tso_hdrs->iova + (sq->head * TSO_HEADER_SIZE);
		list.size[0] = hdr_len;
		list.num_segs = 1;

		/* Add TSO segment's payload data fragments */
		pkt_len = hdr_len;
		while (seg_len > 0) {
			int size;

			size = min_t(int, tso.size, seg_len);

			list.size[list.num_segs] = size;
			list.dma_addr[list.num_segs] =
				otx2_tso_frag_dma_addr(sq, skb,
						       tso.next_frag_idx - 1,
						       (u64)tso.data, hdr_len,
						       first_sqe);
			list.num_segs++;
			pkt_len += size;
			seg_len -= size;
			tso_build_data(skb, &tso, size);
		}
		sqe_hdr->total = pkt_len;
		otx2_sqe_tso_add_sg(sq, &list, &offset);

		/* DMA mappings and skb needs to be freed only after last
		 * TSO segment is transmitted out. So set 'PNC' only for
		 * last segment. Also point last segment's sqe_id to first
		 * segment's SQE index where skb address and DMA mappings
		 * are saved.
		 */
		if (!tcp_data) {
			sqe_hdr->pnc = 1;
			sqe_hdr->sqe_id = first_sqe;
			sq->sg[first_sqe].skb = (u64)skb;
		} else {
			sqe_hdr->pnc = 0;
		}

		sqe_hdr->sizem1 = (offset / 16) - 1;

		/* Flush SQE to HW */
		otx2_sqe_flush(sq, offset);
	}
}
677+
678+
/* Decide whether this GSO skb can be segmented by the NIC itself.
 *
 * Returns false on silicon with no LSO support at all (hw_tso clear,
 * e.g. 96xx A0), and false on 96xx B0 when the final segment's payload
 * would be shorter than 16 bytes (HW erratum, see comment below).
 * Caller guarantees gso_size is non-zero, so the modulo is safe.
 */
static bool is_hw_tso_supported(struct otx2_nic *pfvf,
				struct sk_buff *skb)
{
	int payload_len, last_seg_size;

	if (!pfvf->hw.hw_tso)
		return false;

	/* HW has an issue due to which when the payload of the last LSO
	 * segment is shorter than 16 bytes, some header fields may not
	 * be correctly modified, hence don't offload such TSO segments.
	 */
	if (!is_96xx_B0(pfvf->pdev))
		return true;

	payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
	last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
	if (last_seg_size && last_seg_size < 16)
		return false;

	return true;
}
700+
701+
static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
702+
{
703+
if (!skb_shinfo(skb)->gso_size)
704+
return 1;
705+
706+
/* HW TSO */
707+
if (is_hw_tso_supported(pfvf, skb))
708+
return 1;
709+
710+
/* SW TSO */
711+
return skb_shinfo(skb)->gso_segs;
712+
}
713+
478714
bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
479715
struct sk_buff *skb, u16 qidx)
480716
{
@@ -489,7 +725,8 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
489725
*/
490726
free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
491727

492-
if (!free_sqe || free_sqe < sq->sqe_thresh)
728+
if (free_sqe < sq->sqe_thresh ||
729+
free_sqe < otx2_get_sqe_count(pfvf, skb))
493730
return false;
494731

495732
num_segs = skb_shinfo(skb)->nr_frags + 1;
@@ -505,6 +742,11 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
505742
num_segs = skb_shinfo(skb)->nr_frags + 1;
506743
}
507744

745+
if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
746+
otx2_sq_append_tso(pfvf, sq, skb, qidx);
747+
return true;
748+
}
749+
508750
/* Set SQE's SEND_HDR.
509751
* Do not clear the first 64bit as it contains constant info.
510752
*/
@@ -513,6 +755,9 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
513755
otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
514756
offset = sizeof(*sqe_hdr);
515757

758+
/* Add extended header if needed */
759+
otx2_sqe_add_ext(pfvf, sq, skb, &offset);
760+
516761
/* Add SG subdesc with data frags */
517762
if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
518763
otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);

drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@
2626
#define OTX2_MIN_MTU 64
2727
#define OTX2_MAX_MTU (9212 - OTX2_ETH_HLEN)
2828

29+
#define OTX2_MAX_GSO_SEGS 255
2930
#define OTX2_MAX_FRAGS_IN_SQE 9
3031

3132
/* Rx buffer size should be in multiples of 128bytes */
@@ -79,6 +80,7 @@ struct otx2_snd_queue {
7980
u64 *lmt_addr;
8081
void *sqe_base;
8182
struct qmem *sqe;
83+
struct qmem *tso_hdrs;
8284
struct sg_list *sg;
8385
u16 sqb_count;
8486
u64 *sqb_ptrs;

0 commit comments

Comments
 (0)