Commit da70d18

rogerq authored and davem330 committed
net: ethernet: ti: am65-cpsw: Introduce multi queue Rx
am65-cpsw can support up to 8 queues at Rx. Use a macro
AM65_CPSW_MAX_RX_QUEUES to indicate that.

As there is only one DMA channel for RX traffic, the 8 queues come as
8 flows in that channel.

By default, we will start with 1 flow, as defined by the macro
AM65_CPSW_DEFAULT_RX_CHN_FLOWS.

The user can change the number of flows with ethtool, like so:
'ethtool -L ethx rx <N>'

All traffic will still come on flow 0. To get traffic on different
flows, the Classifiers will need to be set up.

Signed-off-by: Roger Quadros <[email protected]>
Reviewed-by: Simon Horman <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
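For orientation, here is a minimal sketch of the per-flow bookkeeping the diff below relies on. The names follow the identifiers used in the patch (AM65_CPSW_MAX_QUEUES, AM65_CPSW_DEFAULT_RX_CHN_FLOWS, rx_ch_num_flows, rx_chns.flows[], rx_pace_timeout); the layout itself is illustrative only and is not the driver's actual structure definitions.

/* Illustrative sketch only -- the real definitions live in the am65-cpsw
 * driver headers and carry many more members.
 */
#include <linux/types.h>

#define AM65_CPSW_MAX_QUEUES            8  /* one RX DMA channel, up to 8 flows */
#define AM65_CPSW_DEFAULT_RX_CHN_FLOWS  1  /* default: a single flow */

struct am65_cpsw_rx_flow {
        u32 rx_pace_timeout;    /* kept in ns; ethtool reports usecs (value / 1000) */
};

struct am65_cpsw_rx_chn {
        struct am65_cpsw_rx_flow flows[AM65_CPSW_MAX_QUEUES];
};

struct am65_cpsw_common {
        struct am65_cpsw_rx_chn rx_chns;
        u32 rx_ch_num_flows;    /* what 'ethtool -L ethx rx <N>' changes */
        u32 tx_ch_num;
};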
1 parent 52fa3b6 commit da70d18

File tree

3 files changed: +269 -230 lines changed


drivers/net/ethernet/ti/am65-cpsw-ethtool.c

Lines changed: 29 additions & 46 deletions
@@ -427,9 +427,9 @@ static void am65_cpsw_get_channels(struct net_device *ndev,
 {
        struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
 
-       ch->max_rx = AM65_CPSW_MAX_RX_QUEUES;
-       ch->max_tx = AM65_CPSW_MAX_TX_QUEUES;
-       ch->rx_count = AM65_CPSW_MAX_RX_QUEUES;
+       ch->max_rx = AM65_CPSW_MAX_QUEUES;
+       ch->max_tx = AM65_CPSW_MAX_QUEUES;
+       ch->rx_count = common->rx_ch_num_flows;
        ch->tx_count = common->tx_ch_num;
 }
 
@@ -447,9 +447,8 @@ static int am65_cpsw_set_channels(struct net_device *ndev,
        if (common->usage_count)
                return -EBUSY;
 
-       am65_cpsw_nuss_remove_tx_chns(common);
-
-       return am65_cpsw_nuss_update_tx_chns(common, chs->tx_count);
+       return am65_cpsw_nuss_update_tx_rx_chns(common, chs->tx_count,
+                                               chs->rx_count);
 }
 
 static void
@@ -913,80 +912,64 @@ static void am65_cpsw_get_mm_stats(struct net_device *ndev,
        s->MACMergeHoldCount = readl(base + AM65_CPSW_STATN_IET_TX_HOLD);
 }
 
-static int am65_cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
-                                 struct kernel_ethtool_coalesce *kernel_coal,
-                                 struct netlink_ext_ack *extack)
-{
-       struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
-       struct am65_cpsw_tx_chn *tx_chn;
-
-       tx_chn = &common->tx_chns[0];
-
-       coal->rx_coalesce_usecs = common->rx_pace_timeout / 1000;
-       coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout / 1000;
-
-       return 0;
-}
-
 static int am65_cpsw_get_per_queue_coalesce(struct net_device *ndev, u32 queue,
                                            struct ethtool_coalesce *coal)
 {
        struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+       struct am65_cpsw_rx_flow *rx_flow;
        struct am65_cpsw_tx_chn *tx_chn;
 
-       if (queue >= AM65_CPSW_MAX_TX_QUEUES)
+       if (queue >= AM65_CPSW_MAX_QUEUES)
                return -EINVAL;
 
        tx_chn = &common->tx_chns[queue];
-
        coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout / 1000;
 
+       rx_flow = &common->rx_chns.flows[queue];
+       coal->rx_coalesce_usecs = rx_flow->rx_pace_timeout / 1000;
+
        return 0;
 }
 
-static int am65_cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
+static int am65_cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
                                  struct kernel_ethtool_coalesce *kernel_coal,
                                  struct netlink_ext_ack *extack)
 {
-       struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
-       struct am65_cpsw_tx_chn *tx_chn;
-
-       tx_chn = &common->tx_chns[0];
-
-       if (coal->rx_coalesce_usecs && coal->rx_coalesce_usecs < 20)
-               return -EINVAL;
-
-       if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20)
-               return -EINVAL;
-
-       common->rx_pace_timeout = coal->rx_coalesce_usecs * 1000;
-       tx_chn->tx_pace_timeout = coal->tx_coalesce_usecs * 1000;
-
-       return 0;
+       return am65_cpsw_get_per_queue_coalesce(ndev, 0, coal);
 }
 
 static int am65_cpsw_set_per_queue_coalesce(struct net_device *ndev, u32 queue,
                                            struct ethtool_coalesce *coal)
 {
        struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+       struct am65_cpsw_rx_flow *rx_flow;
        struct am65_cpsw_tx_chn *tx_chn;
 
-       if (queue >= AM65_CPSW_MAX_TX_QUEUES)
+       if (queue >= AM65_CPSW_MAX_QUEUES)
                return -EINVAL;
 
        tx_chn = &common->tx_chns[queue];
-
-       if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20) {
-               dev_info(common->dev, "defaulting to min value of 20us for tx-usecs for tx-%u\n",
-                        queue);
-               coal->tx_coalesce_usecs = 20;
-       }
+       if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20)
+               return -EINVAL;
 
        tx_chn->tx_pace_timeout = coal->tx_coalesce_usecs * 1000;
 
+       rx_flow = &common->rx_chns.flows[queue];
+       if (coal->rx_coalesce_usecs && coal->rx_coalesce_usecs < 20)
+               return -EINVAL;
+
+       rx_flow->rx_pace_timeout = coal->rx_coalesce_usecs * 1000;
+
        return 0;
 }
 
+static int am65_cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
+                                 struct kernel_ethtool_coalesce *kernel_coal,
+                                 struct netlink_ext_ack *extack)
+{
+       return am65_cpsw_set_per_queue_coalesce(ndev, 0, coal);
+}
+
 const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {
        .begin = am65_cpsw_ethtool_op_begin,
        .complete = am65_cpsw_ethtool_op_complete,
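The hunk above cuts off inside am65_cpsw_ethtool_ops_slave, so only .begin/.complete are visible. For context, a hedged sketch of how handlers like these are typically wired into an ethtool_ops table, assuming the functions from the diff are in scope; the driver's actual table may list different or additional callbacks and flags.

/* Sketch only: these callback members exist in struct ethtool_ops, but this
 * is not the driver's actual ops table.
 */
#include <linux/ethtool.h>

static const struct ethtool_ops example_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
        .get_channels              = am65_cpsw_get_channels,
        .set_channels              = am65_cpsw_set_channels,
        .get_coalesce              = am65_cpsw_get_coalesce,
        .set_coalesce              = am65_cpsw_set_coalesce,
        .get_per_queue_coalesce    = am65_cpsw_get_per_queue_coalesce,
        .set_per_queue_coalesce    = am65_cpsw_set_per_queue_coalesce,
};

With a table like this in place, per-queue values are reachable from userspace with a recent ethtool, e.g. 'ethtool --per-queue ethx queue_mask 0x2 --show-coalesce', while plain 'ethtool -c ethx' reports queue 0 via the delegating get_coalesce handler.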
