Skip to content

Commit d70a0d8

Browse files
ParthibanI17164kuba-moo
authored and committed
net: ethernet: oa_tc6: implement receive path to receive rx ethernet frames
SPI rx data buffer can contain one or more receive data chunks. A receive data chunk consists a 64 bytes receive data chunk payload followed a 4 bytes data footer at the end. The data footer contains the information needed to determine the validity and location of the receive frame data within the receive data chunk payload and the host can use these information to generate ethernet frame. Initially the receive chunks available will be updated from the buffer status register and then it will be updated from the footer received on each spi data transfer. Tx data valid or empty chunks equal to the number receive chunks available will be transmitted in the MOSI to receive all the rx chunks. Additionally the receive data footer contains the below information as well. The received footer will be examined for the receive errors if any. Reviewed-by: Andrew Lunn <[email protected]> Signed-off-by: Parthiban Veerasooran <[email protected]> Link: https://patch.msgid.link/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
1 parent 53fbde8 commit d70a0d8

File tree

1 file changed

+233
-8
lines changed

1 file changed

+233
-8
lines changed

drivers/net/ethernet/oa_tc6.c

Lines changed: 233 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -29,11 +29,13 @@
2929
#define STATUS0_RESETC BIT(6) /* Reset Complete */
3030
#define STATUS0_HEADER_ERROR BIT(5)
3131
#define STATUS0_LOSS_OF_FRAME_ERROR BIT(4)
32+
#define STATUS0_RX_BUFFER_OVERFLOW_ERROR BIT(3)
3233
#define STATUS0_TX_PROTOCOL_ERROR BIT(0)
3334

3435
/* Buffer Status Register */
3536
#define OA_TC6_REG_BUFFER_STATUS 0x000B
3637
#define BUFFER_STATUS_TX_CREDITS_AVAILABLE GENMASK(15, 8)
38+
#define BUFFER_STATUS_RX_CHUNKS_AVAILABLE GENMASK(7, 0)
3739

3840
/* Interrupt Mask Register #0 */
3941
#define OA_TC6_REG_INT_MASK0 0x000C
@@ -67,6 +69,12 @@
6769
#define OA_TC6_DATA_FOOTER_EXTENDED_STS BIT(31)
6870
#define OA_TC6_DATA_FOOTER_RXD_HEADER_BAD BIT(30)
6971
#define OA_TC6_DATA_FOOTER_CONFIG_SYNC BIT(29)
72+
#define OA_TC6_DATA_FOOTER_RX_CHUNKS GENMASK(28, 24)
73+
#define OA_TC6_DATA_FOOTER_DATA_VALID BIT(21)
74+
#define OA_TC6_DATA_FOOTER_START_VALID BIT(20)
75+
#define OA_TC6_DATA_FOOTER_START_WORD_OFFSET GENMASK(19, 16)
76+
#define OA_TC6_DATA_FOOTER_END_VALID BIT(14)
77+
#define OA_TC6_DATA_FOOTER_END_BYTE_OFFSET GENMASK(13, 8)
7078
#define OA_TC6_DATA_FOOTER_TX_CREDITS GENMASK(5, 1)
7179

7280
/* PHY – Clause 45 registers memory map selector (MMS) as per table 6 in the
@@ -110,11 +118,14 @@ struct oa_tc6 {
110118
void *spi_data_rx_buf;
111119
struct sk_buff *ongoing_tx_skb;
112120
struct sk_buff *waiting_tx_skb;
121+
struct sk_buff *rx_skb;
113122
struct task_struct *spi_thread;
114123
wait_queue_head_t spi_wq;
115124
u16 tx_skb_offset;
116125
u16 spi_data_tx_buf_offset;
117126
u16 tx_credits;
127+
u8 rx_chunks_available;
128+
bool rx_buf_overflow;
118129
};
119130

120131
enum oa_tc6_header_type {
@@ -637,6 +648,15 @@ static int oa_tc6_enable_data_transfer(struct oa_tc6 *tc6)
637648
return oa_tc6_write_register(tc6, OA_TC6_REG_CONFIG0, value);
638649
}
639650

651+
static void oa_tc6_cleanup_ongoing_rx_skb(struct oa_tc6 *tc6)
652+
{
653+
if (tc6->rx_skb) {
654+
tc6->netdev->stats.rx_dropped++;
655+
kfree_skb(tc6->rx_skb);
656+
tc6->rx_skb = NULL;
657+
}
658+
}
659+
640660
static void oa_tc6_cleanup_ongoing_tx_skb(struct oa_tc6 *tc6)
641661
{
642662
if (tc6->ongoing_tx_skb) {
@@ -666,6 +686,13 @@ static int oa_tc6_process_extended_status(struct oa_tc6 *tc6)
666686
return ret;
667687
}
668688

689+
if (FIELD_GET(STATUS0_RX_BUFFER_OVERFLOW_ERROR, value)) {
690+
tc6->rx_buf_overflow = true;
691+
oa_tc6_cleanup_ongoing_rx_skb(tc6);
692+
net_err_ratelimited("%s: Receive buffer overflow error\n",
693+
tc6->netdev->name);
694+
return -EAGAIN;
695+
}
669696
if (FIELD_GET(STATUS0_TX_PROTOCOL_ERROR, value)) {
670697
netdev_err(tc6->netdev, "Transmit protocol error\n");
671698
return -ENODEV;
@@ -690,8 +717,11 @@ static int oa_tc6_process_rx_chunk_footer(struct oa_tc6 *tc6, u32 footer)
690717
/* Process rx chunk footer for the following,
691718
* 1. tx credits
692719
* 2. errors if any from MAC-PHY
720+
* 3. receive chunks available
693721
*/
694722
tc6->tx_credits = FIELD_GET(OA_TC6_DATA_FOOTER_TX_CREDITS, footer);
723+
tc6->rx_chunks_available = FIELD_GET(OA_TC6_DATA_FOOTER_RX_CHUNKS,
724+
footer);
695725

696726
if (FIELD_GET(OA_TC6_DATA_FOOTER_EXTENDED_STS, footer)) {
697727
int ret = oa_tc6_process_extended_status(tc6);
@@ -717,6 +747,141 @@ static int oa_tc6_process_rx_chunk_footer(struct oa_tc6 *tc6, u32 footer)
717747
return 0;
718748
}
719749

750+
static void oa_tc6_submit_rx_skb(struct oa_tc6 *tc6)
751+
{
752+
tc6->rx_skb->protocol = eth_type_trans(tc6->rx_skb, tc6->netdev);
753+
tc6->netdev->stats.rx_packets++;
754+
tc6->netdev->stats.rx_bytes += tc6->rx_skb->len;
755+
756+
netif_rx(tc6->rx_skb);
757+
758+
tc6->rx_skb = NULL;
759+
}
760+
761+
/* Append @length bytes of chunk payload to the rx frame under assembly.
 * skb_put_data() is the idiomatic kernel helper for memcpy(skb_put(), ...).
 */
static void oa_tc6_update_rx_skb(struct oa_tc6 *tc6, u8 *payload, u8 length)
{
	skb_put_data(tc6->rx_skb, payload, length);
}
765+
766+
static int oa_tc6_allocate_rx_skb(struct oa_tc6 *tc6)
767+
{
768+
tc6->rx_skb = netdev_alloc_skb_ip_align(tc6->netdev, tc6->netdev->mtu +
769+
ETH_HLEN + ETH_FCS_LEN);
770+
if (!tc6->rx_skb) {
771+
tc6->netdev->stats.rx_dropped++;
772+
return -ENOMEM;
773+
}
774+
775+
return 0;
776+
}
777+
778+
static int oa_tc6_prcs_complete_rx_frame(struct oa_tc6 *tc6, u8 *payload,
779+
u16 size)
780+
{
781+
int ret;
782+
783+
ret = oa_tc6_allocate_rx_skb(tc6);
784+
if (ret)
785+
return ret;
786+
787+
oa_tc6_update_rx_skb(tc6, payload, size);
788+
789+
oa_tc6_submit_rx_skb(tc6);
790+
791+
return 0;
792+
}
793+
794+
/* Begin assembling a new rx frame: allocate the skb and copy in the first
 * @size payload bytes. The frame is completed by a later end chunk.
 */
static int oa_tc6_prcs_rx_frame_start(struct oa_tc6 *tc6, u8 *payload, u16 size)
{
	int ret = oa_tc6_allocate_rx_skb(tc6);

	if (ret)
		return ret;

	oa_tc6_update_rx_skb(tc6, payload, size);

	return 0;
}
806+
807+
/* Copy the final @size bytes of the frame into the rx skb under assembly
 * and hand the completed frame to the network stack.
 */
static void oa_tc6_prcs_rx_frame_end(struct oa_tc6 *tc6, u8 *payload, u16 size)
{
	oa_tc6_update_rx_skb(tc6, payload, size);

	oa_tc6_submit_rx_skb(tc6);
}
813+
814+
/* Append a full chunk payload belonging to a frame already in progress.
 * @footer is currently unused by this handler.
 */
static void oa_tc6_prcs_ongoing_rx_frame(struct oa_tc6 *tc6, u8 *payload,
					 u32 footer)
{
	oa_tc6_update_rx_skb(tc6, payload, OA_TC6_CHUNK_PAYLOAD_SIZE);
}
819+
820+
/* Parse one receive data chunk payload using the start/end markers in its
 * footer and feed the frame bytes to the appropriate handler. Returns 0 on
 * success or a negative error code from skb allocation.
 */
static int oa_tc6_prcs_rx_chunk_payload(struct oa_tc6 *tc6, u8 *data,
					u32 footer)
{
	/* The start offset is reported in 32-bit words; convert to bytes.
	 * The end offset is already a byte offset (inclusive).
	 */
	u8 start_byte_offset = FIELD_GET(OA_TC6_DATA_FOOTER_START_WORD_OFFSET,
					 footer) * sizeof(u32);
	u8 end_byte_offset = FIELD_GET(OA_TC6_DATA_FOOTER_END_BYTE_OFFSET,
				       footer);
	bool start_valid = FIELD_GET(OA_TC6_DATA_FOOTER_START_VALID, footer);
	bool end_valid = FIELD_GET(OA_TC6_DATA_FOOTER_END_VALID, footer);
	u16 size;

	/* Restart the new rx frame after receiving rx buffer overflow error */
	if (start_valid && tc6->rx_buf_overflow)
		tc6->rx_buf_overflow = false;

	/* While still in the overflow state, discard chunk data until the
	 * next frame start arrives.
	 */
	if (tc6->rx_buf_overflow)
		return 0;

	/* Process the chunk with complete rx frame */
	if (start_valid && end_valid && start_byte_offset < end_byte_offset) {
		size = end_byte_offset + 1 - start_byte_offset;
		return oa_tc6_prcs_complete_rx_frame(tc6,
						     &data[start_byte_offset],
						     size);
	}

	/* Process the chunk with only rx frame start */
	if (start_valid && !end_valid) {
		size = OA_TC6_CHUNK_PAYLOAD_SIZE - start_byte_offset;
		return oa_tc6_prcs_rx_frame_start(tc6,
						  &data[start_byte_offset],
						  size);
	}

	/* Process the chunk with only rx frame end */
	if (end_valid && !start_valid) {
		size = end_byte_offset + 1;
		oa_tc6_prcs_rx_frame_end(tc6, data, size);
		return 0;
	}

	/* Process the chunk with previous rx frame end and next rx frame
	 * start.
	 */
	if (start_valid && end_valid && start_byte_offset > end_byte_offset) {
		/* After rx buffer overflow error received, there might be a
		 * possibility of getting an end valid of a previously
		 * incomplete rx frame along with the new rx frame start valid.
		 * In that case no skb is in progress, so only the new start
		 * is processed.
		 */
		if (tc6->rx_skb) {
			size = end_byte_offset + 1;
			oa_tc6_prcs_rx_frame_end(tc6, data, size);
		}
		size = OA_TC6_CHUNK_PAYLOAD_SIZE - start_byte_offset;
		return oa_tc6_prcs_rx_frame_start(tc6,
						  &data[start_byte_offset],
						  size);
	}

	/* Process the chunk with ongoing rx frame data */
	oa_tc6_prcs_ongoing_rx_frame(tc6, data, footer);

	return 0;
}
884+
720885
static u32 oa_tc6_get_rx_chunk_footer(struct oa_tc6 *tc6, u16 footer_offset)
721886
{
722887
u8 *rx_buf = tc6->spi_data_rx_buf;
@@ -742,6 +907,20 @@ static int oa_tc6_process_spi_data_rx_buf(struct oa_tc6 *tc6, u16 length)
742907
ret = oa_tc6_process_rx_chunk_footer(tc6, footer);
743908
if (ret)
744909
return ret;
910+
911+
/* If there is a data valid chunks then process it for the
912+
* information needed to determine the validity and the location
913+
* of the receive frame data.
914+
*/
915+
if (FIELD_GET(OA_TC6_DATA_FOOTER_DATA_VALID, footer)) {
916+
u8 *payload = tc6->spi_data_rx_buf + i *
917+
OA_TC6_CHUNK_SIZE;
918+
919+
ret = oa_tc6_prcs_rx_chunk_payload(tc6, payload,
920+
footer);
921+
if (ret)
922+
return ret;
923+
}
745924
}
746925

747926
return 0;
@@ -834,31 +1013,74 @@ static u16 oa_tc6_prepare_spi_tx_buf_for_tx_skbs(struct oa_tc6 *tc6)
8341013
return used_tx_credits * OA_TC6_CHUNK_SIZE;
8351014
}
8361015

1016+
static void oa_tc6_add_empty_chunks_to_spi_buf(struct oa_tc6 *tc6,
1017+
u16 needed_empty_chunks)
1018+
{
1019+
__be32 header;
1020+
1021+
header = oa_tc6_prepare_data_header(OA_TC6_DATA_INVALID,
1022+
OA_TC6_DATA_START_INVALID,
1023+
OA_TC6_DATA_END_INVALID, 0);
1024+
1025+
while (needed_empty_chunks--) {
1026+
__be32 *tx_buf = tc6->spi_data_tx_buf +
1027+
tc6->spi_data_tx_buf_offset;
1028+
1029+
*tx_buf = header;
1030+
tc6->spi_data_tx_buf_offset += OA_TC6_CHUNK_SIZE;
1031+
}
1032+
}
1033+
1034+
/* Pad the SPI tx buffer with empty chunks when there are more rx chunks
 * available than tx chunks queued, so every pending rx chunk can be clocked
 * in. Returns the total SPI transfer length in bytes.
 */
static u16 oa_tc6_prepare_spi_tx_buf_for_rx_chunks(struct oa_tc6 *tc6, u16 len)
{
	u16 tx_chunks = len / OA_TC6_CHUNK_SIZE;
	u16 extra_chunks;

	/* Enough tx chunks already queued to carry all pending rx chunks. */
	if (tc6->rx_chunks_available <= tx_chunks)
		return len;

	extra_chunks = tc6->rx_chunks_available - tx_chunks;
	oa_tc6_add_empty_chunks_to_spi_buf(tc6, extra_chunks);

	return len + extra_chunks * OA_TC6_CHUNK_SIZE;
}
1052+
8371053
static int oa_tc6_try_spi_transfer(struct oa_tc6 *tc6)
8381054
{
8391055
int ret;
8401056

8411057
while (true) {
842-
u16 spi_length = 0;
1058+
u16 spi_len = 0;
8431059

8441060
tc6->spi_data_tx_buf_offset = 0;
8451061

8461062
if (tc6->ongoing_tx_skb || tc6->waiting_tx_skb)
847-
spi_length = oa_tc6_prepare_spi_tx_buf_for_tx_skbs(tc6);
1063+
spi_len = oa_tc6_prepare_spi_tx_buf_for_tx_skbs(tc6);
8481064

849-
if (spi_length == 0)
1065+
spi_len = oa_tc6_prepare_spi_tx_buf_for_rx_chunks(tc6, spi_len);
1066+
1067+
if (spi_len == 0)
8501068
break;
8511069

852-
ret = oa_tc6_spi_transfer(tc6, OA_TC6_DATA_HEADER, spi_length);
1070+
ret = oa_tc6_spi_transfer(tc6, OA_TC6_DATA_HEADER, spi_len);
8531071
if (ret) {
8541072
netdev_err(tc6->netdev, "SPI data transfer failed: %d\n",
8551073
ret);
8561074
return ret;
8571075
}
8581076

859-
ret = oa_tc6_process_spi_data_rx_buf(tc6, spi_length);
1077+
ret = oa_tc6_process_spi_data_rx_buf(tc6, spi_len);
8601078
if (ret) {
1079+
if (ret == -EAGAIN)
1080+
continue;
1081+
8611082
oa_tc6_cleanup_ongoing_tx_skb(tc6);
1083+
oa_tc6_cleanup_ongoing_rx_skb(tc6);
8621084
netdev_err(tc6->netdev, "Device error: %d\n", ret);
8631085
return ret;
8641086
}
@@ -896,15 +1118,17 @@ static int oa_tc6_update_buffer_status_from_register(struct oa_tc6 *tc6)
8961118
u32 value;
8971119
int ret;
8981120

899-
/* Initially tx credits to be updated from the register as there is no
900-
* data transfer performed yet. Later it will be updated from the rx
901-
* footer.
1121+
/* Initially tx credits and rx chunks available to be updated from the
1122+
* register as there is no data transfer performed yet. Later they will
1123+
* be updated from the rx footer.
9021124
*/
9031125
ret = oa_tc6_read_register(tc6, OA_TC6_REG_BUFFER_STATUS, &value);
9041126
if (ret)
9051127
return ret;
9061128

9071129
tc6->tx_credits = FIELD_GET(BUFFER_STATUS_TX_CREDITS_AVAILABLE, value);
1130+
tc6->rx_chunks_available = FIELD_GET(BUFFER_STATUS_RX_CHUNKS_AVAILABLE,
1131+
value);
9081132

9091133
return 0;
9101134
}
@@ -1054,6 +1278,7 @@ void oa_tc6_exit(struct oa_tc6 *tc6)
10541278
kthread_stop(tc6->spi_thread);
10551279
dev_kfree_skb_any(tc6->ongoing_tx_skb);
10561280
dev_kfree_skb_any(tc6->waiting_tx_skb);
1281+
dev_kfree_skb_any(tc6->rx_skb);
10571282
}
10581283
EXPORT_SYMBOL_GPL(oa_tc6_exit);
10591284

0 commit comments

Comments
 (0)