@@ -223,6 +223,7 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
 	int dsa_port = airoha_get_dsa_port(&dev);
 	struct airoha_foe_mac_info_common *l2;
 	u32 qdata, ports_pad, val;
+	u8 smac_id = 0xf;
 
 	memset(hwe, 0, sizeof(*hwe));
 
@@ -257,6 +258,8 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
 		 */
 		if (airhoa_is_lan_gdm_port(port))
 			val |= AIROHA_FOE_IB2_FAST_PATH;
+
+		smac_id = port->id;
 	}
 
 	if (is_multicast_ether_addr(data->eth.h_dest))
@@ -291,7 +294,7 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
 		hwe->ipv4.l2.src_mac_lo =
 			get_unaligned_be16(data->eth.h_source + 4);
 	} else {
-		l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, 0xf);
+		l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id);
 	}
 
 	if (data->vlan.num) {
@@ -636,7 +639,6 @@ airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
 	u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
 	struct airoha_foe_entry *hwe_p, hwe;
 	struct airoha_flow_table_entry *f;
-	struct airoha_foe_mac_info *l2;
 	int type;
 
 	hwe_p = airoha_ppe_foe_get_entry(ppe, hash);
@@ -653,18 +655,25 @@ airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
 
 	memcpy(&hwe, hwe_p, sizeof(*hwe_p));
 	hwe.ib1 = (hwe.ib1 & mask) | (e->data.ib1 & ~mask);
-	l2 = &hwe.bridge.l2;
-	memcpy(l2, &e->data.bridge.l2, sizeof(*l2));
 
 	type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe.ib1);
-	if (type == PPE_PKT_TYPE_IPV4_HNAPT)
-		memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
-		       sizeof(hwe.ipv4.new_tuple));
-	else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T &&
-		 l2->common.etype == ETH_P_IP)
-		l2->common.etype = ETH_P_IPV6;
-
-	hwe.bridge.ib2 = e->data.bridge.ib2;
+	if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+		memcpy(&hwe.ipv6.l2, &e->data.bridge.l2, sizeof(hwe.ipv6.l2));
+		hwe.ipv6.ib2 = e->data.bridge.ib2;
+		/* Setting smac_id to 0xf instructs the hw to keep the
+		 * original source mac address.
+		 */
+		hwe.ipv6.l2.src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID,
+						    0xf);
+	} else {
+		memcpy(&hwe.bridge.l2, &e->data.bridge.l2,
+		       sizeof(hwe.bridge.l2));
+		hwe.bridge.ib2 = e->data.bridge.ib2;
+		if (type == PPE_PKT_TYPE_IPV4_HNAPT)
+			memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
+			       sizeof(hwe.ipv4.new_tuple));
+	}
+
 	hwe.bridge.data = e->data.bridge.data;
 	airoha_ppe_foe_commit_entry(ppe, &hwe, hash);
 
@@ -1238,6 +1247,27 @@ void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
 	airoha_ppe_foe_insert_entry(ppe, skb, hash);
 }
 
+void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
+{
+	struct airoha_eth *eth = port->qdma->eth;
+	struct net_device *dev = port->dev;
+	const u8 *addr = dev->dev_addr;
+	u32 val;
+
+	val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
+	airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
+	airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
+		     FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
+		     PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
+
+	val = (addr[0] << 8) | addr[1];
+	airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
+	airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
+		     FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
+		     FIELD_PREP(PPE_UPDMEM_OFFSET_MASK, 1) |
+		     PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
+}
+
 int airoha_ppe_init(struct airoha_eth *eth)
 {
 	struct airoha_ppe *ppe;
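A note on the hunk above: airoha_ppe_init_upd_mem() mirrors the port's station MAC address into the PPE UPDMEM table, two 32-bit data words per port indexed by port->id, with the second write selecting word 1 via PPE_UPDMEM_OFFSET_MASK. The standalone sketch below only reproduces the byte packing visible in the helper, so the split between the two data words is easier to follow; it is an illustration, not driver code, and the register plumbing (REG_UPDMEM_DATA/REG_UPDMEM_CTRL and the PPE_UPDMEM_* masks) is intentionally left out.

#include <stdint.h>
#include <stdio.h>

/* Pack a 6-byte station address the same way the helper above does:
 * word 0 carries the low four bytes, word 1 the high two bytes.
 */
static void updmem_words_from_mac(const uint8_t addr[6],
				  uint32_t *word0, uint32_t *word1)
{
	*word0 = ((uint32_t)addr[2] << 24) | ((uint32_t)addr[3] << 16) |
		 ((uint32_t)addr[4] << 8) | addr[5];
	*word1 = ((uint32_t)addr[0] << 8) | addr[1];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t word0, word1;

	updmem_words_from_mac(mac, &word0, &word1);
	/* Expected output: word0=0x22334455 word1=0x00000011 */
	printf("word0=0x%08x word1=0x%08x\n", word0, word1);
	return 0;
}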
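On the smac_id change in the first three hunks: the FOE entry's SMAC_ID field used to hold the fixed value 0xf and now carries port->id for LAN GDM ports, while 0xf is still written where the original source MAC must be preserved (see the comment in the subflow hunk). The sketch below shows the FIELD_PREP() packing in userspace terms; the mask value is a placeholder chosen for illustration, not the driver's real AIROHA_FOE_MAC_SMAC_ID definition, and example_field_prep() is a simplified stand-in for the kernel helper (it relies on the GCC/Clang __builtin_ctz() builtin).

#include <stdint.h>
#include <stdio.h>

/* Placeholder mask for illustration only; the real field layout lives
 * in the driver headers and is not part of this diff.
 */
#define EXAMPLE_SMAC_ID_MASK	0x00f00000u

/* Shift a value up into the field described by the mask, the same idea
 * as the kernel's FIELD_PREP().
 */
static uint32_t example_field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t keep_original = example_field_prep(EXAMPLE_SMAC_ID_MASK, 0xf);
	uint32_t lan_port = example_field_prep(EXAMPLE_SMAC_ID_MASK, 3);

	/* 0xf marks "keep the original source MAC"; a small value such as
	 * 3 would stand for a LAN GDM port id written by the patch.
	 */
	printf("smac_id=0xf -> 0x%08x, smac_id=3 -> 0x%08x\n",
	       keep_original, lan_port);
	return 0;
}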