@@ -36,15 +36,15 @@ struct napi_gro_cb {
3636 /* This is non-zero if the packet cannot be merged with the new skb. */
3737 u16 flush ;
3838
39- /* Save the IP ID here and check when we get to the transport layer */
40- u16 flush_id ;
41-
4239 /* Number of segments aggregated. */
4340 u16 count ;
4441
4542 /* Used in ipv6_gro_receive() and foo-over-udp and esp-in-udp */
4643 u16 proto ;
4744
45+ /* used to support CHECKSUM_COMPLETE for tunneling protocols */
46+ __wsum csum ;
47+
4848/* Used in napi_gro_cb::free */
4949#define NAPI_GRO_FREE 1
5050#define NAPI_GRO_FREE_STOLEN_HEAD 2
@@ -75,8 +75,8 @@ struct napi_gro_cb {
7575 /* Used in GRE, set in fou/gue_gro_receive */
7676 u8 is_fou :1 ;
7777
78- /* Used to determine if flush_id can be ignored */
79- u8 is_atomic :1 ;
78+ /* Used to determine if ipid_offset can be ignored */
79+ u8 ip_fixedid :1 ;
8080
8181 /* Number of gro_receive callbacks this packet already went through */
8282 u8 recursion_counter :4 ;
@@ -85,9 +85,6 @@ struct napi_gro_cb {
8585 u8 is_flist :1 ;
8686 );
8787
88- /* used to support CHECKSUM_COMPLETE for tunneling protocols */
89- __wsum csum ;
90-
9188 /* L3 offsets */
9289 union {
9390 struct {
@@ -181,12 +178,17 @@ static inline void *skb_gro_header(struct sk_buff *skb, unsigned int hlen,
181178 return ptr ;
182179}
183180
181+ static inline int skb_gro_receive_network_offset (const struct sk_buff * skb )
182+ {
183+ return NAPI_GRO_CB (skb )-> network_offsets [NAPI_GRO_CB (skb )-> encap_mark ];
184+ }
185+
184186static inline void * skb_gro_network_header (const struct sk_buff * skb )
185187{
186188 if (skb_gro_may_pull (skb , skb_gro_offset (skb )))
187- return skb_gro_header_fast (skb , skb_network_offset (skb ));
189+ return skb_gro_header_fast (skb , skb_gro_receive_network_offset (skb ));
188190
189- return skb_network_header (skb );
191+ return skb -> data + skb_gro_receive_network_offset (skb );
190192}
191193
192194static inline __wsum inet_gro_compute_pseudo (const struct sk_buff * skb ,
@@ -437,6 +439,69 @@ static inline __wsum ip6_gro_compute_pseudo(const struct sk_buff *skb,
437439 skb_gro_len (skb ), proto , 0 ));
438440}
439441
/* IPv4 network-layer flush check for GRO: compare the new packet's IP
 * header @iph against @iph2, the header of the held packet @p that
 * heads the flow.  Returns non-zero if the packets cannot be merged
 * (flush), 0 if aggregation may continue.
 *
 * @outer is true when this is the outer header of an encapsulated
 * packet; outer headers with DF set skip the IP ID check entirely.
 */
static inline int inet_gro_flush(const struct iphdr *iph, const struct iphdr *iph2,
				 struct sk_buff *p, bool outer)
{
	/* id and frag_off are adjacent in the header and loaded as one
	 * 32-bit word: after ntohl() the IP ID is in the top 16 bits
	 * and the flags/frag_off field (including IP_DF) in the low 16.
	 */
	const u32 id = ntohl(*(__be32 *)&iph->id);
	const u32 id2 = ntohl(*(__be32 *)&iph2->id);
	/* u16 arithmetic: ID delta of new vs. held packet, modulo 2^16 */
	const u16 ipid_offset = (id >> 16) - (id2 >> 16);
	const u16 count = NAPI_GRO_CB(p)->count;
	const u32 df = id & IP_DF;
	int flush;

	/* All fields must match except length and checksum. */
	flush = (iph->ttl ^ iph2->ttl) | (iph->tos ^ iph2->tos) | (df ^ (id2 & IP_DF));

	/* Flush on any mismatch above; also return early (ID check
	 * skipped, flush == 0) for outer headers with DF set.
	 */
	if (flush | (outer && df))
		return flush;

	/* When we receive our second frame we can make a decision on if we
	 * continue this flow as an atomic flow with a fixed ID or if we use
	 * an incrementing ID.
	 */
	if (count == 1 && df && !ipid_offset)
		NAPI_GRO_CB(p)->ip_fixedid = true;

	/* Fixed-ID flow: non-zero (flush) unless the ID is unchanged.
	 * Incrementing flow: non-zero unless the ID advanced by exactly
	 * @count segments.
	 */
	return ipid_offset ^ (count * !NAPI_GRO_CB(p)->ip_fixedid);
}
467+
468+ static inline int ipv6_gro_flush (const struct ipv6hdr * iph , const struct ipv6hdr * iph2 )
469+ {
470+ /* <Version:4><Traffic_Class:8><Flow_Label:20> */
471+ __be32 first_word = * (__be32 * )iph ^ * (__be32 * )iph2 ;
472+
473+ /* Flush if Traffic Class fields are different. */
474+ return !!((first_word & htonl (0x0FF00000 )) |
475+ (__force __be32 )(iph -> hop_limit ^ iph2 -> hop_limit ));
476+ }
477+
478+ static inline int __gro_receive_network_flush (const void * th , const void * th2 ,
479+ struct sk_buff * p , const u16 diff ,
480+ bool outer )
481+ {
482+ const void * nh = th - diff ;
483+ const void * nh2 = th2 - diff ;
484+
485+ if (((struct iphdr * )nh )-> version == 6 )
486+ return ipv6_gro_flush (nh , nh2 );
487+ else
488+ return inet_gro_flush (nh , nh2 , p , outer );
489+ }
490+
491+ static inline int gro_receive_network_flush (const void * th , const void * th2 ,
492+ struct sk_buff * p )
493+ {
494+ const bool encap_mark = NAPI_GRO_CB (p )-> encap_mark ;
495+ int off = skb_transport_offset (p );
496+ int flush ;
497+
498+ flush = __gro_receive_network_flush (th , th2 , p , off - NAPI_GRO_CB (p )-> network_offset , encap_mark );
499+ if (encap_mark )
500+ flush |= __gro_receive_network_flush (th , th2 , p , off - NAPI_GRO_CB (p )-> inner_network_offset , false);
501+
502+ return flush ;
503+ }
504+
440505int skb_gro_receive (struct sk_buff * p , struct sk_buff * skb );
441506int skb_gro_receive_list (struct sk_buff * p , struct sk_buff * skb );
442507
0 commit comments