@@ -49,6 +49,7 @@ static struct kmem_cache *xfrm_state_cache __ro_after_init;
49
49
50
50
/* Deferred teardown of xfrm_state objects: states queued on
 * xfrm_state_gc_list are destroyed asynchronously by xfrm_state_gc_task(),
 * scheduled through the xfrm_state_gc_work work item.
 */
static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
static HLIST_HEAD(xfrm_state_gc_list);
/* Offloaded states whose device-side resources still need releasing;
 * linked via x->dev_gclist (added in xfrm_dev_state_delete(), drained
 * via xfrm_dev_state_free()).
 */
static HLIST_HEAD(xfrm_state_dev_gc_list);
52
53
53
54
static inline bool xfrm_state_hold_rcu (struct xfrm_state __rcu * x )
54
55
{
@@ -214,6 +215,7 @@ static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
214
215
/* Per-protocol address-family info, published via RCU (NPROTO slots). */
static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];

static DEFINE_SPINLOCK(xfrm_state_gc_lock);
/* Protects xfrm_state_dev_gc_list; always taken with BH disabled. */
static DEFINE_SPINLOCK(xfrm_state_dev_gc_lock);
217
219
218
220
int __xfrm_state_delete (struct xfrm_state * x );
219
221
@@ -683,6 +685,40 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
683
685
}
684
686
EXPORT_SYMBOL (xfrm_state_alloc );
685
687
688
#ifdef CONFIG_XFRM_OFFLOAD
/**
 * xfrm_dev_state_delete - tear down the hardware side of an offloaded state
 * @x: xfrm state being deleted
 *
 * If @x is bound to a device (xso->dev non-NULL), ask the driver to delete
 * its hardware state, then queue @x on xfrm_state_dev_gc_list so the
 * remaining device resources can be released later by xfrm_dev_state_free().
 * No-op for states that were never offloaded.
 */
void xfrm_dev_state_delete(struct xfrm_state *x)
{
	struct xfrm_dev_offload *xso = &x->xso;
	/* Pairs with WRITE_ONCE(xso->dev, NULL) in xfrm_dev_state_free(). */
	struct net_device *dev = READ_ONCE(xso->dev);

	if (dev) {
		dev->xfrmdev_ops->xdo_dev_state_delete(x);
		spin_lock_bh(&xfrm_state_dev_gc_lock);
		hlist_add_head(&x->dev_gclist, &xfrm_state_dev_gc_list);
		spin_unlock_bh(&xfrm_state_dev_gc_lock);
	}
}

/**
 * xfrm_dev_state_free - release device resources held by an offloaded state
 * @x: xfrm state whose offload binding is being torn down
 *
 * Unlinks @x from xfrm_state_dev_gc_list if it is still queued there,
 * invokes the driver's optional xdo_dev_state_free callback, clears the
 * offload binding (xso->dev, xso->type) and drops the tracked netdev
 * reference.  No-op when xso->dev is NULL or the device has no
 * xfrmdev_ops.
 *
 * NOTE(review): the hlist_unhashed() check assumes x->dev_gclist is in a
 * defined (zeroed/initialized) state for states never queued by
 * xfrm_dev_state_delete() — confirm the state allocator guarantees this.
 * Also, no EXPORT_SYMBOL is visible for these two functions in this view;
 * confirm module callers do not need it or that the export lives elsewhere.
 */
void xfrm_dev_state_free(struct xfrm_state *x)
{
	struct xfrm_dev_offload *xso = &x->xso;
	struct net_device *dev = READ_ONCE(xso->dev);

	if (dev && dev->xfrmdev_ops) {
		/* Unlink before calling into the driver so a concurrent
		 * GC-list walker cannot see @x after its resources are gone.
		 */
		spin_lock_bh(&xfrm_state_dev_gc_lock);
		if (!hlist_unhashed(&x->dev_gclist))
			hlist_del(&x->dev_gclist);
		spin_unlock_bh(&xfrm_state_dev_gc_lock);

		if (dev->xfrmdev_ops->xdo_dev_state_free)
			dev->xfrmdev_ops->xdo_dev_state_free(x);
		WRITE_ONCE(xso->dev, NULL);
		xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
		netdev_put(dev, &xso->dev_tracker);
	}
}
#endif
721
+
686
722
void __xfrm_state_destroy (struct xfrm_state * x , bool sync )
687
723
{
688
724
WARN_ON (x -> km .state != XFRM_STATE_DEAD );
@@ -848,6 +884,9 @@ EXPORT_SYMBOL(xfrm_state_flush);
848
884
849
885
int xfrm_dev_state_flush (struct net * net , struct net_device * dev , bool task_valid )
850
886
{
887
+ struct xfrm_state * x ;
888
+ struct hlist_node * tmp ;
889
+ struct xfrm_dev_offload * xso ;
851
890
int i , err = 0 , cnt = 0 ;
852
891
853
892
spin_lock_bh (& net -> xfrm .xfrm_state_lock );
@@ -857,8 +896,6 @@ int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_vali
857
896
858
897
err = - ESRCH ;
859
898
for (i = 0 ; i <= net -> xfrm .state_hmask ; i ++ ) {
860
- struct xfrm_state * x ;
861
- struct xfrm_dev_offload * xso ;
862
899
restart :
863
900
hlist_for_each_entry (x , net -> xfrm .state_bydst + i , bydst ) {
864
901
xso = & x -> xso ;
@@ -868,6 +905,8 @@ int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_vali
868
905
spin_unlock_bh (& net -> xfrm .xfrm_state_lock );
869
906
870
907
err = xfrm_state_delete (x );
908
+ xfrm_dev_state_free (x );
909
+
871
910
xfrm_audit_state_delete (x , err ? 0 : 1 ,
872
911
task_valid );
873
912
xfrm_state_put (x );
@@ -884,6 +923,24 @@ int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_vali
884
923
885
924
out :
886
925
spin_unlock_bh (& net -> xfrm .xfrm_state_lock );
926
+
927
+ spin_lock_bh (& xfrm_state_dev_gc_lock );
928
+ restart_gc :
929
+ hlist_for_each_entry_safe (x , tmp , & xfrm_state_dev_gc_list , dev_gclist ) {
930
+ xso = & x -> xso ;
931
+
932
+ if (xso -> dev == dev ) {
933
+ spin_unlock_bh (& xfrm_state_dev_gc_lock );
934
+ xfrm_dev_state_free (x );
935
+ spin_lock_bh (& xfrm_state_dev_gc_lock );
936
+ goto restart_gc ;
937
+ }
938
+
939
+ }
940
+ spin_unlock_bh (& xfrm_state_dev_gc_lock );
941
+
942
+ xfrm_flush_gc ();
943
+
887
944
return err ;
888
945
}
889
946
EXPORT_SYMBOL (xfrm_dev_state_flush );
0 commit comments