@@ -479,13 +479,20 @@ static int compare_netdev_and_ip(int ifindex_a, struct sockaddr *sa,
 	if (sa->sa_family != sb->sa_family)
 		return sa->sa_family - sb->sa_family;
 
-	if (sa->sa_family == AF_INET)
-		return memcmp((char *)&((struct sockaddr_in *)sa)->sin_addr,
-			      (char *)&((struct sockaddr_in *)sb)->sin_addr,
+	if (sa->sa_family == AF_INET &&
+	    __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in)) {
+		return memcmp(&((struct sockaddr_in *)sa)->sin_addr,
+			      &((struct sockaddr_in *)sb)->sin_addr,
 			      sizeof(((struct sockaddr_in *)sa)->sin_addr));
+	}
+
+	if (sa->sa_family == AF_INET6 &&
+	    __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in6)) {
+		return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
+				     &((struct sockaddr_in6 *)sb)->sin6_addr);
+	}
 
-	return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
-			     &((struct sockaddr_in6 *)sb)->sin6_addr);
+	return -1;
 }
 
 static int cma_add_id_to_tree(struct rdma_id_private *node_id_priv)
@@ -2819,38 +2826,40 @@ int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer)
 }
 EXPORT_SYMBOL(rdma_set_min_rnr_timer);
 
-static void route_set_path_rec_inbound(struct cma_work *work,
-				       struct sa_path_rec *path_rec)
+static int route_set_path_rec_inbound(struct cma_work *work,
+				      struct sa_path_rec *path_rec)
 {
 	struct rdma_route *route = &work->id->id.route;
 
 	if (!route->path_rec_inbound) {
 		route->path_rec_inbound =
 			kzalloc(sizeof(*route->path_rec_inbound), GFP_KERNEL);
 		if (!route->path_rec_inbound)
-			return;
+			return -ENOMEM;
 	}
 
 	*route->path_rec_inbound = *path_rec;
+	return 0;
 }
 
-static void route_set_path_rec_outbound(struct cma_work *work,
-					struct sa_path_rec *path_rec)
+static int route_set_path_rec_outbound(struct cma_work *work,
+					struct sa_path_rec *path_rec)
 {
 	struct rdma_route *route = &work->id->id.route;
 
 	if (!route->path_rec_outbound) {
 		route->path_rec_outbound =
 			kzalloc(sizeof(*route->path_rec_outbound), GFP_KERNEL);
 		if (!route->path_rec_outbound)
-			return;
+			return -ENOMEM;
 	}
 
 	*route->path_rec_outbound = *path_rec;
+	return 0;
 }
 
 static void cma_query_handler(int status, struct sa_path_rec *path_rec,
-			      int num_prs, void *context)
+			      unsigned int num_prs, void *context)
 {
 	struct cma_work *work = context;
 	struct rdma_route *route;
@@ -2865,13 +2874,15 @@ static void cma_query_handler(int status, struct sa_path_rec *path_rec,
 		if (!path_rec[i].flags || (path_rec[i].flags & IB_PATH_GMP))
 			*route->path_rec = path_rec[i];
 		else if (path_rec[i].flags & IB_PATH_INBOUND)
-			route_set_path_rec_inbound(work, &path_rec[i]);
+			status = route_set_path_rec_inbound(work, &path_rec[i]);
 		else if (path_rec[i].flags & IB_PATH_OUTBOUND)
-			route_set_path_rec_outbound(work, &path_rec[i]);
-	}
-	if (!route->path_rec) {
-		status = -EINVAL;
-		goto fail;
+			status = route_set_path_rec_outbound(work,
+							     &path_rec[i]);
+		else
+			status = -EINVAL;
+
+		if (status)
+			goto fail;
 	}
 
 	route->num_pri_alt_paths = 1;
@@ -3541,121 +3552,6 @@ static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
 	return ret;
 }
 
-static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
-			 const struct sockaddr *dst_addr)
-{
-	struct sockaddr_storage zero_sock = {};
-
-	if (src_addr && src_addr->sa_family)
-		return rdma_bind_addr(id, src_addr);
-
-	/*
-	 * When the src_addr is not specified, automatically supply an any addr
-	 */
-	zero_sock.ss_family = dst_addr->sa_family;
-	if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
-		struct sockaddr_in6 *src_addr6 =
-			(struct sockaddr_in6 *)&zero_sock;
-		struct sockaddr_in6 *dst_addr6 =
-			(struct sockaddr_in6 *)dst_addr;
-
-		src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
-		if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
-			id->route.addr.dev_addr.bound_dev_if =
-				dst_addr6->sin6_scope_id;
-	} else if (dst_addr->sa_family == AF_IB) {
-		((struct sockaddr_ib *)&zero_sock)->sib_pkey =
-			((struct sockaddr_ib *)dst_addr)->sib_pkey;
-	}
-	return rdma_bind_addr(id, (struct sockaddr *)&zero_sock);
-}
-
-/*
- * If required, resolve the source address for bind and leave the id_priv in
- * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior
- * calls made by ULP, a previously bound ID will not be re-bound and src_addr is
- * ignored.
- */
-static int resolve_prepare_src(struct rdma_id_private *id_priv,
-			       struct sockaddr *src_addr,
-			       const struct sockaddr *dst_addr)
-{
-	int ret;
-
-	memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
-	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
-		/* For a well behaved ULP state will be RDMA_CM_IDLE */
-		ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
-		if (ret)
-			goto err_dst;
-		if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
-					   RDMA_CM_ADDR_QUERY))) {
-			ret = -EINVAL;
-			goto err_dst;
-		}
-	}
-
-	if (cma_family(id_priv) != dst_addr->sa_family) {
-		ret = -EINVAL;
-		goto err_state;
-	}
-	return 0;
-
-err_state:
-	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
-err_dst:
-	memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
-	return ret;
-}
-
-int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
-		      const struct sockaddr *dst_addr, unsigned long timeout_ms)
-{
-	struct rdma_id_private *id_priv =
-		container_of(id, struct rdma_id_private, id);
-	int ret;
-
-	ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
-	if (ret)
-		return ret;
-
-	if (cma_any_addr(dst_addr)) {
-		ret = cma_resolve_loopback(id_priv);
-	} else {
-		if (dst_addr->sa_family == AF_IB) {
-			ret = cma_resolve_ib_addr(id_priv);
-		} else {
-			/*
-			 * The FSM can return back to RDMA_CM_ADDR_BOUND after
-			 * rdma_resolve_ip() is called, eg through the error
-			 * path in addr_handler(). If this happens the existing
-			 * request must be canceled before issuing a new one.
-			 * Since canceling a request is a bit slow and this
-			 * oddball path is rare, keep track once a request has
-			 * been issued. The track turns out to be a permanent
-			 * state since this is the only cancel as it is
-			 * immediately before rdma_resolve_ip().
-			 */
-			if (id_priv->used_resolve_ip)
-				rdma_addr_cancel(&id->route.addr.dev_addr);
-			else
-				id_priv->used_resolve_ip = 1;
-			ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
-					      &id->route.addr.dev_addr,
-					      timeout_ms, addr_handler,
-					      false, id_priv);
-		}
-	}
-	if (ret)
-		goto err;
-
-	return 0;
-err:
-	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
-	return ret;
-}
-EXPORT_SYMBOL(rdma_resolve_addr);
-
 int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
 {
 	struct rdma_id_private *id_priv;
@@ -4058,27 +3954,26 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
 }
 EXPORT_SYMBOL(rdma_listen);
 
-int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
+static int rdma_bind_addr_dst(struct rdma_id_private *id_priv,
+			      struct sockaddr *addr, const struct sockaddr *daddr)
 {
-	struct rdma_id_private *id_priv;
+	struct sockaddr *id_daddr;
 	int ret;
-	struct sockaddr *daddr;
 
 	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
 	    addr->sa_family != AF_IB)
 		return -EAFNOSUPPORT;
 
-	id_priv = container_of(id, struct rdma_id_private, id);
 	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
 		return -EINVAL;
 
-	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
+	ret = cma_check_linklocal(&id_priv->id.route.addr.dev_addr, addr);
 	if (ret)
 		goto err1;
 
 	memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
 	if (!cma_any_addr(addr)) {
-		ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
+		ret = cma_translate_addr(addr, &id_priv->id.route.addr.dev_addr);
 		if (ret)
 			goto err1;
@@ -4098,8 +3993,10 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 	}
 #endif
 	}
-	daddr = cma_dst_addr(id_priv);
-	daddr->sa_family = addr->sa_family;
+	id_daddr = cma_dst_addr(id_priv);
+	if (daddr != id_daddr)
+		memcpy(id_daddr, daddr, rdma_addr_size(addr));
+	id_daddr->sa_family = addr->sa_family;
 
 	ret = cma_get_port(id_priv);
 	if (ret)
@@ -4115,6 +4012,127 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
 	return ret;
 }
+
+static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
+			 const struct sockaddr *dst_addr)
+{
+	struct rdma_id_private *id_priv =
+		container_of(id, struct rdma_id_private, id);
+	struct sockaddr_storage zero_sock = {};
+
+	if (src_addr && src_addr->sa_family)
+		return rdma_bind_addr_dst(id_priv, src_addr, dst_addr);
+
+	/*
+	 * When the src_addr is not specified, automatically supply an any addr
+	 */
+	zero_sock.ss_family = dst_addr->sa_family;
+	if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
+		struct sockaddr_in6 *src_addr6 =
+			(struct sockaddr_in6 *)&zero_sock;
+		struct sockaddr_in6 *dst_addr6 =
+			(struct sockaddr_in6 *)dst_addr;
+
+		src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
+		if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+			id->route.addr.dev_addr.bound_dev_if =
+				dst_addr6->sin6_scope_id;
+	} else if (dst_addr->sa_family == AF_IB) {
+		((struct sockaddr_ib *)&zero_sock)->sib_pkey =
+			((struct sockaddr_ib *)dst_addr)->sib_pkey;
+	}
+	return rdma_bind_addr_dst(id_priv, (struct sockaddr *)&zero_sock, dst_addr);
+}
+
+/*
+ * If required, resolve the source address for bind and leave the id_priv in
+ * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior
+ * calls made by ULP, a previously bound ID will not be re-bound and src_addr is
+ * ignored.
+ */
+static int resolve_prepare_src(struct rdma_id_private *id_priv,
+			       struct sockaddr *src_addr,
+			       const struct sockaddr *dst_addr)
+{
+	int ret;
+
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
+		/* For a well behaved ULP state will be RDMA_CM_IDLE */
+		ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
+		if (ret)
+			return ret;
+		if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
+					   RDMA_CM_ADDR_QUERY)))
+			return -EINVAL;
+
+	}
+
+	if (cma_family(id_priv) != dst_addr->sa_family) {
+		ret = -EINVAL;
+		goto err_state;
+	}
+	return 0;
+
+err_state:
+	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
+	return ret;
+}
+
+int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
+		      const struct sockaddr *dst_addr, unsigned long timeout_ms)
+{
+	struct rdma_id_private *id_priv =
+		container_of(id, struct rdma_id_private, id);
+	int ret;
+
+	ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
+	if (ret)
+		return ret;
+
+	if (cma_any_addr(dst_addr)) {
+		ret = cma_resolve_loopback(id_priv);
+	} else {
+		if (dst_addr->sa_family == AF_IB) {
+			ret = cma_resolve_ib_addr(id_priv);
+		} else {
+			/*
+			 * The FSM can return back to RDMA_CM_ADDR_BOUND after
+			 * rdma_resolve_ip() is called, eg through the error
+			 * path in addr_handler(). If this happens the existing
+			 * request must be canceled before issuing a new one.
+			 * Since canceling a request is a bit slow and this
+			 * oddball path is rare, keep track once a request has
+			 * been issued. The track turns out to be a permanent
+			 * state since this is the only cancel as it is
+			 * immediately before rdma_resolve_ip().
+			 */
+			if (id_priv->used_resolve_ip)
+				rdma_addr_cancel(&id->route.addr.dev_addr);
+			else
+				id_priv->used_resolve_ip = 1;
+			ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
+					      &id->route.addr.dev_addr,
+					      timeout_ms, addr_handler,
+					      false, id_priv);
+		}
+	}
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
+	return ret;
+}
+EXPORT_SYMBOL(rdma_resolve_addr);
+
+int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
+{
+	struct rdma_id_private *id_priv =
+		container_of(id, struct rdma_id_private, id);
+
+	return rdma_bind_addr_dst(id_priv, addr, cma_dst_addr(id_priv));
+}
 EXPORT_SYMBOL(rdma_bind_addr);
 
 static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
0 commit comments