@@ -604,7 +604,8 @@ void uct_rc_mlx5_release_desc(uct_recv_desc_t *self, void *desc)
 ucs_status_t
 uct_rc_mlx5_dp_ordering_ooo_init(uct_ib_mlx5_md_t *md,
                                  uct_rc_mlx5_iface_common_t *iface,
-                                 uct_ib_mlx5_dp_ordering_t dp_ordering_cap,
+                                 uct_ib_mlx5_dp_ordering_t dp_ordering_cap_devx,
+                                 int ddp_supported_dv,
                                  uct_rc_mlx5_iface_common_config_t *config,
                                  const char *tl_name)
 {
@@ -613,11 +614,25 @@ uct_rc_mlx5_dp_ordering_ooo_init(uct_ib_mlx5_md_t *md,
         [UCT_IB_MLX5_DP_ORDERING_OOO_RW]  = "OOO_RW",
         [UCT_IB_MLX5_DP_ORDERING_OOO_ALL] = "OOO_ALL"
     };
-    uct_ib_mlx5_dp_ordering_t max_dp_ordering = dp_ordering_cap;
+    uct_ib_mlx5_dp_ordering_t max_dp_ordering = dp_ordering_cap_devx;
     uct_ib_mlx5_dp_ordering_t min_dp_ordering = UCT_IB_MLX5_DP_ORDERING_IBTA;
     char ar_enable_str[16], ddp_enable_str[16];
     int force;
 
+    if (!(md->flags & UCT_IB_MLX5_MD_FLAG_DEVX)) {
+        if ((config->ddp_enable == UCS_YES) && !ddp_supported_dv) {
+            ucs_error("%s/%s: ddp is not supported for DV",
+                      uct_ib_device_name(&md->super.dev), tl_name);
+            return UCS_ERR_INVALID_PARAM;
+        }
+
+        iface->config.ddp_enabled_dv    = (config->ddp_enable != UCS_NO) &&
+                                          ddp_supported_dv;
+        iface->config.dp_ordering_devx  = UCT_IB_MLX5_DP_ORDERING_IBTA;
+        iface->config.dp_ordering_force = 0;
+        return UCS_OK;
+    }
+
     /*
      * HCA has an mlxreg admin configuration to force enable adaptive routing
      * (AR) or not.
@@ -657,7 +672,7 @@ uct_rc_mlx5_dp_ordering_ooo_init(uct_ib_mlx5_md_t *md,
      * but can't force it
      */
     force = (min_dp_ordering > UCT_IB_MLX5_DP_ORDERING_IBTA) ||
-            (max_dp_ordering < dp_ordering_cap);
+            (max_dp_ordering < dp_ordering_cap_devx);
     if ((min_dp_ordering > max_dp_ordering) ||
         (force && !(md->flags & UCT_IB_MLX5_MD_FLAG_DP_ORDERING_FORCE))) {
         ucs_config_sprintf_ternary_auto(ar_enable_str, sizeof(ar_enable_str),
@@ -667,15 +682,16 @@ uct_rc_mlx5_dp_ordering_ooo_init(uct_ib_mlx5_md_t *md,
         ucs_error("%s/%s: cannot set ar_enable=%s ddp_enable=%s cap=%s "
                   "supp_force=%d (min=%s max=%s force=%d)",
                   uct_ib_device_name(&md->super.dev), tl_name, ar_enable_str,
-                  ddp_enable_str, dp_ordering_names[dp_ordering_cap],
+                  ddp_enable_str, dp_ordering_names[dp_ordering_cap_devx],
                   !!(md->flags & UCT_IB_MLX5_MD_FLAG_DP_ORDERING_FORCE),
                   dp_ordering_names[min_dp_ordering],
                   dp_ordering_names[max_dp_ordering], force);
         return UCS_ERR_INVALID_PARAM;
     }
 
-    iface->config.dp_ordering       = max_dp_ordering;
+    iface->config.dp_ordering_devx  = max_dp_ordering;
     iface->config.dp_ordering_force = force;
+    iface->config.ddp_enabled_dv    = 0;
     return UCS_OK;
 }
 
@@ -1107,7 +1123,7 @@ void uct_rc_mlx5_common_fill_dv_qp_attr(uct_rc_mlx5_iface_common_t *iface,
     }
 
 #ifdef HAVE_OOO_RECV_WRS
-    if (iface->config.dp_ordering == UCT_IB_MLX5_DP_ORDERING_OOO_ALL) {
+    if (iface->config.ddp_enabled_dv) {
         dv_attr->create_flags |= MLX5DV_QP_CREATE_OOO_DP;
         dv_attr->comp_mask    |= MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
     }
@@ -1293,9 +1309,9 @@ void uct_ib_mlx5_devx_set_qpc_dp_ordering(uct_ib_mlx5_md_t *md, void *qpc,
                                           uct_rc_mlx5_iface_common_t *iface)
 {
     UCT_IB_MLX5DV_SET(qpc, qpc, dp_ordering_0,
-                      UCS_BIT_GET(iface->config.dp_ordering, 0));
+                      UCS_BIT_GET(iface->config.dp_ordering_devx, 0));
     UCT_IB_MLX5DV_SET(qpc, qpc, dp_ordering_1,
-                      UCS_BIT_GET(iface->config.dp_ordering, 1));
+                      UCS_BIT_GET(iface->config.dp_ordering_devx, 1));
     UCT_IB_MLX5DV_SET(qpc, qpc, dp_ordering_force,
                       iface->config.dp_ordering_force);
 }
0 commit comments