|
5 | 5 | #include "mlx5_core.h" |
6 | 6 |
|
/* MNVDA configuration item parameter indices, grouped by type class. */
enum {
	/* Type class 0: device-global configuration items. */
	MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CONF = 0x80,
	MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CAP = 0x81,
	MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CONFIG = 0x10a,

	/* Type class 3: per-host-PF configuration items. */
	MLX5_CLASS_3_CTRL_ID_NV_PF_PCI_CONF = 0x80,
};
10 | 14 |
|
/*
 * MNVDA config item header layout for type class 0 (global) items:
 * the parameter index alone addresses the item.
 */
struct mlx5_ifc_configuration_item_type_class_global_bits {
	u8 type_class[0x8];
	u8 parameter_index[0x18];
};
15 | 19 |
|
/*
 * MNVDA config item header layout for type class 3 (per-host-PF) items:
 * the item is addressed by PF index and PCI bus in addition to the
 * parameter index.
 */
struct mlx5_ifc_configuration_item_type_class_per_host_pf_bits {
	u8 type_class[0x8];
	u8 pf_index[0x6];
	u8 pci_bus_index[0x8];
	u8 parameter_index[0xa];
};
| 26 | + |
/*
 * One 32-bit MNVDA config item header; the active view depends on the
 * type_class field (shared first byte of every member).
 */
union mlx5_ifc_config_item_type_auto_bits {
	struct mlx5_ifc_configuration_item_type_class_global_bits
		configuration_item_type_class_global;
	struct mlx5_ifc_configuration_item_type_class_per_host_pf_bits
		configuration_item_type_class_per_host_pf;
	u8 reserved_at_0[0x20];
};
21 | 34 |
|
@@ -45,6 +58,45 @@ struct mlx5_ifc_mnvda_reg_bits { |
45 | 58 | u8 configuration_item_data[64][0x20]; |
46 | 59 | }; |
47 | 60 |
|
/* Payload layout of the NV_GLOBAL_PCI_CONF configuration item. */
struct mlx5_ifc_nv_global_pci_conf_bits {
	u8 sriov_valid[0x1];	/* set when writing to mark the SRIOV fields as valid */
	u8 reserved_at_1[0x10];
	u8 per_pf_total_vf[0x1];	/* VF enablement/budget is managed per PF, not globally */
	u8 reserved_at_12[0xe];

	u8 sriov_en[0x1];	/* global SRIOV enable */
	u8 reserved_at_21[0xf];
	u8 total_vfs[0x10];	/* presumably the global total VF count — not accessed here */

	u8 reserved_at_40[0x20];
};
| 73 | + |
/* Payload layout of the NV_GLOBAL_PCI_CAP capability item (read-only). */
struct mlx5_ifc_nv_global_pci_cap_bits {
	u8 max_vfs_per_pf_valid[0x1];	/* presumably qualifies max_vfs_per_pf — not used here */
	u8 reserved_at_1[0x13];
	u8 per_pf_total_vf_supported[0x1];	/* device can manage VFs per PF */
	u8 reserved_at_15[0xb];

	u8 sriov_support[0x1];	/* device supports SRIOV at all */
	u8 reserved_at_21[0xf];
	u8 max_vfs_per_pf[0x10];

	u8 reserved_at_40[0x60];
};
| 86 | + |
/* Payload layout of the per-host-PF NV_PF_PCI_CONF configuration item. */
struct mlx5_ifc_nv_pf_pci_conf_bits {
	u8 reserved_at_0[0x9];
	u8 pf_total_vf_en[0x1];	/* per-PF SRIOV enable toggle */
	u8 reserved_at_a[0x16];

	u8 reserved_at_20[0x20];

	u8 reserved_at_40[0x10];
	u8 total_vf[0x10];	/* presumably the per-PF VF count — not accessed in visible code */

	u8 reserved_at_60[0x20];
};
| 99 | + |
48 | 100 | struct mlx5_ifc_nv_sw_offload_conf_bits { |
49 | 101 | u8 ip_over_vxlan_port[0x10]; |
50 | 102 | u8 tunnel_ecn_copy_offload_disable[0x1]; |
@@ -216,7 +268,154 @@ mlx5_nv_param_devlink_cqe_compress_set(struct devlink *devlink, u32 id, |
216 | 268 | return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda)); |
217 | 269 | } |
218 | 270 |
|
/*
 * Address the global NV_GLOBAL_PCI_CONF item (type class 0) in the MNVDA
 * header of @mnvda and read it from firmware.
 * @mnvda: caller-provided mnvda_reg buffer of @len bytes.
 * Returns the result of mlx5_nv_param_read().
 */
static int mlx5_nv_param_read_global_pci_conf(struct mlx5_core_dev *dev,
					      void *mnvda, size_t len)
{
	MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
	MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
			       MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CONF);
	MLX5_SET_CFG_HDR_LEN(mnvda, nv_global_pci_conf);

	return mlx5_nv_param_read(dev, mnvda, len);
}
| 281 | + |
/*
 * Address the global NV_GLOBAL_PCI_CAP item (type class 0) in the MNVDA
 * header of @mnvda and read it from firmware.
 * @mnvda: caller-provided mnvda_reg buffer of @len bytes.
 * Returns the result of mlx5_nv_param_read().
 */
static int mlx5_nv_param_read_global_pci_cap(struct mlx5_core_dev *dev,
					     void *mnvda, size_t len)
{
	MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
	MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
			       MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CAP);
	MLX5_SET_CFG_HDR_LEN(mnvda, nv_global_pci_cap);

	return mlx5_nv_param_read(dev, mnvda, len);
}
| 292 | + |
/*
 * Address this PF's NV_PF_PCI_CONF item (type class 3, per-host-PF) in
 * the MNVDA header of @mnvda and read it from firmware. The pf_index /
 * pci_bus_index header fields are left at zero here — presumably
 * firmware resolves the issuing function; confirm against the register
 * spec.
 * Returns the result of mlx5_nv_param_read().
 */
static int mlx5_nv_param_read_per_host_pf_conf(struct mlx5_core_dev *dev,
					       void *mnvda, size_t len)
{
	MLX5_SET_CFG_ITEM_TYPE(per_host_pf, mnvda, type_class, 3);
	MLX5_SET_CFG_ITEM_TYPE(per_host_pf, mnvda, parameter_index,
			       MLX5_CLASS_3_CTRL_ID_NV_PF_PCI_CONF);
	MLX5_SET_CFG_HDR_LEN(mnvda, nv_pf_pci_conf);

	return mlx5_nv_param_read(dev, mnvda, len);
}
| 303 | + |
| 304 | +static int mlx5_devlink_enable_sriov_get(struct devlink *devlink, u32 id, |
| 305 | + struct devlink_param_gset_ctx *ctx) |
| 306 | +{ |
| 307 | + struct mlx5_core_dev *dev = devlink_priv(devlink); |
| 308 | + u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {}; |
| 309 | + bool sriov_en = false; |
| 310 | + void *data; |
| 311 | + int err; |
| 312 | + |
| 313 | + err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda)); |
| 314 | + if (err) |
| 315 | + return err; |
| 316 | + |
| 317 | + data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data); |
| 318 | + if (!MLX5_GET(nv_global_pci_cap, data, sriov_support)) { |
| 319 | + ctx->val.vbool = false; |
| 320 | + return 0; |
| 321 | + } |
| 322 | + |
| 323 | + memset(mnvda, 0, sizeof(mnvda)); |
| 324 | + err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda)); |
| 325 | + if (err) |
| 326 | + return err; |
| 327 | + |
| 328 | + data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data); |
| 329 | + sriov_en = MLX5_GET(nv_global_pci_conf, data, sriov_en); |
| 330 | + if (!MLX5_GET(nv_global_pci_conf, data, per_pf_total_vf)) { |
| 331 | + ctx->val.vbool = sriov_en; |
| 332 | + return 0; |
| 333 | + } |
| 334 | + |
| 335 | + /* SRIOV is per PF */ |
| 336 | + memset(mnvda, 0, sizeof(mnvda)); |
| 337 | + err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda)); |
| 338 | + if (err) |
| 339 | + return err; |
| 340 | + |
| 341 | + data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data); |
| 342 | + ctx->val.vbool = sriov_en && |
| 343 | + MLX5_GET(nv_pf_pci_conf, data, pf_total_vf_en); |
| 344 | + return 0; |
| 345 | +} |
| 346 | + |
/*
 * Devlink "enable_sriov" set callback (permanent cmode).
 *
 * Sequence (order matters — the global configuration must be committed
 * before the per-PF toggle is applied):
 *   1. Read NV_GLOBAL_PCI_CAP; require sriov_support and
 *      per_pf_total_vf_supported, else -EOPNOTSUPP.
 *   2. Read-modify-write NV_GLOBAL_PCI_CONF: mark the SRIOV fields
 *      valid, enable SRIOV globally, and select per-PF VF mode.
 *      Note this is done regardless of the requested value; only the
 *      per-PF bit below reflects ctx->val.vbool.
 *   3. Read-modify-write this PF's NV_PF_PCI_CONF: set pf_total_vf_en
 *      from the requested bool.
 *
 * Returns 0 on success, -EOPNOTSUPP when unsupported, or the error from
 * the failing register access.
 */
static int mlx5_devlink_enable_sriov_set(struct devlink *devlink, u32 id,
					 struct devlink_param_gset_ctx *ctx,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
	bool per_pf_support;
	void *cap, *data;
	int err;

	err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed to read global PCI capability");
		return err;
	}

	cap = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
	per_pf_support = MLX5_GET(nv_global_pci_cap, cap,
				  per_pf_total_vf_supported);

	if (!MLX5_GET(nv_global_pci_cap, cap, sriov_support)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "SRIOV is not supported on this device");
		return -EOPNOTSUPP;
	}

	if (!per_pf_support) {
		/* We don't allow global SRIOV setting on per PF devlink */
		NL_SET_ERR_MSG_MOD(extack,
				   "SRIOV is not per PF on this device");
		return -EOPNOTSUPP;
	}

	memset(mnvda, 0, sizeof(mnvda));
	err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unable to read global PCI configuration");
		return err;
	}

	data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);

	/* setup per PF sriov mode */
	MLX5_SET(nv_global_pci_conf, data, sriov_valid, 1);
	MLX5_SET(nv_global_pci_conf, data, sriov_en, 1);
	MLX5_SET(nv_global_pci_conf, data, per_pf_total_vf, 1);

	err = mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unable to write global PCI configuration");
		return err;
	}

	/* enable/disable sriov on this PF */
	memset(mnvda, 0, sizeof(mnvda));
	err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unable to read per host PF configuration");
		return err;
	}
	/*
	 * 'data' still points at configuration_item_data inside mnvda, which
	 * now holds the freshly read per-PF item, so this modifies the
	 * correct payload before writing it back.
	 */
	MLX5_SET(nv_pf_pci_conf, data, pf_total_vf_en, ctx->val.vbool);
	return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
}
| 414 | + |
219 | 415 | static const struct devlink_param mlx5_nv_param_devlink_params[] = { |
| 416 | + DEVLINK_PARAM_GENERIC(ENABLE_SRIOV, BIT(DEVLINK_PARAM_CMODE_PERMANENT), |
| 417 | + mlx5_devlink_enable_sriov_get, |
| 418 | + mlx5_devlink_enable_sriov_set, NULL), |
220 | 419 | DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_CQE_COMPRESSION_TYPE, |
221 | 420 | "cqe_compress_type", DEVLINK_PARAM_TYPE_STRING, |
222 | 421 | BIT(DEVLINK_PARAM_CMODE_PERMANENT), |
|
0 commit comments