@@ -3210,13 +3210,8 @@ static void ibmvnic_get_ringparam(struct net_device *netdev,
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 
-	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
-		ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
-		ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
-	} else {
-		ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
-		ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
-	}
+	ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
+	ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
 	ring->rx_mini_max_pending = 0;
 	ring->rx_jumbo_max_pending = 0;
 	ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
@@ -3231,38 +3226,30 @@ static int ibmvnic_set_ringparam(struct net_device *netdev,
 				 struct netlink_ext_ack *extack)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-	int ret;
 
-	ret = 0;
+	if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
+	    ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
+		netdev_err(netdev, "Invalid request.\n");
+		netdev_err(netdev, "Max tx buffers = %llu\n",
+			   adapter->max_rx_add_entries_per_subcrq);
+		netdev_err(netdev, "Max rx buffers = %llu\n",
+			   adapter->max_tx_entries_per_subcrq);
+		return -EINVAL;
+	}
+
 	adapter->desired.rx_entries = ring->rx_pending;
 	adapter->desired.tx_entries = ring->tx_pending;
 
-	ret = wait_for_reset(adapter);
-
-	if (!ret &&
-	    (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
-	     adapter->req_tx_entries_per_subcrq != ring->tx_pending))
-		netdev_info(netdev,
-			    "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
-			    ring->rx_pending, ring->tx_pending,
-			    adapter->req_rx_add_entries_per_subcrq,
-			    adapter->req_tx_entries_per_subcrq);
-	return ret;
+	return wait_for_reset(adapter);
 }
 
 static void ibmvnic_get_channels(struct net_device *netdev,
 				 struct ethtool_channels *channels)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 
-	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
-		channels->max_rx = adapter->max_rx_queues;
-		channels->max_tx = adapter->max_tx_queues;
-	} else {
-		channels->max_rx = IBMVNIC_MAX_QUEUES;
-		channels->max_tx = IBMVNIC_MAX_QUEUES;
-	}
-
+	channels->max_rx = adapter->max_rx_queues;
+	channels->max_tx = adapter->max_tx_queues;
 	channels->max_other = 0;
 	channels->max_combined = 0;
 	channels->rx_count = adapter->req_rx_queues;
@@ -3275,66 +3262,44 @@ static int ibmvnic_set_channels(struct net_device *netdev,
 			       struct ethtool_channels *channels)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-	int ret;
 
-	ret = 0;
 	adapter->desired.rx_queues = channels->rx_count;
 	adapter->desired.tx_queues = channels->tx_count;
 
-	ret = wait_for_reset(adapter);
-
-	if (!ret &&
-	    (adapter->req_rx_queues != channels->rx_count ||
-	     adapter->req_tx_queues != channels->tx_count))
-		netdev_info(netdev,
-			    "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
-			    channels->rx_count, channels->tx_count,
-			    adapter->req_rx_queues, adapter->req_tx_queues);
-	return ret;
+	return wait_for_reset(adapter);
 }
 
 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
 	int i;
 
-	switch (stringset) {
-	case ETH_SS_STATS:
-		for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
-				i++, data += ETH_GSTRING_LEN)
-			memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
+	if (stringset != ETH_SS_STATS)
+		return;
 
-		for (i = 0; i < adapter->req_tx_queues; i++) {
-			snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
-			data += ETH_GSTRING_LEN;
+	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
+		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
 
-			snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
-			data += ETH_GSTRING_LEN;
+	for (i = 0; i < adapter->req_tx_queues; i++) {
+		snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
+		data += ETH_GSTRING_LEN;
 
-			snprintf(data, ETH_GSTRING_LEN,
-				 "tx%d_dropped_packets", i);
-			data += ETH_GSTRING_LEN;
-		}
+		snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
+		data += ETH_GSTRING_LEN;
 
-		for (i = 0; i < adapter->req_rx_queues; i++) {
-			snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
-			data += ETH_GSTRING_LEN;
+		snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
+		data += ETH_GSTRING_LEN;
+	}
 
-			snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
-			data += ETH_GSTRING_LEN;
+	for (i = 0; i < adapter->req_rx_queues; i++) {
+		snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
+		data += ETH_GSTRING_LEN;
 
-			snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
-			data += ETH_GSTRING_LEN;
-		}
-		break;
+		snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
+		data += ETH_GSTRING_LEN;
 
-	case ETH_SS_PRIV_FLAGS:
-		for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
-			strcpy(data + i * ETH_GSTRING_LEN,
-			       ibmvnic_priv_flags[i]);
-		break;
-	default:
-		return;
+		snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
+		data += ETH_GSTRING_LEN;
 	}
 }
 
@@ -3347,8 +3312,6 @@ static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
 		return ARRAY_SIZE(ibmvnic_stats) +
 		       adapter->req_tx_queues * NUM_TX_STATS +
 		       adapter->req_rx_queues * NUM_RX_STATS;
-	case ETH_SS_PRIV_FLAGS:
-		return ARRAY_SIZE(ibmvnic_priv_flags);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -3401,26 +3364,6 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
 	}
 }
 
-static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
-{
-	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-
-	return adapter->priv_flags;
-}
-
-static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
-{
-	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-	bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
-
-	if (which_maxes)
-		adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
-	else
-		adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
-
-	return 0;
-}
-
 static const struct ethtool_ops ibmvnic_ethtool_ops = {
 	.get_drvinfo		= ibmvnic_get_drvinfo,
 	.get_msglevel		= ibmvnic_get_msglevel,
@@ -3434,8 +3377,6 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
 	.get_sset_count		= ibmvnic_get_sset_count,
 	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
 	.get_link_ksettings	= ibmvnic_get_link_ksettings,
-	.get_priv_flags		= ibmvnic_get_priv_flags,
-	.set_priv_flags		= ibmvnic_set_priv_flags,
 };
 
 /* Routines for managing CRQs/sCRQs */
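For reference, a minimal sketch of how ibmvnic_set_ringparam() reads once the hunks above are applied, reconstructed from the diff context with descriptive comments added. The two middle parameters (ring, kernel_ring) are not visible in the hunk and are assumed from the four-argument ethtool_ops signature that the extack parameter implies; the comments are editorial, not part of the patch. The practical effect is that an ethtool -G request larger than the server-advertised maxima now fails immediately with -EINVAL instead of being reported after the reset via the removed netdev_info message.

/* Sketch of the post-patch function; ring/kernel_ring parameter names are
 * assumptions, everything else mirrors the diff above.
 */
static int ibmvnic_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring,
				 struct kernel_ethtool_ringparam *kernel_ring,
				 struct netlink_ext_ack *extack)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Reject sizes above the server-advertised maxima outright instead
	 * of silently settling for a smaller ring after the reset.  Note
	 * that, exactly as in the hunk, the "Max tx buffers" message prints
	 * max_rx_add_entries_per_subcrq and "Max rx buffers" prints
	 * max_tx_entries_per_subcrq.
	 */
	if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
	    ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
		netdev_err(netdev, "Invalid request.\n");
		netdev_err(netdev, "Max tx buffers = %llu\n",
			   adapter->max_rx_add_entries_per_subcrq);
		netdev_err(netdev, "Max rx buffers = %llu\n",
			   adapter->max_tx_entries_per_subcrq);
		return -EINVAL;
	}

	/* Record the validated request and let the reset path apply it. */
	adapter->desired.rx_entries = ring->rx_pending;
	adapter->desired.tx_entries = ring->tx_pending;

	return wait_for_reset(adapter);
}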