@@ -246,14 +246,13 @@ static void alloc_nodes_vectors(unsigned int numvecs,
246
246
247
247
static int __irq_build_affinity_masks (unsigned int startvec ,
248
248
unsigned int numvecs ,
249
- unsigned int firstvec ,
250
249
cpumask_var_t * node_to_cpumask ,
251
250
const struct cpumask * cpu_mask ,
252
251
struct cpumask * nmsk ,
253
252
struct irq_affinity_desc * masks )
254
253
{
255
254
unsigned int i , n , nodes , cpus_per_vec , extra_vecs , done = 0 ;
256
- unsigned int last_affv = firstvec + numvecs ;
255
+ unsigned int last_affv = numvecs ;
257
256
unsigned int curvec = startvec ;
258
257
nodemask_t nodemsk = NODE_MASK_NONE ;
259
258
struct node_vectors * node_vectors ;
@@ -273,7 +272,7 @@ static int __irq_build_affinity_masks(unsigned int startvec,
273
272
cpumask_and (nmsk , cpu_mask , node_to_cpumask [n ]);
274
273
cpumask_or (& masks [curvec ].mask , & masks [curvec ].mask , nmsk );
275
274
if (++ curvec == last_affv )
276
- curvec = firstvec ;
275
+ curvec = 0 ;
277
276
}
278
277
return numvecs ;
279
278
}
@@ -321,7 +320,7 @@ static int __irq_build_affinity_masks(unsigned int startvec,
321
320
* may start anywhere
322
321
*/
323
322
if (curvec >= last_affv )
324
- curvec = firstvec ;
323
+ curvec = 0 ;
325
324
irq_spread_init_one (& masks [curvec ].mask , nmsk ,
326
325
cpus_per_vec );
327
326
}
@@ -336,11 +335,10 @@ static int __irq_build_affinity_masks(unsigned int startvec,
336
335
* 1) spread present CPU on these vectors
337
336
* 2) spread other possible CPUs on these vectors
338
337
*/
339
- static int irq_build_affinity_masks (unsigned int startvec , unsigned int numvecs ,
338
+ static int irq_build_affinity_masks (unsigned int numvecs ,
340
339
struct irq_affinity_desc * masks )
341
340
{
342
- unsigned int curvec = startvec , nr_present = 0 , nr_others = 0 ;
343
- unsigned int firstvec = startvec ;
341
+ unsigned int curvec = 0 , nr_present = 0 , nr_others = 0 ;
344
342
cpumask_var_t * node_to_cpumask ;
345
343
cpumask_var_t nmsk , npresmsk ;
346
344
int ret = - ENOMEM ;
@@ -360,9 +358,8 @@ static int irq_build_affinity_masks(unsigned int startvec, unsigned int numvecs,
360
358
build_node_to_cpumask (node_to_cpumask );
361
359
362
360
/* Spread on present CPUs starting from affd->pre_vectors */
363
- ret = __irq_build_affinity_masks (curvec , numvecs , firstvec ,
364
- node_to_cpumask , cpu_present_mask ,
365
- nmsk , masks );
361
+ ret = __irq_build_affinity_masks (curvec , numvecs , node_to_cpumask ,
362
+ cpu_present_mask , nmsk , masks );
366
363
if (ret < 0 )
367
364
goto fail_build_affinity ;
368
365
nr_present = ret ;
@@ -374,13 +371,12 @@ static int irq_build_affinity_masks(unsigned int startvec, unsigned int numvecs,
374
371
* out vectors.
375
372
*/
376
373
if (nr_present >= numvecs )
377
- curvec = firstvec ;
374
+ curvec = 0 ;
378
375
else
379
- curvec = firstvec + nr_present ;
376
+ curvec = nr_present ;
380
377
cpumask_andnot (npresmsk , cpu_possible_mask , cpu_present_mask );
381
- ret = __irq_build_affinity_masks (curvec , numvecs , firstvec ,
382
- node_to_cpumask , npresmsk , nmsk ,
383
- masks );
378
+ ret = __irq_build_affinity_masks (curvec , numvecs , node_to_cpumask ,
379
+ npresmsk , nmsk , masks );
384
380
if (ret >= 0 )
385
381
nr_others = ret ;
386
382
@@ -463,7 +459,7 @@ irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
463
459
unsigned int this_vecs = affd -> set_size [i ];
464
460
int ret ;
465
461
466
- ret = irq_build_affinity_masks (curvec , this_vecs , masks );
462
+ ret = irq_build_affinity_masks (this_vecs , & masks [ curvec ] );
467
463
if (ret ) {
468
464
kfree (masks );
469
465
return NULL ;