@@ -80,6 +80,7 @@ struct cm_req_msg {
 	union ib_gid primary_path_sgid;
 };
 
+static struct workqueue_struct *cm_wq;
 
 static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
 {
@@ -288,10 +289,10 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
 	/*make sure that there is no schedule inside the scheduled work.*/
 	if (!sriov->is_going_down && !id->scheduled_delete) {
 		id->scheduled_delete = 1;
-		schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+		queue_delayed_work(cm_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
 	} else if (id->scheduled_delete) {
 		/* Adjust timeout if already scheduled */
-		mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+		mod_delayed_work(cm_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
 	}
 	spin_unlock_irqrestore(&sriov->going_down_lock, flags);
 	spin_unlock(&sriov->id_map_lock);
@@ -370,7 +371,7 @@ static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int sl
 			ret = xa_err(item);
 		else
 			/* If a retry, adjust delayed work */
-			mod_delayed_work(system_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+			mod_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
 		goto err_or_exists;
 	}
 	xa_unlock(&sriov->xa_rej_tmout);
@@ -393,7 +394,7 @@ static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int sl
 		return xa_err(old);
 	}
 
-	schedule_delayed_work(&item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+	queue_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
 
 	return 0;
 
@@ -500,15 +501,15 @@ static void rej_tmout_xa_cleanup(struct mlx4_ib_sriov *sriov, int slave)
 	xa_lock(&sriov->xa_rej_tmout);
 	xa_for_each(&sriov->xa_rej_tmout, id, item) {
 		if (slave < 0 || slave == item->slave) {
-			mod_delayed_work(system_wq, &item->timeout, 0);
+			mod_delayed_work(cm_wq, &item->timeout, 0);
 			flush_needed = true;
 			++cnt;
 		}
 	}
 	xa_unlock(&sriov->xa_rej_tmout);
 
 	if (flush_needed) {
-		flush_scheduled_work();
+		flush_workqueue(cm_wq);
 		pr_debug("Deleted %d entries in xarray for slave %d during cleanup\n",
 			 cnt, slave);
 	}
@@ -540,7 +541,7 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
 	spin_unlock(&sriov->id_map_lock);
 
 	if (need_flush)
-		flush_scheduled_work(); /* make sure all timers were flushed */
+		flush_workqueue(cm_wq); /* make sure all timers were flushed */
 
 	/* now, remove all leftover entries from databases*/
 	spin_lock(&sriov->id_map_lock);
@@ -587,3 +588,17 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
 
 	rej_tmout_xa_cleanup(sriov, slave);
 }
+
+int mlx4_ib_cm_init(void)
+{
+	cm_wq = alloc_workqueue("mlx4_ib_cm", 0, 0);
+	if (!cm_wq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void mlx4_ib_cm_destroy(void)
+{
+	destroy_workqueue(cm_wq);
+}
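
The new mlx4_ib_cm_init()/mlx4_ib_cm_destroy() helpers only allocate and free the dedicated "mlx4_ib_cm" workqueue; they still have to be declared in the driver's private header and called from the module init/exit path, which is outside this hunk. A minimal sketch of that wiring, assuming the usual mlx4_ib entry points in main.c and the mlx4_register_interface()/mlx4_unregister_interface() calls already present there (hook point names and ordering are assumptions, not part of this diff):

/* mlx4_ib.h (sketch): expose the CM workqueue helpers */
int mlx4_ib_cm_init(void);
void mlx4_ib_cm_destroy(void);

/* main.c (sketch): create the workqueue before the driver registers
 * with the mlx4 core, and destroy it only after unregistering, so no
 * CM delayed work can still be pending on cm_wq at teardown.
 */
static int __init mlx4_ib_init(void)
{
	int err;

	err = mlx4_ib_cm_init();	/* allocates the "mlx4_ib_cm" workqueue */
	if (err)
		return err;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err) {
		mlx4_ib_cm_destroy();	/* undo the allocation on failure */
		return err;
	}

	return 0;
}

static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	mlx4_ib_cm_destroy();		/* safe: all users are gone by now */
}

Using a driver-private workqueue here also means the flush paths above (flush_workqueue(cm_wq)) only wait for this driver's own delayed CM cleanup work, instead of flushing the global system workqueue.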