@@ -329,7 +329,7 @@ static void blk_trace_cleanup(struct blk_trace *bt)
 	put_probe_ref();
 }
 
-int blk_trace_remove(struct request_queue *q)
+static int __blk_trace_remove(struct request_queue *q)
 {
 	struct blk_trace *bt;
 
@@ -342,6 +342,17 @@ int blk_trace_remove(struct request_queue *q)
 
 	return 0;
 }
+
+int blk_trace_remove(struct request_queue *q)
+{
+	int ret;
+
+	mutex_lock(&q->blk_trace_mutex);
+	ret = __blk_trace_remove(q);
+	mutex_unlock(&q->blk_trace_mutex);
+
+	return ret;
+}
 EXPORT_SYMBOL_GPL(blk_trace_remove);
 
 static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
@@ -546,9 +557,8 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	return ret;
 }
 
-int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
-		    struct block_device *bdev,
-		    char __user *arg)
+static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+			     struct block_device *bdev, char __user *arg)
 {
 	struct blk_user_trace_setup buts;
 	int ret;
@@ -567,6 +577,19 @@ int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	}
 	return 0;
 }
+
+int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+		    struct block_device *bdev,
+		    char __user *arg)
+{
+	int ret;
+
+	mutex_lock(&q->blk_trace_mutex);
+	ret = __blk_trace_setup(q, name, dev, bdev, arg);
+	mutex_unlock(&q->blk_trace_mutex);
+
+	return ret;
+}
 EXPORT_SYMBOL_GPL(blk_trace_setup);
 
 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
@@ -603,7 +626,7 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
 }
 #endif
 
-int blk_trace_startstop(struct request_queue *q, int start)
+static int __blk_trace_startstop(struct request_queue *q, int start)
 {
 	int ret;
 	struct blk_trace *bt = q->blk_trace;
@@ -642,6 +665,17 @@ int blk_trace_startstop(struct request_queue *q, int start)
 
 	return ret;
 }
+
+int blk_trace_startstop(struct request_queue *q, int start)
+{
+	int ret;
+
+	mutex_lock(&q->blk_trace_mutex);
+	ret = __blk_trace_startstop(q, start);
+	mutex_unlock(&q->blk_trace_mutex);
+
+	return ret;
+}
 EXPORT_SYMBOL_GPL(blk_trace_startstop);
 
 /*
@@ -672,7 +706,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 	switch (cmd) {
 	case BLKTRACESETUP:
 		bdevname(bdev, b);
-		ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
+		ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
 		break;
 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
 	case BLKTRACESETUP32:
@@ -683,10 +717,10 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 	case BLKTRACESTART:
 		start = 1;
 	case BLKTRACESTOP:
-		ret = blk_trace_startstop(q, start);
+		ret = __blk_trace_startstop(q, start);
 		break;
 	case BLKTRACETEARDOWN:
-		ret = blk_trace_remove(q);
+		ret = __blk_trace_remove(q);
 		break;
 	default:
 		ret = -ENOTTY;
@@ -704,10 +738,14 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 **/
 void blk_trace_shutdown(struct request_queue *q)
 {
+	mutex_lock(&q->blk_trace_mutex);
+
 	if (q->blk_trace) {
-		blk_trace_startstop(q, 0);
-		blk_trace_remove(q);
+		__blk_trace_startstop(q, 0);
+		__blk_trace_remove(q);
 	}
+
+	mutex_unlock(&q->blk_trace_mutex);
 }
 
 /*
0 commit comments