@@ -325,6 +325,7 @@ static void put_probe_ref(void)
 
 static void blk_trace_cleanup(struct blk_trace *bt)
 {
+	synchronize_rcu();
 	blk_trace_free(bt);
 	put_probe_ref();
 }
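Annotation: blk_trace_cleanup() runs after the q->blk_trace pointer has already been cleared, so the added synchronize_rcu() guarantees that every reader still holding the old pointer has left its RCU read-side critical section before blk_trace_free() releases the memory. A minimal sketch of that retire-then-free ordering, using hypothetical stand-in names (my_trace, my_trace_ptr) rather than the kernel's actual blk_trace structures:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_trace { int act_mask; };
static struct my_trace __rcu *my_trace_ptr;

static void my_trace_remove(void)
{
	struct my_trace *bt = rcu_dereference_protected(my_trace_ptr, 1);

	RCU_INIT_POINTER(my_trace_ptr, NULL);	/* unpublish: no new readers */
	synchronize_rcu();			/* wait for readers already inside */
	kfree(bt);				/* safe: no reader can still hold bt */
}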
@@ -629,8 +630,10 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
 static int __blk_trace_startstop(struct request_queue *q, int start)
 {
 	int ret;
-	struct blk_trace *bt = q->blk_trace;
+	struct blk_trace *bt;
 
+	bt = rcu_dereference_protected(q->blk_trace,
+				       lockdep_is_held(&q->blk_trace_mutex));
 	if (bt == NULL)
 		return -EINVAL;
 
@@ -739,8 +742,8 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 void blk_trace_shutdown(struct request_queue *q)
 {
 	mutex_lock(&q->blk_trace_mutex);
-
-	if (q->blk_trace) {
+	if (rcu_dereference_protected(q->blk_trace,
+				      lockdep_is_held(&q->blk_trace_mutex))) {
 		__blk_trace_startstop(q, 0);
 		__blk_trace_remove(q);
 	}
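Annotation: on the update side, both hunks above fetch the pointer with rcu_dereference_protected() instead of rcu_dereference(): q->blk_trace_mutex already excludes concurrent updates, so no read-side critical section is needed, and the lockdep_is_held() condition lets lockdep flag any caller that forgets the mutex. A sketch of the same idiom, reusing the hypothetical names from the previous sketch:

#include <linux/mutex.h>

static DEFINE_MUTEX(my_trace_mutex);

static int my_trace_query(void)
{
	struct my_trace *bt;
	int ret;

	mutex_lock(&my_trace_mutex);
	/* The mutex, not rcu_read_lock(), keeps bt alive here; the
	 * lockdep_is_held() condition lets lockdep verify that claim. */
	bt = rcu_dereference_protected(my_trace_ptr,
				       lockdep_is_held(&my_trace_mutex));
	ret = bt ? 0 : -EINVAL;
	mutex_unlock(&my_trace_mutex);
	return ret;
}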
@@ -766,10 +769,14 @@ void blk_trace_shutdown(struct request_queue *q)
 static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 			     unsigned int nr_bytes, u32 what)
 {
-	struct blk_trace *bt = q->blk_trace;
+	struct blk_trace *bt;
 
-	if (likely(!bt))
+	rcu_read_lock();
+	bt = rcu_dereference(q->blk_trace);
+	if (likely(!bt)) {
+		rcu_read_unlock();
 		return;
+	}
 
 	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 		what |= BLK_TC_ACT(BLK_TC_PC);
@@ -780,6 +787,7 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 		__blk_add_trace(bt, blk_rq_pos(rq), nr_bytes, req_op(rq),
 				rq->cmd_flags, what, rq->errors, 0, NULL);
 	}
+	rcu_read_unlock();
 }
 
 static void blk_add_trace_rq_abort(void *ignore,
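Annotation: the hot-path tracepoint hooks, starting with blk_add_trace_rq() above, all switch to the same reader-side idiom: take rcu_read_lock(), fetch the pointer with rcu_dereference(), bail out cheaply when tracing is disabled, and keep the read lock held across __blk_add_trace() so a concurrent teardown cannot free the blk_trace underneath them. A condensed sketch, again with the hypothetical my_trace_ptr:

static void my_trace_event(void)
{
	struct my_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(my_trace_ptr);	/* may be NULL */
	if (likely(!bt)) {
		rcu_read_unlock();		/* tracing off: cheap exit */
		return;
	}
	/* ... record the event; bt stays valid until the matching
	 * rcu_read_unlock(), even if teardown runs concurrently ... */
	rcu_read_unlock();
}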
@@ -829,13 +837,18 @@ static void blk_add_trace_rq_complete(void *ignore,
 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
 			      u32 what, int error)
 {
-	struct blk_trace *bt = q->blk_trace;
+	struct blk_trace *bt;
 
-	if (likely(!bt))
+	rcu_read_lock();
+	bt = rcu_dereference(q->blk_trace);
+	if (likely(!bt)) {
+		rcu_read_unlock();
 		return;
+	}
 
 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
 			bio_op(bio), bio->bi_opf, what, error, 0, NULL);
+	rcu_read_unlock();
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,
@@ -880,11 +893,14 @@ static void blk_add_trace_getrq(void *ignore,
 	if (bio)
 		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
 	else {
-		struct blk_trace *bt = q->blk_trace;
+		struct blk_trace *bt;
 
+		rcu_read_lock();
+		bt = rcu_dereference(q->blk_trace);
 		if (bt)
 			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
 					NULL);
+		rcu_read_unlock();
 	}
 }
 
@@ -896,27 +912,35 @@ static void blk_add_trace_sleeprq(void *ignore,
 	if (bio)
 		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
 	else {
-		struct blk_trace *bt = q->blk_trace;
+		struct blk_trace *bt;
 
+		rcu_read_lock();
+		bt = rcu_dereference(q->blk_trace);
 		if (bt)
 			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
 					0, 0, NULL);
+		rcu_read_unlock();
 	}
 }
 
 static void blk_add_trace_plug(void *ignore, struct request_queue *q)
 {
-	struct blk_trace *bt = q->blk_trace;
+	struct blk_trace *bt;
 
+	rcu_read_lock();
+	bt = rcu_dereference(q->blk_trace);
 	if (bt)
 		__blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
+	rcu_read_unlock();
 }
 
 static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
 				 unsigned int depth, bool explicit)
 {
-	struct blk_trace *bt = q->blk_trace;
+	struct blk_trace *bt;
 
+	rcu_read_lock();
+	bt = rcu_dereference(q->blk_trace);
 	if (bt) {
 		__be64 rpdu = cpu_to_be64(depth);
 		u32 what;
@@ -928,14 +952,17 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
 
 		__blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
 	}
+	rcu_read_unlock();
 }
 
 static void blk_add_trace_split(void *ignore,
 				struct request_queue *q, struct bio *bio,
 				unsigned int pdu)
 {
-	struct blk_trace *bt = q->blk_trace;
+	struct blk_trace *bt;
 
+	rcu_read_lock();
+	bt = rcu_dereference(q->blk_trace);
 	if (bt) {
 		__be64 rpdu = cpu_to_be64(pdu);
 
@@ -944,6 +971,7 @@ static void blk_add_trace_split(void *ignore,
 				BLK_TA_SPLIT, bio->bi_error, sizeof(rpdu),
 				&rpdu);
 	}
+	rcu_read_unlock();
 }
 
 /**
@@ -963,11 +991,15 @@ static void blk_add_trace_bio_remap(void *ignore,
 			 struct request_queue *q, struct bio *bio,
 			 dev_t dev, sector_t from)
 {
-	struct blk_trace *bt = q->blk_trace;
+	struct blk_trace *bt;
 	struct blk_io_trace_remap r;
 
-	if (likely(!bt))
+	rcu_read_lock();
+	bt = rcu_dereference(q->blk_trace);
+	if (likely(!bt)) {
+		rcu_read_unlock();
 		return;
+	}
 
 	r.device_from = cpu_to_be32(dev);
 	r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
@@ -976,6 +1008,7 @@ static void blk_add_trace_bio_remap(void *ignore,
 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
 			bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_error,
 			sizeof(r), &r);
+	rcu_read_unlock();
 }
 
 /**
@@ -996,11 +1029,15 @@ static void blk_add_trace_rq_remap(void *ignore,
 			struct request *rq, dev_t dev,
 			sector_t from)
 {
-	struct blk_trace *bt = q->blk_trace;
+	struct blk_trace *bt;
 	struct blk_io_trace_remap r;
 
-	if (likely(!bt))
+	rcu_read_lock();
+	bt = rcu_dereference(q->blk_trace);
+	if (likely(!bt)) {
+		rcu_read_unlock();
 		return;
+	}
 
 	r.device_from = cpu_to_be32(dev);
 	r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
@@ -1009,6 +1046,7 @@ static void blk_add_trace_rq_remap(void *ignore,
 	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
 			rq_data_dir(rq), 0, BLK_TA_REMAP, !!rq->errors,
 			sizeof(r), &r);
+	rcu_read_unlock();
 }
 
 /**
@@ -1026,17 +1064,22 @@ void blk_add_driver_data(struct request_queue *q,
 			 struct request *rq,
 			 void *data, size_t len)
 {
-	struct blk_trace *bt = q->blk_trace;
+	struct blk_trace *bt;
 
-	if (likely(!bt))
+	rcu_read_lock();
+	bt = rcu_dereference(q->blk_trace);
+	if (likely(!bt)) {
+		rcu_read_unlock();
 		return;
+	}
 
 	if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
 		__blk_add_trace(bt, 0, blk_rq_bytes(rq), 0, 0,
 				BLK_TA_DRV_DATA, rq->errors, len, data);
 	else
 		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0, 0,
 				BLK_TA_DRV_DATA, rq->errors, len, data);
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(blk_add_driver_data);
 
@@ -1529,6 +1572,7 @@ static int blk_trace_remove_queue(struct request_queue *q)
 		return -EINVAL;
 
 	put_probe_ref();
+	synchronize_rcu();
 	blk_trace_free(bt);
 	return 0;
 }
@@ -1690,6 +1734,7 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
 	struct hd_struct *p = dev_to_part(dev);
 	struct request_queue *q;
 	struct block_device *bdev;
+	struct blk_trace *bt;
 	ssize_t ret = -ENXIO;
 
 	bdev = bdget(part_devt(p));
@@ -1702,21 +1747,23 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
 
 	mutex_lock(&q->blk_trace_mutex);
 
+	bt = rcu_dereference_protected(q->blk_trace,
+				       lockdep_is_held(&q->blk_trace_mutex));
 	if (attr == &dev_attr_enable) {
-		ret = sprintf(buf, "%u\n", !!q->blk_trace);
+		ret = sprintf(buf, "%u\n", !!bt);
 		goto out_unlock_bdev;
 	}
 
-	if (q->blk_trace == NULL)
+	if (bt == NULL)
 		ret = sprintf(buf, "disabled\n");
 	else if (attr == &dev_attr_act_mask)
-		ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
+		ret = blk_trace_mask2str(buf, bt->act_mask);
 	else if (attr == &dev_attr_pid)
-		ret = sprintf(buf, "%u\n", q->blk_trace->pid);
+		ret = sprintf(buf, "%u\n", bt->pid);
 	else if (attr == &dev_attr_start_lba)
-		ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
+		ret = sprintf(buf, "%llu\n", bt->start_lba);
 	else if (attr == &dev_attr_end_lba)
-		ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);
+		ret = sprintf(buf, "%llu\n", bt->end_lba);
 
 out_unlock_bdev:
 	mutex_unlock(&q->blk_trace_mutex);
@@ -1733,6 +1780,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
 	struct block_device *bdev;
 	struct request_queue *q;
 	struct hd_struct *p;
+	struct blk_trace *bt;
 	u64 value;
 	ssize_t ret = -EINVAL;
 
@@ -1763,8 +1811,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
 
 	mutex_lock(&q->blk_trace_mutex);
 
+	bt = rcu_dereference_protected(q->blk_trace,
+				       lockdep_is_held(&q->blk_trace_mutex));
 	if (attr == &dev_attr_enable) {
-		if (!!value == !!q->blk_trace) {
+		if (!!value == !!bt) {
 			ret = 0;
 			goto out_unlock_bdev;
 		}
@@ -1776,18 +1826,18 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
 	}
 
 	ret = 0;
-	if (q->blk_trace == NULL)
+	if (bt == NULL)
 		ret = blk_trace_setup_queue(q, bdev);
 
 	if (ret == 0) {
 		if (attr == &dev_attr_act_mask)
-			q->blk_trace->act_mask = value;
+			bt->act_mask = value;
 		else if (attr == &dev_attr_pid)
-			q->blk_trace->pid = value;
+			bt->pid = value;
 		else if (attr == &dev_attr_start_lba)
-			q->blk_trace->start_lba = value;
+			bt->start_lba = value;
 		else if (attr == &dev_attr_end_lba)
-			q->blk_trace->end_lba = value;
+			bt->end_lba = value;
 	}
 
 out_unlock_bdev:
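Annotation: the counterpart publication step (an rcu_assign_pointer() when tracing is set up, e.g. in blk_trace_setup_queue()) is not part of this diff. A sketch of what that side presumably looks like under the same mutex, completing the lifecycle from the earlier sketches; all names remain hypothetical:

static int my_trace_setup(void)
{
	struct my_trace *bt = kzalloc(sizeof(*bt), GFP_KERNEL);

	if (!bt)
		return -ENOMEM;
	mutex_lock(&my_trace_mutex);
	rcu_assign_pointer(my_trace_ptr, bt);	/* orders init before publish */
	mutex_unlock(&my_trace_mutex);
	return 0;
}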