@@ -6680,7 +6680,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 	unsigned long vma_size;
 	unsigned long nr_pages;
 	long user_extra = 0, extra = 0;
-	int ret = 0, flags = 0;
+	int ret, flags = 0;
 
 	/*
 	 * Don't allow mmap() of inherited per-task counters. This would
@@ -6708,6 +6708,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 
 	user_extra = nr_pages;
 
+	mutex_lock(&event->mmap_mutex);
+	ret = -EINVAL;
+
 	if (vma->vm_pgoff == 0) {
 		nr_pages -= 1;
 
@@ -6716,16 +6719,13 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		 * can do bitmasks instead of modulo.
 		 */
 		if (nr_pages != 0 && !is_power_of_2(nr_pages))
-			return -EINVAL;
+			goto unlock;
 
 		WARN_ON_ONCE(event->ctx->parent_ctx);
-		mutex_lock(&event->mmap_mutex);
 
 		if (event->rb) {
-			if (data_page_nr(event->rb) != nr_pages) {
-				ret = -EINVAL;
+			if (data_page_nr(event->rb) != nr_pages)
 				goto unlock;
-			}
 
 			if (atomic_inc_not_zero(&event->rb->mmap_count)) {
 				/*
@@ -6754,12 +6754,6 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		 */
 		u64 aux_offset, aux_size;
 
-		if (!event->rb)
-			return -EINVAL;
-
-		mutex_lock(&event->mmap_mutex);
-		ret = -EINVAL;
-
 		rb = event->rb;
 		if (!rb)
 			goto aux_unlock;
@@ -6869,6 +6863,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		rb->aux_mmap_locked = extra;
 	}
 
+	ret = 0;
+
 unlock:
 	if (!ret) {
 		atomic_long_add(user_extra, &user->locked_vm);
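
The net effect of the patch is a classic kernel error-handling idiom: take the lock once at the top of the function, initialize the return code to the failure value, let every subsequent check jump to a single unlock label, and set the success value only after the last check has passed. Both branches of perf_mmap() previously took event->mmap_mutex separately (and the pgoff == 0 branch could even return -EINVAL before locking); after the patch there is exactly one lock site and one unlock path. Below is a minimal, self-contained userspace sketch of that idiom, with a pthread mutex standing in for event->mmap_mutex and a hypothetical setup_buffer() standing in for the mmap path; it illustrates the pattern, not the kernel code itself.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Userspace stand-in for the kernel's is_power_of_2() helper. */
static int is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* Hypothetical stand-in for the locked section of perf_mmap(). */
static int setup_buffer(unsigned long nr_pages)
{
	int ret;

	pthread_mutex_lock(&lock);	/* lock taken once, up front */
	ret = -EINVAL;			/* default to failure ... */

	if (!is_power_of_2(nr_pages))
		goto unlock;		/* ... so failures just jump out */

	/* further checks and the actual setup work would go here */

	ret = 0;			/* success only after every check passed */
unlock:
	pthread_mutex_unlock(&lock);	/* single unlock site on all paths */
	return ret;
}

int main(void)
{
	printf("3 pages -> %d\n", setup_buffer(3));	/* -EINVAL */
	printf("4 pages -> %d\n", setup_buffer(4));	/* 0 */
	return 0;
}

With ret defaulting to -EINVAL, the former three-line failure blocks (set ret, then goto) collapse to a single goto, which is exactly why the data_page_nr() check in the hunk above loses its braces.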