@@ -2898,77 +2898,25 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	return 0;
 }
 
-static bool io_req_cancelled(struct io_kiocb *req)
-{
-	if (req->work.flags & IO_WQ_WORK_CANCEL) {
-		req_set_fail_links(req);
-		io_cqring_add_event(req, -ECANCELED);
-		io_put_req(req);
-		return true;
-	}
-
-	return false;
-}
-
-static void __io_fsync(struct io_kiocb *req)
+static int io_fsync(struct io_kiocb *req, bool force_nonblock)
 {
 	loff_t end = req->sync.off + req->sync.len;
 	int ret;
 
+	/* fsync always requires a blocking context */
+	if (force_nonblock)
+		return -EAGAIN;
+
 	ret = vfs_fsync_range(req->file, req->sync.off,
 				end > 0 ? end : LLONG_MAX,
 				req->sync.flags & IORING_FSYNC_DATASYNC);
 	if (ret < 0)
 		req_set_fail_links(req);
 	io_cqring_add_event(req, ret);
 	io_put_req(req);
-}
-
-static void io_fsync_finish(struct io_wq_work **workptr)
-{
-	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-
-	if (io_req_cancelled(req))
-		return;
-	__io_fsync(req);
-	io_steal_work(req, workptr);
-}
-
-static int io_fsync(struct io_kiocb *req, bool force_nonblock)
-{
-	/* fsync always requires a blocking context */
-	if (force_nonblock) {
-		req->work.func = io_fsync_finish;
-		return -EAGAIN;
-	}
-	__io_fsync(req);
 	return 0;
 }
 
-static void __io_fallocate(struct io_kiocb *req)
-{
-	int ret;
-
-	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
-	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
-				req->sync.len);
-	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
-	if (ret < 0)
-		req_set_fail_links(req);
-	io_cqring_add_event(req, ret);
-	io_put_req(req);
-}
-
-static void io_fallocate_finish(struct io_wq_work **workptr)
-{
-	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-
-	if (io_req_cancelled(req))
-		return;
-	__io_fallocate(req);
-	io_steal_work(req, workptr);
-}
-
 static int io_fallocate_prep(struct io_kiocb *req,
 			     const struct io_uring_sqe *sqe)
 {
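The hunk above removes the fsync-specific io-wq plumbing: io_req_cancelled(), the __io_fsync()/io_fsync_finish() split, and the req->work.func override. io_fsync() now simply returns -EAGAIN when called with force_nonblock set, and the generic io-wq path re-invokes it from a blocking context. The same pattern repeats for fallocate and sync_file_range below. For illustration only, a minimal userspace sketch of driving this opcode, assuming liburing is available; the file name and the error handling are placeholders, not part of the patch:

/*
 * Userspace sketch (not part of the patch): submit IORING_OP_FSYNC via
 * liburing.  With this change the kernel side always punts the request
 * to io-wq, since io_fsync() returns -EAGAIN under force_nonblock.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fd, ret;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	fd = open("data.log", O_WRONLY | O_CREAT, 0644);	/* placeholder path */
	if (fd < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
	io_uring_submit(&ring);

	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret) {
		printf("fsync res: %d\n", cqe->res);	/* 0 on success */
		io_uring_cqe_seen(&ring, cqe);
	}

	close(fd);
	io_uring_queue_exit(&ring);
	return 0;
}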
@@ -2986,13 +2934,20 @@ static int io_fallocate_prep(struct io_kiocb *req,
 
 static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
 {
+	int ret;
+
 	/* fallocate always requiring blocking context */
-	if (force_nonblock) {
-		req->work.func = io_fallocate_finish;
+	if (force_nonblock)
 		return -EAGAIN;
-	}
 
-	__io_fallocate(req);
+	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
+	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
+				req->sync.len);
+	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
+	if (ret < 0)
+		req_set_fail_links(req);
+	io_cqring_add_event(req, ret);
+	io_put_req(req);
 	return 0;
 }
 
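io_fallocate() likewise loses its __io_fallocate()/io_fallocate_finish() helpers; the RLIMIT_FSIZE save/restore around vfs_fallocate() is folded into the handler itself, which after the force_nonblock check only ever runs from io-wq. A hypothetical queueing helper for the matching request, again assuming liburing (the helper name and the mode/offset choices are illustrative):

/*
 * Sketch only: queue IORING_OP_FALLOCATE on an existing ring.  The
 * RLIMIT_FSIZE clamp seen in the hunk is applied kernel-side around
 * vfs_fallocate(), not here.
 */
#include <liburing.h>

static int queue_fallocate(struct io_uring *ring, int fd, off_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	io_uring_prep_fallocate(sqe, fd, 0, 0, len);	/* mode 0, offset 0 */
	return io_uring_submit(ring);
}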
@@ -3489,38 +3444,20 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	return 0;
 }
 
-static void __io_sync_file_range(struct io_kiocb *req)
+static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
 {
 	int ret;
 
+	/* sync_file_range always requires a blocking context */
+	if (force_nonblock)
+		return -EAGAIN;
+
 	ret = sync_file_range(req->file, req->sync.off, req->sync.len,
 				req->sync.flags);
 	if (ret < 0)
 		req_set_fail_links(req);
 	io_cqring_add_event(req, ret);
 	io_put_req(req);
-}
-
-
-static void io_sync_file_range_finish(struct io_wq_work **workptr)
-{
-	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-
-	if (io_req_cancelled(req))
-		return;
-	__io_sync_file_range(req);
-	io_steal_work(req, workptr);
-}
-
-static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
-{
-	/* sync_file_range always requires a blocking context */
-	if (force_nonblock) {
-		req->work.func = io_sync_file_range_finish;
-		return -EAGAIN;
-	}
-
-	__io_sync_file_range(req);
 	return 0;
 }
 
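The same transformation applies to sync_file_range: one handler, an unconditional -EAGAIN under force_nonblock, and the io-wq worker doing the real work. A hypothetical queueing helper, assuming liburing (the SYNC_FILE_RANGE_WRITE flag and helper name are illustrative):

/*
 * Sketch only: queue IORING_OP_SYNC_FILE_RANGE on an existing ring.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <liburing.h>

static int queue_sync_file_range(struct io_uring *ring, int fd, unsigned len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	/* note the liburing argument order: len before offset */
	io_uring_prep_sync_file_range(sqe, fd, len, 0, SYNC_FILE_RANGE_WRITE);
	return io_uring_submit(ring);
}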
@@ -3942,49 +3879,27 @@ static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	return 0;
 }
 
-static int __io_accept(struct io_kiocb *req, bool force_nonblock)
+static int io_accept(struct io_kiocb *req, bool force_nonblock)
 {
 	struct io_accept *accept = &req->accept;
-	unsigned file_flags;
+	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
 	int ret;
 
-	file_flags = force_nonblock ? O_NONBLOCK : 0;
 	ret = __sys_accept4_file(req->file, file_flags, accept->addr,
 					accept->addr_len, accept->flags,
 					accept->nofile);
 	if (ret == -EAGAIN && force_nonblock)
 		return -EAGAIN;
-	if (ret == -ERESTARTSYS)
-		ret = -EINTR;
-	if (ret < 0)
+	if (ret < 0) {
+		if (ret == -ERESTARTSYS)
+			ret = -EINTR;
 		req_set_fail_links(req);
+	}
 	io_cqring_add_event(req, ret);
 	io_put_req(req);
 	return 0;
 }
 
-static void io_accept_finish(struct io_wq_work **workptr)
-{
-	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-
-	if (io_req_cancelled(req))
-		return;
-	__io_accept(req, false);
-	io_steal_work(req, workptr);
-}
-
-static int io_accept(struct io_kiocb *req, bool force_nonblock)
-{
-	int ret;
-
-	ret = __io_accept(req, force_nonblock);
-	if (ret == -EAGAIN && force_nonblock) {
-		req->work.func = io_accept_finish;
-		return -EAGAIN;
-	}
-	return 0;
-}
-
 static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_connect *conn = &req->connect;
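Accept is the one op here that is not unconditionally punted: __io_accept() is folded into io_accept(), which still tries the syscall with O_NONBLOCK first and returns -EAGAIN (triggering the generic io-wq retry) only when no connection is pending. The rewrite also narrows the -ERESTARTSYS translation to the error path. A hypothetical queueing helper, assuming liburing and a listening socket created elsewhere:

/*
 * Sketch only: queue IORING_OP_ACCEPT.  Unlike the three ops above,
 * this is attempted inline with O_NONBLOCK and punted to io-wq only
 * on -EAGAIN.
 */
#include <sys/socket.h>
#include <liburing.h>

static int queue_accept(struct io_uring *ring, int listen_fd,
			struct sockaddr *addr, socklen_t *addrlen)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	io_uring_prep_accept(sqe, listen_fd, addr, addrlen, 0);
	return io_uring_submit(ring);
}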