@@ -596,6 +596,11 @@ static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
596596 return ubq -> flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY );
597597}
598598
599+ /*
 * New helper added by this patch: returns true when the driver itself
 * must copy request data between kernel pages and the server's buffer
 * (the ublk_map_io()/ublk_unmap_io() paths below return early when it
 * is false). It is simply the negation of ublk_support_user_copy(),
 * i.e. false when UBLK_F_USER_COPY or UBLK_F_SUPPORT_ZERO_COPY is set
 * on the queue. The remaining hunks convert each
 * "!ublk_support_user_copy(ubq)" call site to this positively-named
 * predicate, with no behavior change.
 */
599+ static inline bool ublk_need_map_io (const struct ublk_queue * ubq )
600+ {
601+ 	return !ublk_support_user_copy (ubq );
602+ }
603+
599604static inline bool ublk_need_req_ref (const struct ublk_queue * ubq )
600605{
601606 /*
@@ -923,7 +928,7 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
923928{
924929 const unsigned int rq_bytes = blk_rq_bytes (req );
925930
926- if (ublk_support_user_copy (ubq ))
931+ if (! ublk_need_map_io (ubq ))
927932 return rq_bytes ;
928933
929934 /*
@@ -947,7 +952,7 @@ static int ublk_unmap_io(const struct ublk_queue *ubq,
947952{
948953 const unsigned int rq_bytes = blk_rq_bytes (req );
949954
950- if (ublk_support_user_copy (ubq ))
955+ if (! ublk_need_map_io (ubq ))
951956 return rq_bytes ;
952957
953958 if (ublk_need_unmap_req (req )) {
@@ -1867,7 +1872,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
18671872 if (io -> flags & UBLK_IO_FLAG_OWNED_BY_SRV )
18681873 goto out ;
18691874
1870- if (! ublk_support_user_copy (ubq )) {
1875+ if (ublk_need_map_io (ubq )) {
18711876 /*
18721877 * FETCH_RQ has to provide IO buffer if NEED GET
18731878 * DATA is not enabled
@@ -1889,7 +1894,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
18891894 if (!(io -> flags & UBLK_IO_FLAG_OWNED_BY_SRV ))
18901895 goto out ;
18911896
1892- if (! ublk_support_user_copy (ubq )) {
1897+ if (ublk_need_map_io (ubq )) {
18931898 /*
18941899 * COMMIT_AND_FETCH_REQ has to provide IO buffer if
18951900 * NEED GET DATA is not enabled or it is Read IO.
0 commit comments