 #define uptr64(val) ((void __user *)(uintptr_t)(val))

+/*
+ * BSG io_uring PDU structure overlaying io_uring_cmd.pdu[32].
+ * Stores temporary data needed during command execution.
+ */
+struct scsi_bsg_uring_cmd_pdu {
+	struct bio *bio;	/* mapped user buffer, unmap in task work */
+	struct request *req;	/* block request, freed in task work */
+	u64 sense_addr;		/* user response buffer (SCSI: sense data) */
+	u32 resid_len;		/* residual transfer length */
+	/* Protocol-specific status fields using union for extensibility */
+	union {
+		struct {
+			u8 device_status; /* SCSI status (low 8 bits of result) */
+			u8 driver_status; /* DRIVER_SENSE on check condition */
+			u8 host_status;	  /* host_byte() of result */
+			u8 sense_len_wr;  /* actual sense data length written */
+		} scsi;
+		/* Future protocols can add their own status layouts here */
+	};
+};
+
+static inline struct scsi_bsg_uring_cmd_pdu *scsi_bsg_uring_cmd_pdu(
+		struct io_uring_cmd *ioucmd)
+{
+	return io_uring_cmd_to_pdu(ioucmd, struct scsi_bsg_uring_cmd_pdu);
+}
+
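Editor's note: the PDU must fit the 32-byte pdu area it overlays. io_uring_cmd_to_pdu() already enforces this, but a local assert would document the constraint at the definition site. A minimal sketch (not part of the patch), assuming the struct above:

    /* Compile-time guard: PDU may not outgrow io_uring_cmd.pdu[32]. */
    static_assert(sizeof(struct scsi_bsg_uring_cmd_pdu) <= 32,
                  "scsi_bsg_uring_cmd_pdu must fit io_uring_cmd.pdu[32]");
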
+/*
+ * Task work callback executed in process context.
+ * Builds res2 with status information and copies sense data to user space.
+ * res2 layout (64-bit):
+ *   bits  0-7:  device_status
+ *   bits  8-15: driver_status
+ *   bits 16-23: host_status
+ *   bits 24-31: sense_len_wr
+ *   bits 32-63: resid_len
+ */
+static void scsi_bsg_uring_task_cb(struct io_tw_req tw_req, io_tw_token_t tw)
+{
+	struct io_uring_cmd *ioucmd = io_uring_cmd_from_tw(tw_req);
+	struct scsi_bsg_uring_cmd_pdu *pdu = scsi_bsg_uring_cmd_pdu(ioucmd);
+	struct scsi_cmnd *scmd;
+	struct request *rq = pdu->req;
+	int ret = 0;
+	u64 res2;
+
+	scmd = blk_mq_rq_to_pdu(rq);
+
+	if (pdu->bio)
+		blk_rq_unmap_user(pdu->bio);
+
+	/* Build res2 with status information */
+	res2 = ((u64)pdu->resid_len << 32) |
+	       ((u64)(pdu->scsi.sense_len_wr & 0xff) << 24) |
+	       ((u64)(pdu->scsi.host_status & 0xff) << 16) |
+	       ((u64)(pdu->scsi.driver_status & 0xff) << 8) |
+	       (pdu->scsi.device_status & 0xff);
+
+	if (pdu->scsi.sense_len_wr && pdu->sense_addr) {
+		if (copy_to_user(uptr64(pdu->sense_addr), scmd->sense_buffer,
+				 pdu->scsi.sense_len_wr))
+			ret = -EFAULT;
+	}
+
+	blk_mq_free_request(rq);
+	io_uring_cmd_done32(ioucmd, ret, res2,
+			    IO_URING_CMD_TASK_WORK_ISSUE_FLAGS);
+}
+
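Editor's note: since the res2 layout is user-visible ABI, a decode helper makes the packing concrete. A user-space sketch (hypothetical helper, not part of the patch), assuming a CQE32 ring where res2 surfaces in cqe->big_cqe[0]:

    #include <stdint.h>
    #include <stdio.h>

    /* Unpack res2 exactly per the layout comment above. */
    static void decode_res2(uint64_t res2)
    {
            unsigned int device_status = res2 & 0xff;
            unsigned int driver_status = (res2 >> 8) & 0xff;
            unsigned int host_status   = (res2 >> 16) & 0xff;
            unsigned int sense_len_wr  = (res2 >> 24) & 0xff;
            unsigned int resid_len     = res2 >> 32;

            fprintf(stderr, "status=%#x driver=%#x host=%#x sense=%u resid=%u\n",
                    device_status, driver_status, host_status,
                    sense_len_wr, resid_len);
    }
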
+/*
+ * Async completion callback executed in interrupt/atomic context.
+ * Saves SCSI status information and schedules task work for final
+ * completion.
+ */
+static enum rq_end_io_ret scsi_bsg_uring_cmd_done(struct request *req,
+						  blk_status_t status)
+{
+	struct io_uring_cmd *ioucmd = req->end_io_data;
+	struct scsi_bsg_uring_cmd_pdu *pdu = scsi_bsg_uring_cmd_pdu(ioucmd);
+	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
+
+	/* Pack SCSI status fields into the union */
+	pdu->scsi.device_status = scmd->result & 0xff;
+	pdu->scsi.host_status = host_byte(scmd->result);
+	pdu->scsi.driver_status = 0;
+	pdu->scsi.sense_len_wr = 0;
+
+	if (scsi_status_is_check_condition(scmd->result)) {
+		pdu->scsi.driver_status = DRIVER_SENSE;
+		if (pdu->sense_addr)
+			pdu->scsi.sense_len_wr = min_t(u8, scmd->sense_len,
+						       SCSI_SENSE_BUFFERSIZE);
+	}
+
+	pdu->resid_len = scmd->resid_len;
+
+	io_uring_cmd_do_in_task_lazy(ioucmd, scsi_bsg_uring_task_cb);
+	return RQ_END_IO_NONE;
+}
+
+/*
+ * Validate bsg_uring_cmd structure parameters.
+ * Note: xfer_dir must match the actual SCSI command direction.
+ * The direction is determined by the CDB, and user space should
+ * set xfer_dir accordingly (0 = READ, 1 = WRITE).
+ */
+static int scsi_bsg_validate_uring_cmd(const struct bsg_uring_cmd *cmd)
+{
+	if (cmd->protocol != BSG_PROTOCOL_SCSI ||
+	    cmd->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD)
+		return -EINVAL;
+
+	if (!cmd->request || cmd->request_len == 0)
+		return -EINVAL;
+
+	if (cmd->xfer_dir > 1)
+		return -EINVAL;
+
+	if (cmd->iovec_count > 0)
+		return -EOPNOTSUPP;
+
+	return 0;
+}
+
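Editor's note: for reference, this is roughly what a caller must set up for these checks to pass. A user-space sketch using a 6-byte INQUIRY; the field names come from the code above, but the struct definition lives in the UAPI header added elsewhere in this series, so treat the layout as assumed:

    #include <stdint.h>

    uint8_t cdb[6] = { 0x12, 0, 0, 0, 96, 0 };   /* INQUIRY, 96-byte reply */
    uint8_t data[96], sense[32];

    struct bsg_uring_cmd cmd = {     /* layout assumed from series UAPI header */
            .protocol         = BSG_PROTOCOL_SCSI,
            .subprotocol      = BSG_SUB_PROTOCOL_SCSI_CMD,
            .request          = (uint64_t)(uintptr_t)cdb,    /* CDB pointer */
            .request_len      = sizeof(cdb),
            .xfer_dir         = 0,                           /* 0 = READ */
            .xfer_addr        = (uint64_t)(uintptr_t)data,
            .xfer_len         = sizeof(data),
            .response         = (uint64_t)(uintptr_t)sense,  /* sense buffer */
            .max_response_len = sizeof(sense),
            .timeout_ms       = 30000,
            /* iovec_count left at 0: iovecs return -EOPNOTSUPP for now */
    };
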
+/*
+ * Map user buffer to request, supporting both zero-copy (fixed buffers)
+ * and traditional mode.
+ */
+static int scsi_bsg_map_user_buffer(struct request *req,
+				    struct io_uring_cmd *ioucmd,
+				    unsigned int issue_flags, gfp_t gfp_mask)
+{
+	const struct bsg_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
+	struct iov_iter iter;
+	int ret;
+
+	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
+		ret = io_uring_cmd_import_fixed(cmd->xfer_addr, cmd->xfer_len,
+						cmd->xfer_dir, &iter, ioucmd,
+						issue_flags);
+		if (ret < 0)
+			return ret;
+		ret = blk_rq_map_user_iov(req->q, req, NULL, &iter, gfp_mask);
+	} else {
+		ret = blk_rq_map_user(req->q, req, NULL,
+				      uptr64(cmd->xfer_addr), cmd->xfer_len,
+				      gfp_mask);
+	}
+
+	return ret;
+}
+
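Editor's note: on the submission side, the fixed-buffer branch corresponds to a registered buffer plus IORING_URING_CMD_FIXED in the SQE. A liburing sketch, assuming an SQE128 ring (the command payload rides in the big SQE) and the cmd struct filled in above:

    struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
    io_uring_register_buffers(&ring, &iov, 1);      /* registered as index 0 */

    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
    sqe->opcode          = IORING_OP_URING_CMD;
    sqe->fd              = bsg_fd;                  /* open("/dev/bsg/...") fd */
    sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;  /* take the zero-copy path */
    sqe->buf_index       = 0;                       /* registered buffer index */
    /* cmd_op selection is defined elsewhere in this series */
    memcpy(sqe->cmd, &cmd, sizeof(cmd));            /* needs IORING_SETUP_SQE128 */
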
 int scsi_bsg_uring_cmd(struct request_queue *q, struct io_uring_cmd *ioucmd,
 		       unsigned int issue_flags, bool open_for_write)
 {
-	return -EOPNOTSUPP;
+	struct scsi_bsg_uring_cmd_pdu *pdu = scsi_bsg_uring_cmd_pdu(ioucmd);
+	const struct bsg_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
+	struct scsi_cmnd *scmd;
+	struct request *req;
+	blk_mq_req_flags_t blk_flags = 0;
+	gfp_t gfp_mask = GFP_KERNEL;
+	int ret;
+
+	ret = scsi_bsg_validate_uring_cmd(cmd);
+	if (ret)
+		return ret;
+
+	if (issue_flags & IO_URING_F_NONBLOCK) {
+		blk_flags = BLK_MQ_REQ_NOWAIT;
+		gfp_mask = GFP_NOWAIT;
+	}
+
+	req = scsi_alloc_request(q, cmd->xfer_dir ?
+				 REQ_OP_DRV_OUT : REQ_OP_DRV_IN, blk_flags);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+
+	scmd = blk_mq_rq_to_pdu(req);
+	scmd->cmd_len = cmd->request_len;
+	if (scmd->cmd_len > sizeof(scmd->cmnd)) {
+		ret = -EINVAL;
+		goto out_free_req;
+	}
+	scmd->allowed = SG_DEFAULT_RETRIES;
+
+	if (copy_from_user(scmd->cmnd, uptr64(cmd->request),
+			   cmd->request_len)) {
+		ret = -EFAULT;
+		goto out_free_req;
+	}
+
+	if (!scsi_cmd_allowed(scmd->cmnd, open_for_write)) {
+		ret = -EPERM;
+		goto out_free_req;
+	}
+
+	pdu->sense_addr = cmd->response;
+	scmd->sense_len = cmd->max_response_len ?
+		min_t(u32, cmd->max_response_len, SCSI_SENSE_BUFFERSIZE) :
+		SCSI_SENSE_BUFFERSIZE;
+
+	if (cmd->xfer_len > 0) {
+		ret = scsi_bsg_map_user_buffer(req, ioucmd, issue_flags,
+					       gfp_mask);
+		if (ret)
+			goto out_free_req;
+		pdu->bio = req->bio;
+	} else {
+		pdu->bio = NULL;
+	}
+
+	req->timeout = cmd->timeout_ms ?
+		msecs_to_jiffies(cmd->timeout_ms) : BLK_DEFAULT_SG_TIMEOUT;
+
+	req->end_io = scsi_bsg_uring_cmd_done;
+	req->end_io_data = ioucmd;
+	pdu->req = req;
+
+	blk_execute_rq_nowait(req, false);
+	return -EIOCBQUEUED;
+
+out_free_req:
+	blk_mq_free_request(req);
+	return ret;
 }

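Editor's note: tying the pieces together from user space: the -EIOCBQUEUED return goes to io_uring, never to the caller; the application just reaps a CQE once the task-work callback completes the command. A sketch, again assuming IORING_SETUP_CQE32 and the hypothetical decode_res2() from earlier:

    io_uring_submit(&ring);

    struct io_uring_cqe *cqe;
    io_uring_wait_cqe(&ring, &cqe);
    if (cqe->res < 0)                        /* e.g. -EPERM, -EFAULT, -EINVAL */
            fprintf(stderr, "bsg cmd failed: %d\n", cqe->res);
    else
            decode_res2(cqe->big_cqe[0]);    /* res2 from the CQE32 payload */
    io_uring_cqe_seen(&ring, cqe);
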
 static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,