
Commit 1ba2e50

igaw authored and Christoph Hellwig committed
nvme-tcp: Do not reset transport on data digest errors
The spec says:

  7.4.6.1 Digest Error handling

  When a host detects a data digest error in a C2HData PDU, that host
  shall continue processing C2HData PDUs associated with the command and
  when the command processing has completed, if a successful status was
  returned by the controller, the host shall fail the command with a
  non-fatal transport error.

Currently the transport is reset when a data digest error is detected.
Instead, when a digest error is detected, mark the final status as
NVME_SC_DATA_XFER_ERROR and let the upper layer handle the error.

In order to keep track of the final result, maintain a status field in
the nvme_tcp_request object and use it to overwrite the completion
queue status (which might be successful even though a digest error has
been detected) when completing the request.

Signed-off-by: Daniel Wagner <[email protected]>
Reviewed-by: Sagi Grimberg <[email protected]>
Reviewed-by: Hannes Reinecke <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
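A rough userspace sketch of the "first error wins" status tracking the
message describes; struct demo_request and the DEMO_SC_* constants are
invented for this illustration and are not kernel API:

/*
 * Illustrative sketch only: a per-command status that is seeded to
 * success, overwritten once by the first transport error, and never
 * masked by a later successful completion from the controller.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_SC_SUCCESS         0x0
#define DEMO_SC_DATA_XFER_ERROR 0x4	/* stand-in for NVME_SC_DATA_XFER_ERROR */

struct demo_request {
	uint16_t status;	/* seeded to success when the command is set up */
};

/* Called when a data digest mismatch is noticed mid-transfer. */
static void demo_note_digest_error(struct demo_request *req)
{
	req->status = DEMO_SC_DATA_XFER_ERROR;
}

/*
 * Called when the controller's completion arrives: adopt the
 * controller's status only if no transport error was recorded,
 * so a digest error is never hidden by a successful CQE.
 */
static uint16_t demo_final_status(struct demo_request *req,
				  uint16_t cqe_status)
{
	if (req->status == DEMO_SC_SUCCESS)
		req->status = cqe_status;
	return req->status;
}

int main(void)
{
	struct demo_request req = { .status = DEMO_SC_SUCCESS };

	demo_note_digest_error(&req);	/* digest mismatch observed */

	/* Controller reports success, but the recorded error wins. */
	printf("final status: %#x\n", demo_final_status(&req, DEMO_SC_SUCCESS));
	return 0;
}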
1 parent f040648 commit 1ba2e50

File tree

1 file changed: 18 additions, 4 deletions


drivers/nvme/host/tcp.c

Lines changed: 18 additions & 4 deletions
@@ -45,6 +45,7 @@ struct nvme_tcp_request {
 	u32			pdu_len;
 	u32			pdu_sent;
 	u16			ttag;
+	__le16			status;
 	struct list_head	entry;
 	struct llist_node	lentry;
 	__le32			ddgst;
@@ -485,6 +486,7 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
 		struct nvme_completion *cqe)
 {
+	struct nvme_tcp_request *req;
 	struct request *rq;
 
 	rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
@@ -496,7 +498,11 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
 		return -EINVAL;
 	}
 
-	if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
+	req = blk_mq_rq_to_pdu(rq);
+	if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
+		req->status = cqe->status;
+
+	if (!nvme_try_complete_req(rq, req->status, cqe->result))
 		nvme_complete_rq(rq);
 	queue->nr_cqe++;
 
@@ -758,7 +764,8 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
 		queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
 	} else {
 		if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
-			nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
+			nvme_tcp_end_request(rq,
+					le16_to_cpu(req->status));
 			queue->nr_cqe++;
 		}
 		nvme_tcp_init_recv_ctx(queue);
@@ -788,18 +795,24 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
 		return 0;
 
 	if (queue->recv_ddgst != queue->exp_ddgst) {
+		struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
+					pdu->command_id);
+		struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+
+		req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
+
 		dev_err(queue->ctrl->ctrl.device,
 			"data digest error: recv %#x expected %#x\n",
 			le32_to_cpu(queue->recv_ddgst),
 			le32_to_cpu(queue->exp_ddgst));
-		return -EIO;
 	}
 
 	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
 		struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
 					pdu->command_id);
+		struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
 
-		nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
+		nvme_tcp_end_request(rq, le16_to_cpu(req->status));
 		queue->nr_cqe++;
 	}
 
@@ -2293,6 +2306,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
 		return ret;
 
 	req->state = NVME_TCP_SEND_CMD_PDU;
+	req->status = cpu_to_le16(NVME_SC_SUCCESS);
 	req->offset = 0;
 	req->data_sent = 0;
 	req->pdu_len = 0;
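One detail worth noting in the diff: req->status is declared __le16 and
kept in wire byte order, the same representation as cqe->status, so the
compare-and-adopt in nvme_tcp_process_nvme_cqe() needs no byte swap;
conversion happens only at the edges (cpu_to_le16() when seeding the
field or recording the digest error, le16_to_cpu() when handing the
result to nvme_tcp_end_request()). A minimal userspace sketch of that
convention, with demo_* helpers standing in for the kernel's byte-order
macros (the helpers and values are invented for illustration):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Userspace stand-ins for the kernel's cpu_to_le16()/le16_to_cpu(). */
static uint16_t demo_cpu_to_le16(uint16_t v)
{
	uint8_t b[2] = { (uint8_t)(v & 0xff), (uint8_t)(v >> 8) };
	uint16_t le;

	memcpy(&le, b, sizeof(le));	/* store the little-endian byte pattern */
	return le;
}

static uint16_t demo_le16_to_cpu(uint16_t le)
{
	uint8_t b[2];

	memcpy(b, &le, sizeof(le));
	return (uint16_t)(b[0] | (b[1] << 8));
}

int main(void)
{
	/* Keep the stored status in wire (little-endian) order throughout... */
	uint16_t status = demo_cpu_to_le16(0x0);	/* success */

	/* ...so comparisons against wire-order constants need no swap... */
	if (status == demo_cpu_to_le16(0x0))
		status = demo_cpu_to_le16(0x4);	/* record a transfer error */

	/* ...and convert back to CPU order only when completing. */
	printf("completing with status %#x\n", demo_le16_to_cpu(status));
	return 0;
}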
