Skip to content

Commit f1b7b86

Browse files
longlimsft authored and smfrench committed
cifs: smbd: Properly process errors on ib_post_send
When processing errors from ib_post_send(), the transport state needs to be rolled back to the condition before the error. Refactor the old code to make it easy to roll back on IB errors, and fix this. Signed-off-by: Long Li <[email protected]> Signed-off-by: Steve French <[email protected]>
1 parent eda1c54 commit f1b7b86

File tree

1 file changed

+97
-123
lines changed

1 file changed

+97
-123
lines changed

fs/cifs/smbdirect.c

Lines changed: 97 additions & 123 deletions
Original file line numberDiff line numberDiff line change
@@ -800,41 +800,91 @@ static int manage_keep_alive_before_sending(struct smbd_connection *info)
800800
return 0;
801801
}
802802

803-
/*
804-
* Build and prepare the SMBD packet header
805-
* This function waits for available send credits and builds a SMBD packet
806-
* header. The caller then optionally appends payload to the packet after
807-
* the header
808-
* input values
809-
* size: the size of the payload
810-
* remaining_data_length: remaining data to send if this is part of a
811-
* fragmented packet
812-
* output values
813-
* request_out: the request allocated from this function
814-
* return values: 0 on success, otherwise actual error code returned
815-
*/
816-
static int smbd_create_header(struct smbd_connection *info,
817-
int size, int remaining_data_length,
818-
struct smbd_request **request_out)
803+
/* Post the send request */
804+
static int smbd_post_send(struct smbd_connection *info,
805+
struct smbd_request *request)
806+
{
807+
struct ib_send_wr send_wr;
808+
int rc, i;
809+
810+
for (i = 0; i < request->num_sge; i++) {
811+
log_rdma_send(INFO,
812+
"rdma_request sge[%d] addr=%llu length=%u\n",
813+
i, request->sge[i].addr, request->sge[i].length);
814+
ib_dma_sync_single_for_device(
815+
info->id->device,
816+
request->sge[i].addr,
817+
request->sge[i].length,
818+
DMA_TO_DEVICE);
819+
}
820+
821+
request->cqe.done = send_done;
822+
823+
send_wr.next = NULL;
824+
send_wr.wr_cqe = &request->cqe;
825+
send_wr.sg_list = request->sge;
826+
send_wr.num_sge = request->num_sge;
827+
send_wr.opcode = IB_WR_SEND;
828+
send_wr.send_flags = IB_SEND_SIGNALED;
829+
830+
rc = ib_post_send(info->id->qp, &send_wr, NULL);
831+
if (rc) {
832+
log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
833+
smbd_disconnect_rdma_connection(info);
834+
rc = -EAGAIN;
835+
} else
836+
/* Reset timer for idle connection after packet is sent */
837+
mod_delayed_work(info->workqueue, &info->idle_timer_work,
838+
info->keep_alive_interval*HZ);
839+
840+
return rc;
841+
}
842+
843+
static int smbd_post_send_sgl(struct smbd_connection *info,
844+
struct scatterlist *sgl, int data_length, int remaining_data_length)
819845
{
846+
int num_sgs;
847+
int i, rc;
848+
int header_length;
820849
struct smbd_request *request;
821850
struct smbd_data_transfer *packet;
822-
int header_length;
823851
int new_credits;
824-
int rc;
852+
struct scatterlist *sg;
825853

854+
wait_credit:
826855
/* Wait for send credits. A SMBD packet needs one credit */
827856
rc = wait_event_interruptible(info->wait_send_queue,
828857
atomic_read(&info->send_credits) > 0 ||
829858
info->transport_status != SMBD_CONNECTED);
830859
if (rc)
831-
return rc;
860+
goto err_wait_credit;
832861

833862
if (info->transport_status != SMBD_CONNECTED) {
834-
log_outgoing(ERR, "disconnected not sending\n");
835-
return -EAGAIN;
863+
log_outgoing(ERR, "disconnected not sending on wait_credit\n");
864+
rc = -EAGAIN;
865+
goto err_wait_credit;
866+
}
867+
if (unlikely(atomic_dec_return(&info->send_credits) < 0)) {
868+
atomic_inc(&info->send_credits);
869+
goto wait_credit;
870+
}
871+
872+
wait_send_queue:
873+
wait_event(info->wait_post_send,
874+
atomic_read(&info->send_pending) < info->send_credit_target ||
875+
info->transport_status != SMBD_CONNECTED);
876+
877+
if (info->transport_status != SMBD_CONNECTED) {
878+
log_outgoing(ERR, "disconnected not sending on wait_send_queue\n");
879+
rc = -EAGAIN;
880+
goto err_wait_send_queue;
881+
}
882+
883+
if (unlikely(atomic_inc_return(&info->send_pending) >
884+
info->send_credit_target)) {
885+
atomic_dec(&info->send_pending);
886+
goto wait_send_queue;
836887
}
837-
atomic_dec(&info->send_credits);
838888

839889
request = mempool_alloc(info->request_mempool, GFP_KERNEL);
840890
if (!request) {
@@ -859,11 +909,11 @@ static int smbd_create_header(struct smbd_connection *info,
859909
packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED);
860910

861911
packet->reserved = 0;
862-
if (!size)
912+
if (!data_length)
863913
packet->data_offset = 0;
864914
else
865915
packet->data_offset = cpu_to_le32(24);
866-
packet->data_length = cpu_to_le32(size);
916+
packet->data_length = cpu_to_le32(data_length);
867917
packet->remaining_data_length = cpu_to_le32(remaining_data_length);
868918
packet->padding = 0;
869919

@@ -878,7 +928,7 @@ static int smbd_create_header(struct smbd_connection *info,
878928
/* Map the packet to DMA */
879929
header_length = sizeof(struct smbd_data_transfer);
880930
/* If this is a packet without payload, don't send padding */
881-
if (!size)
931+
if (!data_length)
882932
header_length = offsetof(struct smbd_data_transfer, padding);
883933

884934
request->num_sge = 1;
@@ -887,107 +937,15 @@ static int smbd_create_header(struct smbd_connection *info,
887937
header_length,
888938
DMA_TO_DEVICE);
889939
if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
890-
mempool_free(request, info->request_mempool);
891940
rc = -EIO;
941+
request->sge[0].addr = 0;
892942
goto err_dma;
893943
}
894944

895945
request->sge[0].length = header_length;
896946
request->sge[0].lkey = info->pd->local_dma_lkey;
897947

898-
*request_out = request;
899-
return 0;
900-
901-
err_dma:
902-
/* roll back receive credits */
903-
spin_lock(&info->lock_new_credits_offered);
904-
info->new_credits_offered += new_credits;
905-
spin_unlock(&info->lock_new_credits_offered);
906-
atomic_sub(new_credits, &info->receive_credits);
907-
908-
err_alloc:
909-
/* roll back send credits */
910-
atomic_inc(&info->send_credits);
911-
912-
return rc;
913-
}
914-
915-
static void smbd_destroy_header(struct smbd_connection *info,
916-
struct smbd_request *request)
917-
{
918-
919-
ib_dma_unmap_single(info->id->device,
920-
request->sge[0].addr,
921-
request->sge[0].length,
922-
DMA_TO_DEVICE);
923-
mempool_free(request, info->request_mempool);
924-
atomic_inc(&info->send_credits);
925-
}
926-
927-
/* Post the send request */
928-
static int smbd_post_send(struct smbd_connection *info,
929-
struct smbd_request *request)
930-
{
931-
struct ib_send_wr send_wr;
932-
int rc, i;
933-
934-
for (i = 0; i < request->num_sge; i++) {
935-
log_rdma_send(INFO,
936-
"rdma_request sge[%d] addr=%llu length=%u\n",
937-
i, request->sge[i].addr, request->sge[i].length);
938-
ib_dma_sync_single_for_device(
939-
info->id->device,
940-
request->sge[i].addr,
941-
request->sge[i].length,
942-
DMA_TO_DEVICE);
943-
}
944-
945-
request->cqe.done = send_done;
946-
947-
send_wr.next = NULL;
948-
send_wr.wr_cqe = &request->cqe;
949-
send_wr.sg_list = request->sge;
950-
send_wr.num_sge = request->num_sge;
951-
send_wr.opcode = IB_WR_SEND;
952-
send_wr.send_flags = IB_SEND_SIGNALED;
953-
954-
wait_sq:
955-
wait_event(info->wait_post_send,
956-
atomic_read(&info->send_pending) < info->send_credit_target);
957-
if (unlikely(atomic_inc_return(&info->send_pending) >
958-
info->send_credit_target)) {
959-
atomic_dec(&info->send_pending);
960-
goto wait_sq;
961-
}
962-
963-
rc = ib_post_send(info->id->qp, &send_wr, NULL);
964-
if (rc) {
965-
log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
966-
if (atomic_dec_and_test(&info->send_pending))
967-
wake_up(&info->wait_send_pending);
968-
smbd_disconnect_rdma_connection(info);
969-
rc = -EAGAIN;
970-
} else
971-
/* Reset timer for idle connection after packet is sent */
972-
mod_delayed_work(info->workqueue, &info->idle_timer_work,
973-
info->keep_alive_interval*HZ);
974-
975-
return rc;
976-
}
977-
978-
static int smbd_post_send_sgl(struct smbd_connection *info,
979-
struct scatterlist *sgl, int data_length, int remaining_data_length)
980-
{
981-
int num_sgs;
982-
int i, rc;
983-
struct smbd_request *request;
984-
struct scatterlist *sg;
985-
986-
rc = smbd_create_header(
987-
info, data_length, remaining_data_length, &request);
988-
if (rc)
989-
return rc;
990-
948+
/* Fill in the packet data payload */
991949
num_sgs = sgl ? sg_nents(sgl) : 0;
992950
for_each_sg(sgl, sg, num_sgs, i) {
993951
request->sge[i+1].addr =
@@ -997,7 +955,7 @@ static int smbd_post_send_sgl(struct smbd_connection *info,
997955
info->id->device, request->sge[i+1].addr)) {
998956
rc = -EIO;
999957
request->sge[i+1].addr = 0;
1000-
goto dma_mapping_failure;
958+
goto err_dma;
1001959
}
1002960
request->sge[i+1].length = sg->length;
1003961
request->sge[i+1].lkey = info->pd->local_dma_lkey;
@@ -1008,14 +966,30 @@ static int smbd_post_send_sgl(struct smbd_connection *info,
1008966
if (!rc)
1009967
return 0;
1010968

1011-
dma_mapping_failure:
1012-
for (i = 1; i < request->num_sge; i++)
969+
err_dma:
970+
for (i = 0; i < request->num_sge; i++)
1013971
if (request->sge[i].addr)
1014972
ib_dma_unmap_single(info->id->device,
1015973
request->sge[i].addr,
1016974
request->sge[i].length,
1017975
DMA_TO_DEVICE);
1018-
smbd_destroy_header(info, request);
976+
mempool_free(request, info->request_mempool);
977+
978+
/* roll back receive credits and credits to be offered */
979+
spin_lock(&info->lock_new_credits_offered);
980+
info->new_credits_offered += new_credits;
981+
spin_unlock(&info->lock_new_credits_offered);
982+
atomic_sub(new_credits, &info->receive_credits);
983+
984+
err_alloc:
985+
if (atomic_dec_and_test(&info->send_pending))
986+
wake_up(&info->wait_send_pending);
987+
988+
err_wait_send_queue:
989+
/* roll back send credits and pending */
990+
atomic_inc(&info->send_credits);
991+
992+
err_wait_credit:
1019993
return rc;
1020994
}
1021995

0 commit comments

Comments
 (0)