Skip to content

Commit 23bc06a

Browse files
Bob Pearson authored and jgunthorpe committed
RDMA/rxe: Don't call direct between tasks
Replace calls to rxe_run_task() with rxe_sched_task(). This prevents the tasks from all running on the same cpu. This change slightly reduces performance for single qp send and write benchmarks in loopback mode but greatly improves the performance with multiple qps because if run task is used all the work tends to be performed on one cpu. For actual on the wire benchmarks there is no noticeable performance change. Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Bob Pearson <[email protected]> Signed-off-by: Jason Gunthorpe <[email protected]>
1 parent 3d807a3 commit 23bc06a

File tree

3 files changed

+4
-23
lines changed

3 files changed

+4
-23
lines changed

drivers/infiniband/sw/rxe/rxe_comp.c

Lines changed: 2 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -129,18 +129,9 @@ void retransmit_timer(struct timer_list *t)
129129

130130
void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
131131
{
132-
int must_sched;
133-
134-
must_sched = skb_queue_len(&qp->resp_pkts) > 0;
135-
if (must_sched != 0)
136-
rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_SENDER_SCHED);
137-
132+
rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_SENDER_SCHED);
138133
skb_queue_tail(&qp->resp_pkts, skb);
139-
140-
if (must_sched)
141-
rxe_sched_task(&qp->send_task);
142-
else
143-
rxe_run_task(&qp->send_task);
134+
rxe_sched_task(&qp->send_task);
144135
}
145136

146137
static inline enum comp_state get_wqe(struct rxe_qp *qp,

drivers/infiniband/sw/rxe/rxe_resp.c

Lines changed: 1 addition & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -49,18 +49,8 @@ static char *resp_state_name[] = {
4949
/* rxe_recv calls here to add a request packet to the input queue */
5050
void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
5151
{
52-
int must_sched;
53-
struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
54-
5552
skb_queue_tail(&qp->req_pkts, skb);
56-
57-
must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
58-
(skb_queue_len(&qp->req_pkts) > 1);
59-
60-
if (must_sched)
61-
rxe_sched_task(&qp->recv_task);
62-
else
63-
rxe_run_task(&qp->recv_task);
53+
rxe_sched_task(&qp->recv_task);
6454
}
6555

6656
static inline enum resp_states get_req(struct rxe_qp *qp,

drivers/infiniband/sw/rxe/rxe_verbs.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -935,7 +935,7 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
935935

936936
if (qp->is_user) {
937937
/* Utilize process context to do protocol processing */
938-
rxe_run_task(&qp->send_task);
938+
rxe_sched_task(&qp->send_task);
939939
} else {
940940
err = rxe_post_send_kernel(qp, wr, bad_wr);
941941
if (err)

0 commit comments

Comments (0)