
Commit 8d99e09

Michal Wajdeczko authored and Daniel Vetter committed
drm/i915/guc: Always copy CT message to new allocation
Since most future CT traffic will be based on G2H requests, instead of copying an incoming CT message into a static buffer and then creating a new allocation for that request, always copy the incoming CT message into a new allocation. By doing this while reading the CT header, we can also safely fall back if that atomic allocation fails.

Signed-off-by: Michal Wajdeczko <[email protected]>
Signed-off-by: Matthew Brost <[email protected]>
Reviewed-by: Matthew Brost <[email protected]>
Cc: Piotr Piórkowski <[email protected]>
Signed-off-by: Daniel Vetter <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
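In outline, the new flow makes one heap allocation per incoming CT message while the header is being read, hands ownership to the handler (which frees the message on success), and lets the reader free it only on error. The following is a minimal, standalone C sketch of that ownership pattern, not the i915 code itself: the names incoming_msg, msg_alloc, msg_free, handle_msg and receive_one are illustrative stand-ins, and plain malloc() stands in for kmalloc(..., GFP_ATOMIC).

#include <stdlib.h>
#include <string.h>

struct incoming_msg {
	unsigned int size;	/* message length in dwords (header + payload) */
	unsigned int msg[];	/* contents copied out of the ring buffer */
};

/* One allocation per message; NULL means "could not allocate right now". */
static struct incoming_msg *msg_alloc(unsigned int num_dwords)
{
	struct incoming_msg *m = malloc(sizeof(*m) + sizeof(unsigned int) * num_dwords);

	if (m)
		m->size = num_dwords;
	return m;
}

static void msg_free(struct incoming_msg *m)
{
	free(m);
}

/* The handler owns the message and frees it when it succeeds. */
static int handle_msg(struct incoming_msg *m)
{
	/* ...dispatch on the header in m->msg[0]... */
	msg_free(m);
	return 0;
}

/* The reader keeps ownership only when allocation or handling fails. */
static void receive_one(const unsigned int *ring, unsigned int len)
{
	struct incoming_msg *m = msg_alloc(len);

	if (!m)
		return;	/* allocation failed: give up for now (the patch leaves the message in the ring) */

	memcpy(m->msg, ring, sizeof(unsigned int) * len);
	if (handle_msg(m))
		msg_free(m);	/* error path: the caller frees instead */
}

int main(void)
{
	unsigned int ring[4] = { 0x1003, 0xa, 0xb, 0xc };	/* fake header plus three payload dwords */

	receive_one(ring, 4);
	return 0;
}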
1 parent 65dd4ed commit 8d99e09

File tree

1 file changed: +120 −60 lines


drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c

Lines changed: 120 additions & 60 deletions
@@ -72,8 +72,9 @@ struct ct_request {
 	u32 *response_buf;
 };
 
-struct ct_incoming_request {
+struct ct_incoming_msg {
 	struct list_head link;
+	u32 size;
 	u32 msg[];
 };
 
@@ -597,7 +598,26 @@ static inline bool ct_header_is_response(u32 header)
 	return !!(header & GUC_CT_MSG_IS_RESPONSE);
 }
 
-static int ct_read(struct intel_guc_ct *ct, u32 *data)
+static struct ct_incoming_msg *ct_alloc_msg(u32 num_dwords)
+{
+	struct ct_incoming_msg *msg;
+
+	msg = kmalloc(sizeof(*msg) + sizeof(u32) * num_dwords, GFP_ATOMIC);
+	if (msg)
+		msg->size = num_dwords;
+	return msg;
+}
+
+static void ct_free_msg(struct ct_incoming_msg *msg)
+{
+	kfree(msg);
+}
+
+/*
+ * Return: number available remaining dwords to read (0 if empty)
+ * or a negative error code on failure
+ */
+static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
 {
 	struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;
 	struct guc_ct_buffer_desc *desc = ctb->desc;
@@ -608,6 +628,7 @@ static int ct_read(struct intel_guc_ct *ct, u32 *data)
 	s32 available;
 	unsigned int len;
 	unsigned int i;
+	u32 header;
 
 	if (unlikely(desc->is_in_error))
 		return -EPIPE;
@@ -623,35 +644,50 @@ static int ct_read(struct intel_guc_ct *ct, u32 *data)
 
 	/* tail == head condition indicates empty */
 	available = tail - head;
-	if (unlikely(available == 0))
-		return -ENODATA;
+	if (unlikely(available == 0)) {
+		*msg = NULL;
+		return 0;
+	}
 
 	/* beware of buffer wrap case */
 	if (unlikely(available < 0))
 		available += size;
 	CT_DEBUG(ct, "available %d (%u:%u)\n", available, head, tail);
 	GEM_BUG_ON(available < 0);
 
-	data[0] = cmds[head];
+	header = cmds[head];
 	head = (head + 1) % size;
 
 	/* message len with header */
-	len = ct_header_get_len(data[0]) + 1;
+	len = ct_header_get_len(header) + 1;
 	if (unlikely(len > (u32)available)) {
 		CT_ERROR(ct, "Incomplete message %*ph %*ph %*ph\n",
-			 4, data,
+			 4, &header,
 			 4 * (head + available - 1 > size ?
 			      size - head : available - 1), &cmds[head],
 			 4 * (head + available - 1 > size ?
 			      available - 1 - size + head : 0), &cmds[0]);
 		goto corrupted;
 	}
 
+	*msg = ct_alloc_msg(len);
+	if (!*msg) {
+		CT_ERROR(ct, "No memory for message %*ph %*ph %*ph\n",
+			 4, &header,
+			 4 * (head + available - 1 > size ?
+			      size - head : available - 1), &cmds[head],
+			 4 * (head + available - 1 > size ?
+			      available - 1 - size + head : 0), &cmds[0]);
+		return available;
+	}
+
+	(*msg)->msg[0] = header;
+
 	for (i = 1; i < len; i++) {
-		data[i] = cmds[head];
+		(*msg)->msg[i] = cmds[head];
 		head = (head + 1) % size;
 	}
-	CT_DEBUG(ct, "received %*ph\n", 4 * len, data);
+	CT_DEBUG(ct, "received %*ph\n", 4 * len, (*msg)->msg);
 
 	desc->head = head * 4;
 	return available - len;
@@ -681,33 +717,33 @@ static int ct_read(struct intel_guc_ct *ct, u32 *data)
  * ^-----------------------len-----------------------^
  */
 
-static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
+static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *response)
 {
-	u32 header = msg[0];
+	u32 header = response->msg[0];
 	u32 len = ct_header_get_len(header);
-	u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */
 	u32 fence;
 	u32 status;
 	u32 datalen;
 	struct ct_request *req;
 	unsigned long flags;
 	bool found = false;
+	int err = 0;
 
 	GEM_BUG_ON(!ct_header_is_response(header));
 
 	/* Response payload shall at least include fence and status */
 	if (unlikely(len < 2)) {
-		CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg);
+		CT_ERROR(ct, "Corrupted response (len %u)\n", len);
 		return -EPROTO;
 	}
 
-	fence = msg[1];
-	status = msg[2];
+	fence = response->msg[1];
+	status = response->msg[2];
 	datalen = len - 2;
 
 	/* Format of the status follows RESPONSE message */
 	if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
-		CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg);
+		CT_ERROR(ct, "Corrupted response (status %#x)\n", status);
 		return -EPROTO;
 	}
 
@@ -721,58 +757,75 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
 			continue;
 		}
 		if (unlikely(datalen > req->response_len)) {
-			CT_ERROR(ct, "Response for %u is too long %*ph\n",
-				 req->fence, msgsize, msg);
-			datalen = 0;
+			CT_ERROR(ct, "Response %u too long (datalen %u > %u)\n",
+				 req->fence, datalen, req->response_len);
+			datalen = min(datalen, req->response_len);
+			err = -EMSGSIZE;
 		}
 		if (datalen)
-			memcpy(req->response_buf, msg + 3, 4 * datalen);
+			memcpy(req->response_buf, response->msg + 3, 4 * datalen);
 		req->response_len = datalen;
 		WRITE_ONCE(req->status, status);
 		found = true;
 		break;
 	}
 	spin_unlock_irqrestore(&ct->requests.lock, flags);
 
-	if (!found)
-		CT_ERROR(ct, "Unsolicited response %*ph\n", msgsize, msg);
+	if (!found) {
+		CT_ERROR(ct, "Unsolicited response (fence %u)\n", fence);
+		return -ENOKEY;
+	}
+
+	if (unlikely(err))
+		return err;
+
+	ct_free_msg(response);
 	return 0;
 }
 
-static void ct_process_request(struct intel_guc_ct *ct,
-			       u32 action, u32 len, const u32 *payload)
+static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
 {
 	struct intel_guc *guc = ct_to_guc(ct);
+	u32 header, action, len;
+	const u32 *payload;
 	int ret;
 
+	header = request->msg[0];
+	payload = &request->msg[1];
+	action = ct_header_get_action(header);
+	len = ct_header_get_len(header);
+
 	CT_DEBUG(ct, "request %x %*ph\n", action, 4 * len, payload);
 
 	switch (action) {
 	case INTEL_GUC_ACTION_DEFAULT:
 		ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
-		if (unlikely(ret))
-			goto fail_unexpected;
 		break;
-
 	default:
-fail_unexpected:
-		CT_ERROR(ct, "Unexpected request %x %*ph\n",
-			 action, 4 * len, payload);
+		ret = -EOPNOTSUPP;
 		break;
 	}
+
+	if (unlikely(ret)) {
+		CT_ERROR(ct, "Failed to process request %04x (%pe)\n",
+			 action, ERR_PTR(ret));
+		return ret;
+	}
+
+	ct_free_msg(request);
+	return 0;
 }
 
 static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
 {
 	unsigned long flags;
-	struct ct_incoming_request *request;
-	u32 header;
-	u32 *payload;
+	struct ct_incoming_msg *request;
 	bool done;
+	int err;
 
 	spin_lock_irqsave(&ct->requests.lock, flags);
 	request = list_first_entry_or_null(&ct->requests.incoming,
-					   struct ct_incoming_request, link);
+					   struct ct_incoming_msg, link);
 	if (request)
 		list_del(&request->link);
 	done = !!list_empty(&ct->requests.incoming);
@@ -781,14 +834,13 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
 	if (!request)
 		return true;
 
-	header = request->msg[0];
-	payload = &request->msg[1];
-	ct_process_request(ct,
-			   ct_header_get_action(header),
-			   ct_header_get_len(header),
-			   payload);
+	err = ct_process_request(ct, request);
+	if (unlikely(err)) {
+		CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
+			 ERR_PTR(err), 4 * request->size, request->msg);
+		ct_free_msg(request);
+	}
 
-	kfree(request);
 	return done;
 }
 
@@ -821,22 +873,11 @@ static void ct_incoming_request_worker_func(struct work_struct *w)
  * ^-----------------------len-----------------------^
  */
 
-static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
+static int ct_handle_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
 {
-	u32 header = msg[0];
-	u32 len = ct_header_get_len(header);
-	u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */
-	struct ct_incoming_request *request;
 	unsigned long flags;
 
-	GEM_BUG_ON(ct_header_is_response(header));
-
-	request = kmalloc(sizeof(*request) + msgsize, GFP_ATOMIC);
-	if (unlikely(!request)) {
-		CT_ERROR(ct, "Dropping request %*ph\n", msgsize, msg);
-		return 0; /* XXX: -ENOMEM ? */
-	}
-	memcpy(request->msg, msg, msgsize);
+	GEM_BUG_ON(ct_header_is_response(request->msg[0]));
 
 	spin_lock_irqsave(&ct->requests.lock, flags);
 	list_add_tail(&request->link, &ct->requests.incoming);
@@ -846,22 +887,41 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
 	return 0;
 }
 
+static void ct_handle_msg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
+{
+	u32 header = msg->msg[0];
+	int err;
+
+	if (ct_header_is_response(header))
+		err = ct_handle_response(ct, msg);
+	else
+		err = ct_handle_request(ct, msg);
+
+	if (unlikely(err)) {
+		CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
+			 ERR_PTR(err), 4 * msg->size, msg->msg);
+		ct_free_msg(msg);
+	}
+}
+
+/*
+ * Return: number available remaining dwords to read (0 if empty)
+ * or a negative error code on failure
+ */
 static int ct_receive(struct intel_guc_ct *ct)
 {
-	u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
+	struct ct_incoming_msg *msg = NULL;
 	unsigned long flags;
 	int ret;
 
 	spin_lock_irqsave(&ct->ctbs.recv.lock, flags);
-	ret = ct_read(ct, msg);
+	ret = ct_read(ct, &msg);
 	spin_unlock_irqrestore(&ct->ctbs.recv.lock, flags);
 	if (ret < 0)
 		return ret;
 
-	if (ct_header_is_response(msg[0]))
-		ct_handle_response(ct, msg);
-	else
-		ct_handle_request(ct, msg);
+	if (msg)
+		ct_handle_msg(ct, msg);
 
 	return ret;
 }
