Skip to content

Commit 2f2ba3c

Browse files
Wayne Lin authored and gregkh committed
drm/amd/display: Add polling method to handle MST reply packet
commit 4f6d9e3 upstream.

[Why]
A specific TBT4 dock doesn't send out a short HPD to notify the source that the IRQ event DOWN_REP_MSG_RDY is set, which violates the spec and causes the source to be unable to send out streams to MST sinks.

[How]
To cover this misbehavior, add an additional polling method to detect that DOWN_REP_MSG_RDY is set. The HPD-driven handling method is still kept; just hook up our handler to drm mgr->cbs->poll_hpd_irq().

Cc: Mario Limonciello <[email protected]> Cc: Alex Deucher <[email protected]> Cc: [email protected] Reviewed-by: Jerry Zuo <[email protected]> Acked-by: Alan Liu <[email protected]> Signed-off-by: Wayne Lin <[email protected]> Tested-by: Daniel Wheeler <[email protected]> Signed-off-by: Alex Deucher <[email protected]> Signed-off-by: Mario Limonciello <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent 78ea2ed commit 2f2ba3c

File tree

4 files changed

+159
-86
lines changed

4 files changed

+159
-86
lines changed

drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

Lines changed: 31 additions & 86 deletions
Original file line numberDiff line numberDiff line change
@@ -1325,6 +1325,15 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
13251325
if (amdgpu_in_reset(adev))
13261326
goto skip;
13271327

1328+
if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
1329+
offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
1330+
dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
1331+
spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1332+
offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
1333+
spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1334+
goto skip;
1335+
}
1336+
13281337
mutex_lock(&adev->dm.dc_lock);
13291338
if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
13301339
dc_link_dp_handle_automated_test(dc_link);
@@ -3229,87 +3238,6 @@ static void handle_hpd_irq(void *param)
32293238

32303239
}
32313240

3232-
/*
 * Drain pending MST sideband-message IRQs for @aconnector (legacy
 * short-HPD path; removed by this patch in favor of
 * dm_handle_mst_sideband_msg_ready_event()).
 *
 * Reads the sink's IRQ/ESI DPCD registers (0x200-0x201 for pre-1.2
 * DPCD, 0x2002-0x2005 otherwise), lets the DRM MST helper process the
 * reported events, ACKs them back to the sink and re-reads until no new
 * IRQ is raised.  The loop is bounded at 30 iterations so a misbehaving
 * sink cannot wedge the handler.
 */
static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
{
	u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	u8 dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
		process_count < max_process_count) {
		u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
		u8 retry;

		/* dret stays 0 unless the ACK path below re-reads the ESI */
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
							esi,
							ack,
							&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			for (retry = 0; retry < 3; retry++) {
				ssize_t wret;

				wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
							  dpcd_addr + 1,
							  ack[1]);
				if (wret == 1)
					break;
			}

			if (retry == 3) {
				DRM_ERROR("Failed to ack MST event.\n");
				return;
			}

			drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
3312-
33133241
static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
33143242
union hpd_irq_data hpd_irq_data)
33153243
{
@@ -3371,7 +3299,23 @@ static void handle_hpd_rx_irq(void *param)
33713299
if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
33723300
if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
33733301
hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3374-
dm_handle_mst_sideband_msg(aconnector);
3302+
bool skip = false;
3303+
3304+
/*
3305+
* DOWN_REP_MSG_RDY is also handled by polling method
3306+
* mgr->cbs->poll_hpd_irq()
3307+
*/
3308+
spin_lock(&offload_wq->offload_lock);
3309+
skip = offload_wq->is_handling_mst_msg_rdy_event;
3310+
3311+
if (!skip)
3312+
offload_wq->is_handling_mst_msg_rdy_event = true;
3313+
3314+
spin_unlock(&offload_wq->offload_lock);
3315+
3316+
if (!skip)
3317+
schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3318+
33753319
goto out;
33763320
}
33773321

@@ -3482,11 +3426,11 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
34823426
amdgpu_dm_irq_register_interrupt(adev, &int_params,
34833427
handle_hpd_rx_irq,
34843428
(void *) aconnector);
3485-
3486-
if (adev->dm.hpd_rx_offload_wq)
3487-
adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector =
3488-
aconnector;
34893429
}
3430+
3431+
if (adev->dm.hpd_rx_offload_wq)
3432+
adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3433+
aconnector;
34903434
}
34913435
}
34923436

@@ -7082,6 +7026,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
70827026
aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
70837027
aconnector->audio_inst = -1;
70847028
mutex_init(&aconnector->hpd_lock);
7029+
mutex_init(&aconnector->handle_mst_msg_ready);
70857030

70867031
/*
70877032
* configure support HPD hot plug connector->polled default value is 0

drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -193,6 +193,11 @@ struct hpd_rx_irq_offload_work_queue {
193193
* we're handling link loss
194194
*/
195195
bool is_handling_link_loss;
196+
/**
197+
* @is_handling_mst_msg_rdy_event: Used to prevent inserting mst message
198+
* ready event when we're already handling mst message ready event
199+
*/
200+
bool is_handling_mst_msg_rdy_event;
196201
/**
197202
* @aconnector: The aconnector that this work queue is attached to
198203
*/
@@ -614,6 +619,8 @@ struct amdgpu_dm_connector {
614619
struct drm_dp_mst_port *port;
615620
struct amdgpu_dm_connector *mst_port;
616621
struct drm_dp_aux *dsc_aux;
622+
struct mutex handle_mst_msg_ready;
623+
617624
/* TODO see if we can merge with ddc_bus or make a dm_connector */
618625
struct amdgpu_i2c_adapter *i2c;
619626

drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c

Lines changed: 110 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -590,8 +590,118 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
590590
return connector;
591591
}
592592

593+
void dm_handle_mst_sideband_msg_ready_event(
594+
struct drm_dp_mst_topology_mgr *mgr,
595+
enum mst_msg_ready_type msg_rdy_type)
596+
{
597+
uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
598+
uint8_t dret;
599+
bool new_irq_handled = false;
600+
int dpcd_addr;
601+
uint8_t dpcd_bytes_to_read;
602+
const uint8_t max_process_count = 30;
603+
uint8_t process_count = 0;
604+
u8 retry;
605+
struct amdgpu_dm_connector *aconnector =
606+
container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
607+
608+
609+
const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
610+
611+
if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
612+
dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
613+
/* DPCD 0x200 - 0x201 for downstream IRQ */
614+
dpcd_addr = DP_SINK_COUNT;
615+
} else {
616+
dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
617+
/* DPCD 0x2002 - 0x2005 for downstream IRQ */
618+
dpcd_addr = DP_SINK_COUNT_ESI;
619+
}
620+
621+
mutex_lock(&aconnector->handle_mst_msg_ready);
622+
623+
while (process_count < max_process_count) {
624+
u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
625+
626+
process_count++;
627+
628+
dret = drm_dp_dpcd_read(
629+
&aconnector->dm_dp_aux.aux,
630+
dpcd_addr,
631+
esi,
632+
dpcd_bytes_to_read);
633+
634+
if (dret != dpcd_bytes_to_read) {
635+
DRM_DEBUG_KMS("DPCD read and acked number is not as expected!");
636+
break;
637+
}
638+
639+
DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
640+
641+
switch (msg_rdy_type) {
642+
case DOWN_REP_MSG_RDY_EVENT:
643+
/* Only handle DOWN_REP_MSG_RDY case*/
644+
esi[1] &= DP_DOWN_REP_MSG_RDY;
645+
break;
646+
case UP_REQ_MSG_RDY_EVENT:
647+
/* Only handle UP_REQ_MSG_RDY case*/
648+
esi[1] &= DP_UP_REQ_MSG_RDY;
649+
break;
650+
default:
651+
/* Handle both cases*/
652+
esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
653+
break;
654+
}
655+
656+
if (!esi[1])
657+
break;
658+
659+
/* handle MST irq */
660+
if (aconnector->mst_mgr.mst_state)
661+
drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
662+
esi,
663+
ack,
664+
&new_irq_handled);
665+
666+
if (new_irq_handled) {
667+
/* ACK at DPCD to notify down stream */
668+
for (retry = 0; retry < 3; retry++) {
669+
ssize_t wret;
670+
671+
wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
672+
dpcd_addr + 1,
673+
ack[1]);
674+
if (wret == 1)
675+
break;
676+
}
677+
678+
if (retry == 3) {
679+
DRM_ERROR("Failed to ack MST event.\n");
680+
return;
681+
}
682+
683+
drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
684+
685+
new_irq_handled = false;
686+
} else {
687+
break;
688+
}
689+
}
690+
691+
mutex_unlock(&aconnector->handle_mst_msg_ready);
692+
693+
if (process_count == max_process_count)
694+
DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
695+
}
696+
697+
/*
 * poll_hpd_irq() callback for the DRM MST core: some docks fail to
 * raise a short HPD when DOWN_REP_MSG_RDY is set, so the MST core
 * polls through this hook.  Only down-reply events are serviced here;
 * up-requests remain on the HPD-driven path.
 */
static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr)
{
	dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT);
}
701+
593702
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
594703
.add_connector = dm_dp_add_mst_connector,
704+
.poll_hpd_irq = dm_handle_mst_down_rep_msg_ready,
595705
};
596706

597707
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,

drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -49,6 +49,13 @@
4949
#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031
5050
#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000
5151

52+
/*
 * Selects which MST sideband "message ready" IRQ bits
 * dm_handle_mst_sideband_msg_ready_event() should service.  Values are
 * bitmask-shaped (DOWN_OR_UP == DOWN_REP | UP_REQ), though the handler
 * dispatches on them with a switch rather than bit tests.
 */
enum mst_msg_ready_type {
	NONE_MSG_RDY_EVENT = 0,
	DOWN_REP_MSG_RDY_EVENT = 1,
	UP_REQ_MSG_RDY_EVENT = 2,
	DOWN_OR_UP_MSG_RDY_EVENT = 3
};
58+
5259
struct amdgpu_display_manager;
5360
struct amdgpu_dm_connector;
5461

@@ -61,6 +68,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
6168
void
6269
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
6370

71+
void dm_handle_mst_sideband_msg_ready_event(
72+
struct drm_dp_mst_topology_mgr *mgr,
73+
enum mst_msg_ready_type msg_rdy_type);
74+
6475
struct dsc_mst_fairness_vars {
6576
int pbn;
6677
bool dsc_enabled;

0 commit comments

Comments
 (0)