Commit 00f68f5

Wayne Lin authored and gregkh committed
drm/dp_mst: Clear MSG_RDY flag before sending new message
commit 72f1de4 upstream.

[Why]
The sequence for collecting down_reply from the source's perspective should be:

Request_n -> repeat (get partial reply of Request_n -> clear message ready flag to ack DPRX that the message is received) until all partial replies for Request_n are received -> new Request_n+1.

Right now there is a chance that drm_dp_mst_hpd_irq() fires a new down request from the tx queue while the down reply is still incomplete. The source is restricted from generating interleaved message transactions, so we should avoid it.

Also, while assembling partial reply packets, reading out the DPCD DOWN_REP sideband MSG buffer and clearing the DOWN_REP_MSG_RDY flag should be wrapped up as one complete operation for reading out a reply packet. Kicking off a new request before clearing the DOWN_REP_MSG_RDY flag is risky: e.g. if the reply of the new request overwrites the DPRX DOWN_REP sideband MSG buffer before the source writes to clear DOWN_REP_MSG_RDY, the source unintentionally flushes the reply of the new request. The up request should be handled in the same way.

[How]
Separate drm_dp_mst_hpd_irq() into 2 steps. After acking the MST IRQ event, the driver calls drm_dp_mst_hpd_irq_send_new_request(), which triggers drm_dp_mst_kick_tx() only when there is no ongoing message transaction.

Changes since v1:
* Reworked on review comments received
-> Adjust the fix to let the driver explicitly kick off a new down request when the MST IRQ event is handled and acked
-> Adjust the commit message

Changes since v2:
* Adjust the commit message
* Adjust the naming of the divided 2 functions and add a new input parameter "ack".
* Adjust code flow as per review comments.

Changes since v3:
* Update the function description of drm_dp_mst_hpd_irq_handle_event

Changes since v4:
* Change ack of drm_dp_mst_hpd_irq_handle_event() to be an array aligned with the size of esi[]

Signed-off-by: Wayne Lin <[email protected]>
Reviewed-by: Lyude Paul <[email protected]>
Acked-by: Jani Nikula <[email protected]>
Cc: [email protected]
Signed-off-by: Alex Deucher <[email protected]>
Signed-off-by: Mario Limonciello <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent c085ffa commit 00f68f5
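Below is a minimal, illustrative sketch (not part of the patch) of the calling sequence a driver's short-pulse handler is expected to follow after this change: handle the MST event, ack the MSG_RDY bits at the DPRX, and only then try to kick off the next queued request. The function name my_handle_mst_short_pulse() and the aux/mgr parameters are placeholders; driver-specific retry loops and error reporting are omitted.

/* Hypothetical driver-side usage sketch; not taken from any of the drivers touched below. */
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_mst_helper.h>

static void my_handle_mst_short_pulse(struct drm_dp_aux *aux,
				      struct drm_dp_mst_topology_mgr *mgr)
{
	u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
	u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
	bool handled = false;

	if (drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, sizeof(esi)) != sizeof(esi))
		return;

	/* Step 1: process the sideband event; ack[] collects the MSG_RDY bits to clear. */
	drm_dp_mst_hpd_irq_handle_event(mgr, esi, ack, &handled);
	if (!handled)
		return;

	/* Step 2: clear DOWN_REP_MSG_RDY / UP_REQ_MSG_RDY at the DPRX first. */
	if (drm_dp_dpcd_writeb(aux, DP_SINK_COUNT_ESI + 1, ack[1]) != 1)
		return;

	/* Step 3: only after the ack, try to send the next queued down request. */
	drm_dp_mst_hpd_irq_send_new_request(mgr);
}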

5 files changed: +81 -31 lines

drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

Lines changed: 17 additions & 15 deletions
@@ -3201,6 +3201,7 @@ static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
 
 	while (dret == dpcd_bytes_to_read &&
 		process_count < max_process_count) {
+		u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
 		u8 retry;
 		dret = 0;
 
@@ -3209,28 +3210,29 @@ static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
 		/* handle HPD short pulse irq */
 		if (aconnector->mst_mgr.mst_state)
-			drm_dp_mst_hpd_irq(
-				&aconnector->mst_mgr,
-				esi,
-				&new_irq_handled);
+			drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
+							esi,
+							ack,
+							&new_irq_handled);
 
 		if (new_irq_handled) {
 			/* ACK at DPCD to notify down stream */
-			const int ack_dpcd_bytes_to_write =
-				dpcd_bytes_to_read - 1;
-
 			for (retry = 0; retry < 3; retry++) {
-				u8 wret;
-
-				wret = drm_dp_dpcd_write(
-					&aconnector->dm_dp_aux.aux,
-					dpcd_addr + 1,
-					&esi[1],
-					ack_dpcd_bytes_to_write);
-				if (wret == ack_dpcd_bytes_to_write)
+				ssize_t wret;
+
+				wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
+							  dpcd_addr + 1,
+							  ack[1]);
+				if (wret == 1)
 					break;
 			}
 
+			if (retry == 3) {
+				DRM_ERROR("Failed to ack MST event.\n");
+				return;
+			}
+
+			drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
 			/* check if there is new irq to be handled */
 			dret = drm_dp_dpcd_read(
 				&aconnector->dm_dp_aux.aux,

drivers/gpu/drm/display/drm_dp_mst_topology.c

Lines changed: 47 additions & 7 deletions
@@ -4053,17 +4053,28 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 }
 
 /**
- * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
+ * drm_dp_mst_hpd_irq_handle_event() - MST hotplug IRQ handle MST event
  * @mgr: manager to notify irq for.
  * @esi: 4 bytes from SINK_COUNT_ESI
+ * @ack: 4 bytes used to ack events starting from SINK_COUNT_ESI
  * @handled: whether the hpd interrupt was consumed or not
  *
- * This should be called from the driver when it detects a short IRQ,
+ * This should be called from the driver when it detects a HPD IRQ,
  * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
- * topology manager will process the sideband messages received as a result
- * of this.
+ * topology manager will process the sideband messages received
+ * as indicated in the DEVICE_SERVICE_IRQ_VECTOR_ESI0 and set the
+ * corresponding flags that Driver has to ack the DP receiver later.
+ *
+ * Note that driver shall also call
+ * drm_dp_mst_hpd_irq_send_new_request() if the 'handled' is set
+ * after calling this function, to try to kick off a new request in
+ * the queue if the previous message transaction is completed.
+ *
+ * See also:
+ * drm_dp_mst_hpd_irq_send_new_request()
 */
-int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
+int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u8 *esi,
+				    u8 *ack, bool *handled)
 {
 	int ret = 0;
 	int sc;
@@ -4078,18 +4089,47 @@ int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handl
 	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
 		ret = drm_dp_mst_handle_down_rep(mgr);
 		*handled = true;
+		ack[1] |= DP_DOWN_REP_MSG_RDY;
 	}
 
 	if (esi[1] & DP_UP_REQ_MSG_RDY) {
 		ret |= drm_dp_mst_handle_up_req(mgr);
 		*handled = true;
+		ack[1] |= DP_UP_REQ_MSG_RDY;
 	}
 
-	drm_dp_mst_kick_tx(mgr);
 	return ret;
 }
-EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
+EXPORT_SYMBOL(drm_dp_mst_hpd_irq_handle_event);
 
+/**
+ * drm_dp_mst_hpd_irq_send_new_request() - MST hotplug IRQ kick off new request
+ * @mgr: manager to notify irq for.
+ *
+ * This should be called from the driver when mst irq event is handled
+ * and acked. Note that new down request should only be sent when
+ * previous message transaction is completed. Source is not supposed to generate
+ * interleaved message transactions.
+ */
+void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr)
+{
+	struct drm_dp_sideband_msg_tx *txmsg;
+	bool kick = true;
+
+	mutex_lock(&mgr->qlock);
+	txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
+					 struct drm_dp_sideband_msg_tx, next);
+	/* If last transaction is not completed yet*/
+	if (!txmsg ||
+	    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
+	    txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
+		kick = false;
+	mutex_unlock(&mgr->qlock);
+
+	if (kick)
+		drm_dp_mst_kick_tx(mgr);
+}
+EXPORT_SYMBOL(drm_dp_mst_hpd_irq_send_new_request);
 /**
  * drm_dp_mst_detect_port() - get connection status for an MST port
  * @connector: DRM connector for this port

drivers/gpu/drm/i915/display/intel_dp.c

Lines changed: 4 additions & 3 deletions
@@ -3804,9 +3804,7 @@ intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack)
 {
 	bool handled = false;
 
-	drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
-	if (handled)
-		ack[1] |= esi[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
+	drm_dp_mst_hpd_irq_handle_event(&intel_dp->mst_mgr, esi, ack, &handled);
 
 	if (esi[1] & DP_CP_IRQ) {
 		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
@@ -3881,6 +3879,9 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
 
 		if (!intel_dp_ack_sink_irq_esi(intel_dp, ack))
 			drm_dbg_kms(&i915->drm, "Failed to ack ESI\n");
+
+		if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY))
+			drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr);
 	}
 
 	return link_ok;

drivers/gpu/drm/nouveau/dispnv50/disp.c

Lines changed: 8 additions & 4 deletions
@@ -1473,22 +1473,26 @@ nv50_mstm_service(struct nouveau_drm *drm,
 	u8 esi[8] = {};
 
 	while (handled) {
+		u8 ack[8] = {};
+
 		rc = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
 		if (rc != 8) {
 			ret = false;
 			break;
 		}
 
-		drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
+		drm_dp_mst_hpd_irq_handle_event(&mstm->mgr, esi, ack, &handled);
 		if (!handled)
 			break;
 
-		rc = drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1],
-				       3);
-		if (rc != 3) {
+		rc = drm_dp_dpcd_writeb(aux, DP_SINK_COUNT_ESI + 1, ack[1]);
+
+		if (rc != 1) {
 			ret = false;
 			break;
 		}
+
+		drm_dp_mst_hpd_irq_send_new_request(&mstm->mgr);
 	}
 
 	if (!ret)

include/drm/display/drm_dp_mst_helper.h

Lines changed: 5 additions & 2 deletions
@@ -815,8 +815,11 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
 bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
 
-int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
-
+int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr,
+				    const u8 *esi,
+				    u8 *ack,
+				    bool *handled);
+void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr);
 
 int
 drm_dp_mst_detect_port(struct drm_connector *connector,
