Skip to content

Commit ec5fa9f

Browse files
Wayne Lin authored and alexdeucher committed
drm/amd/display: Adjust the MST resume flow
[Why] In drm_dp_mst_topology_mgr_resume() today, it will resume the mst branch to be ready handling mst mode and also consecutively do the mst topology probing. Which will cause the dirver have chance to fire hotplug event before restoring the old state. Then Userspace will react to the hotplug event based on a wrong state. [How] Adjust the mst resume flow as: 1. set dpcd to resume mst branch status 2. restore source old state 3. Do mst resume topology probing For drm_dp_mst_topology_mgr_resume(), it's better to adjust it to pull out topology probing work into a 2nd part procedure of the mst resume. Will have a follow up patch in drm. Reviewed-by: Chao-kai Wang <stylon.wang@amd.com> Cc: Mario Limonciello <mario.limonciello@amd.com> Cc: Alex Deucher <alexander.deucher@amd.com> Cc: stable@vger.kernel.org Acked-by: Stylon Wang <stylon.wang@amd.com> Signed-off-by: Wayne Lin <wayne.lin@amd.com> Tested-by: Daniel Wheeler <daniel.wheeler@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
1 parent ffd6bde commit ec5fa9f

File tree

1 file changed

+80
-13
lines changed

1 file changed

+80
-13
lines changed

drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

Lines changed: 80 additions & 13 deletions
Original file line numberOriginal file lineDiff line numberDiff line change
@@ -2340,14 +2340,62 @@ static int dm_late_init(void *handle)
2340
return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2340
return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2341
}
2341
}
2342

2342

2343+
static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
2344+
{
2345+
int ret;
2346+
u8 guid[16];
2347+
u64 tmp64;
2348+
2349+
mutex_lock(&mgr->lock);
2350+
if (!mgr->mst_primary)
2351+
goto out_fail;
2352+
2353+
if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
2354+
drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
2355+
goto out_fail;
2356+
}
2357+
2358+
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2359+
DP_MST_EN |
2360+
DP_UP_REQ_EN |
2361+
DP_UPSTREAM_IS_SRC);
2362+
if (ret < 0) {
2363+
drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
2364+
goto out_fail;
2365+
}
2366+
2367+
/* Some hubs forget their guids after they resume */
2368+
ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
2369+
if (ret != 16) {
2370+
drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
2371+
goto out_fail;
2372+
}
2373+
2374+
if (memchr_inv(guid, 0, 16) == NULL) {
2375+
tmp64 = get_jiffies_64();
2376+
memcpy(&guid[0], &tmp64, sizeof(u64));
2377+
memcpy(&guid[8], &tmp64, sizeof(u64));
2378+
2379+
ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
2380+
2381+
if (ret != 16) {
2382+
drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
2383+
goto out_fail;
2384+
}
2385+
}
2386+
2387+
memcpy(mgr->mst_primary->guid, guid, 16);
2388+
2389+
out_fail:
2390+
mutex_unlock(&mgr->lock);
2391+
}
2392+
2343
static void s3_handle_mst(struct drm_device *dev, bool suspend)
2393
static void s3_handle_mst(struct drm_device *dev, bool suspend)
2344
{
2394
{
2345
struct amdgpu_dm_connector *aconnector;
2395
struct amdgpu_dm_connector *aconnector;
2346
struct drm_connector *connector;
2396
struct drm_connector *connector;
2347
struct drm_connector_list_iter iter;
2397
struct drm_connector_list_iter iter;
2348
struct drm_dp_mst_topology_mgr *mgr;
2398
struct drm_dp_mst_topology_mgr *mgr;
2349-
int ret;
2350-
bool need_hotplug = false;
2351

2399

2352
drm_connector_list_iter_begin(dev, &iter);
2400
drm_connector_list_iter_begin(dev, &iter);
2353
drm_for_each_connector_iter(connector, &iter) {
2401
drm_for_each_connector_iter(connector, &iter) {
@@ -2369,18 +2417,15 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
2369
if (!dp_is_lttpr_present(aconnector->dc_link))
2417
if (!dp_is_lttpr_present(aconnector->dc_link))
2370
try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
2418
try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
2371

2419

2372-
ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2420+
/* TODO: move resume_mst_branch_status() into drm mst resume again
2373-
if (ret < 0) {
2421+
* once topology probing work is pulled out from mst resume into mst
2374-
dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2422+
* resume 2nd step. mst resume 2nd step should be called after old
2375-
aconnector->dc_link);
2423+
* state getting restored (i.e. drm_atomic_helper_resume()).
2376-
need_hotplug = true;
2424+
*/
2377-
}
2425+
resume_mst_branch_status(mgr);
2378
}
2426
}
2379
}
2427
}
2380
drm_connector_list_iter_end(&iter);
2428
drm_connector_list_iter_end(&iter);
2381-
2382-
if (need_hotplug)
2383-
drm_kms_helper_hotplug_event(dev);
2384
}
2429
}
2385

2430

2386
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2431
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
@@ -2774,7 +2819,8 @@ static int dm_resume(void *handle)
2774
struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2819
struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2775
enum dc_connection_type new_connection_type = dc_connection_none;
2820
enum dc_connection_type new_connection_type = dc_connection_none;
2776
struct dc_state *dc_state;
2821
struct dc_state *dc_state;
2777-
int i, r, j;
2822+
int i, r, j, ret;
2823+
bool need_hotplug = false;
2778

2824

2779
if (amdgpu_in_reset(adev)) {
2825
if (amdgpu_in_reset(adev)) {
2780
dc_state = dm->cached_dc_state;
2826
dc_state = dm->cached_dc_state;
@@ -2872,7 +2918,7 @@ static int dm_resume(void *handle)
2872
continue;
2918
continue;
2873

2919

2874
/*
2920
/*
2875-
* this is the case when traversing through already created
2921+
* this is the case when traversing through already created end sink
2876
* MST connectors, should be skipped
2922
* MST connectors, should be skipped
2877
*/
2923
*/
2878
if (aconnector && aconnector->mst_root)
2924
if (aconnector && aconnector->mst_root)
@@ -2932,6 +2978,27 @@ static int dm_resume(void *handle)
2932

2978

2933
dm->cached_state = NULL;
2979
dm->cached_state = NULL;
2934

2980

2981+
/* Do mst topology probing after resuming cached state*/
2982+
drm_connector_list_iter_begin(ddev, &iter);
2983+
drm_for_each_connector_iter(connector, &iter) {
2984+
aconnector = to_amdgpu_dm_connector(connector);
2985+
if (aconnector->dc_link->type != dc_connection_mst_branch ||
2986+
aconnector->mst_root)
2987+
continue;
2988+
2989+
ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
2990+
2991+
if (ret < 0) {
2992+
dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2993+
aconnector->dc_link);
2994+
need_hotplug = true;
2995+
}
2996+
}
2997+
drm_connector_list_iter_end(&iter);
2998+
2999+
if (need_hotplug)
3000+
drm_kms_helper_hotplug_event(ddev);
3001+
2935
amdgpu_dm_irq_resume_late(adev);
3002
amdgpu_dm_irq_resume_late(adev);
2936

3003

2937
amdgpu_dm_smu_write_watermarks_table(adev);
3004
amdgpu_dm_smu_write_watermarks_table(adev);

0 commit comments

Comments
 (0)