
Commit f1fd8a9

charliu-AMDENG authored and alexdeucher committed
drm/amd/display: Correct sw cache timing to ensure dispclk ramping
[why]
The current driver caches the dispclk right after sending the command to PMFW, before the actual clock has been reached. Change to only cache the dispclk setting after HW has reached the real clock. Also allow some range, since it might be running a bypass clock setting.

Reviewed-by: Yihan Zhu <yihan.zhu@amd.com>
Signed-off-by: Charlene Liu <Charlene.Liu@amd.com>
Signed-off-by: Ivan Lipski <ivan.lipski@amd.com>
Tested-by: Dan Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
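For readers skimming the diff below, here is a minimal, self-contained sketch of the caching pattern the patch moves to. The helper names (request_dispclk_khz, read_current_dispclk_khz) are hypothetical stand-ins for dcn35_smu_set_dispclk() and the REG_READ(CLK1_CLK0_CURRENT_CNT) readback in the actual driver; the point is only that the software cache is updated after the hardware reports a clock at or above the request, since PMFW may briefly run a bypass clock higher than the requested hard minimum.

#include <stdio.h>

struct clk_state {
	int cached_dispclk_khz; /* software-cached dispclk */
};

/* Hypothetical stand-in for dcn35_smu_set_dispclk(). */
static void request_dispclk_khz(int khz) { (void)khz; }

/* Hypothetical stand-in for REG_READ(CLK1_CLK0_CURRENT_CNT). */
static int read_current_dispclk_khz(void) { return 600000; }

static void set_and_cache_dispclk(struct clk_state *s, int requested_khz)
{
	request_dispclk_khz(requested_khz);

	/* Cache only once HW reports it reached (or exceeded, e.g. via a
	 * bypass clock) the requested value. */
	if (read_current_dispclk_khz() >= requested_khz)
		s->cached_dispclk_khz = requested_khz;
}

int main(void)
{
	struct clk_state s = { .cached_dispclk_khz = 0 };
	set_and_cache_dispclk(&s, 400000);
	printf("cached dispclk: %d kHz\n", s.cached_dispclk_khz);
	return 0;
}

The diff applies the same pattern to dppclk, using CLK1_CLK1_CURRENT_CNT.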
1 parent 35bcc91 commit f1fd8a9

File tree

1 file changed, +21 -9 lines


drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c

Lines changed: 21 additions & 9 deletions
@@ -387,6 +387,7 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
 	bool update_dispclk = false;
 	bool dpp_clock_lowered = false;
 	int all_active_disps = 0;
+	int actual_dppclk = 0;
 
 	if (dc->work_arounds.skip_clock_update)
 		return;
@@ -472,22 +473,25 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
 	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
 		if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
 			dpp_clock_lowered = true;
-		clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
 		update_dppclk = true;
 	}
 
 	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
 			(new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
 		int requested_dispclk_khz = new_clocks->dispclk_khz;
-
+		int actual_dispclk;
 		dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
 
 		/* Clamp the requested clock to PMFW based on their limit. */
 		if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz)
 			requested_dispclk_khz = dc->debug.min_disp_clk_khz;
 
 		dcn35_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
-		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+		actual_dispclk = REG_READ(CLK1_CLK0_CURRENT_CNT);
+
+		/*pmfw might set bypass clock which is higher than hardmin*/
+		if (actual_dispclk >= new_clocks->dispclk_khz)
+			clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
 
 		dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
 
@@ -505,13 +509,20 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
 	if (dpp_clock_lowered) {
 		// increase per DPP DTO before lowering global dppclk
 		dcn35_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
-		dcn35_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
+		dcn35_smu_set_dppclk(clk_mgr, new_clocks->dppclk_khz);
 	} else {
 		// increase global DPPCLK before lowering per DPP DTO
 		if (update_dppclk || update_dispclk)
-			dcn35_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
+			dcn35_smu_set_dppclk(clk_mgr, new_clocks->dppclk_khz);
 		dcn35_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
 	}
+	if (update_dppclk) {
+		actual_dppclk = REG_READ(CLK1_CLK1_CURRENT_CNT);
+
+		/*pmfw might set bypass clock which is higher than hardmin*/
+		if (actual_dppclk >= new_clocks->dppclk_khz)
+			clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
+	}
 
 	// notify PMFW of bandwidth per DPIA tunnel
 	if (dc->debug.notify_dpia_hr_bw)
@@ -551,7 +562,7 @@ static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
 	 * since fractional part is only 16 bit in register definition but is 32 bit
 	 * in our fix point definiton, need to shift left by 16 to obtain correct value
 	 */
-	pll_req.value |= fbmult_frac_val << 16;
+	pll_req.value |= (long long) fbmult_frac_val << 16;
 
 	/* multiply by REFCLK period */
 	pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);
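The cast in this hunk is easiest to see in isolation. Here is a standalone sketch of the general hazard it guards against (the example value is illustrative, not taken from the driver): a shift performed in 32-bit arithmetic can drop high bits before the result is OR'ed into the 64-bit fixed-point value, whereas widening to long long first keeps them.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t frac = 0xABCD1234; /* illustrative value with more than 16 significant bits */
	int64_t narrow = 0, wide = 0;

	narrow |= frac << 16;            /* shift done in 32 bits: top 16 bits are lost */
	wide   |= (long long)frac << 16; /* shift done in 64 bits: all bits preserved */

	printf("32-bit shift: 0x%llx\n64-bit shift: 0x%llx\n",
	       (unsigned long long)narrow, (unsigned long long)wide);
	return 0;
}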
@@ -778,7 +789,8 @@ static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct
 			table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
 
 			/* Modify previous watermark range to cover up to max */
-			table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
+			if (num_valid_sets > 0)
+				table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
 		}
 		num_valid_sets++;
 	}
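A tiny standalone sketch of what the added bounds check protects against (the array and counter names here are illustrative, not the driver's): when no valid set has been recorded yet, "modify the previous range" would index element -1.

#include <stdio.h>

#define MAX_RANGES 4

int main(void)
{
	unsigned int max_clock[MAX_RANGES] = { 0 };
	int num_valid = 0; /* no ranges recorded yet */

	/* Guard mirrors the patch: only widen the previous range if one exists,
	 * otherwise max_clock[num_valid - 1] would be an out-of-bounds access. */
	if (num_valid > 0)
		max_clock[num_valid - 1] = 0xFFFF;

	printf("first range max: %u\n", max_clock[0]);
	return 0;
}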
@@ -939,8 +951,8 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
 		is_valid_clock_value(min_dram_speed_mts));
 
 	/* dispclk and dppclk can be max at any voltage, same number of levels for both */
-	if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
-	    clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
+	if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS) {
+		/*numDispclk is the same as numDPPclk*/
 		max_dispclk = find_max_clk_value(clock_table->DispClocks,
 			clock_table->NumDispClkLevelsEnabled);
 		max_dppclk = find_max_clk_value(clock_table->DppClocks,
