drm/amd: Align SMU11 SMU_MSG_OverridePcieParameters implementation with SMU13

commit e701156 upstream.

SMU13 overrides dynamic PCIe lane width and dynamic speed switching when
running on certain hosts. commit 38e4ced ("drm/amd/pm: conditionally disable
pcie lane switching for some sienna_cichlid SKUs") worked around this
issue by hard-coding limits for certain SKUs, but the same fundamental
problem with those hosts affects all SMU11 implementations as well, so
align the SMU11 and SMU13 driver handling.

Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
Reviewed-by: Evan Quan <evan.quan@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org # 6.1.x
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
superm1 authored and gregkh committed Aug 3, 2023
1 parent 3d7a757 commit 979f8de
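
In effect, SMU11 now applies the same policy SMU13 already uses: when the
host cannot handle dynamic PCIe switching, every DPM link level is forced to
one fixed gen speed and lane width (capped to what the highest level
supports); otherwise the levels are merely clamped to the platform caps. The
following minimal user-space sketch distills that policy from the diff below.
NUM_LINK_LEVELS, the per-level gen/lane tables, and the clamp-or-force
structure come from the patch; host_supports_dynamic_pcie_switching(),
main(), and the sample values are illustrative stand-ins, not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_LINK_LEVELS 2

/* Stand-in for amdgpu_device_pcie_dynamic_switching_supported(); the real
 * host check is not shown on this page. Returning false simulates one of
 * the problematic hosts. */
static bool host_supports_dynamic_pcie_switching(void)
{
    return false;
}

/* Same clamp-or-force structure as the new sienna_cichlid code below. */
static void update_pcie_parameters(uint32_t *pcie_gen, uint32_t *pcie_lane,
                                   uint32_t pcie_gen_cap, uint32_t pcie_width_cap)
{
    int i;

    if (!host_supports_dynamic_pcie_switching()) {
        /* Cap the platform caps to what the highest DPM level supports */
        if (pcie_gen[NUM_LINK_LEVELS - 1] < pcie_gen_cap)
            pcie_gen_cap = pcie_gen[NUM_LINK_LEVELS - 1];
        if (pcie_lane[NUM_LINK_LEVELS - 1] < pcie_width_cap)
            pcie_width_cap = pcie_lane[NUM_LINK_LEVELS - 1];

        /* Force all levels to the same settings: no dynamic switching */
        for (i = 0; i < NUM_LINK_LEVELS; i++) {
            pcie_gen[i] = pcie_gen_cap;
            pcie_lane[i] = pcie_width_cap;
        }
    } else {
        /* Only clamp levels that exceed the platform caps */
        for (i = 0; i < NUM_LINK_LEVELS; i++) {
            if (pcie_gen[i] > pcie_gen_cap)
                pcie_gen[i] = pcie_gen_cap;
            if (pcie_lane[i] > pcie_width_cap)
                pcie_lane[i] = pcie_width_cap;
        }
    }
}

int main(void)
{
    /* Illustrative DPM table: level 0 = Gen1, level 1 = Gen4,
     * both lane code 6 (x16 per the 1..7 encoding referenced in the
     * removed hunk below) */
    uint32_t gen[NUM_LINK_LEVELS] = { 1, 4 };
    uint32_t lane[NUM_LINK_LEVELS] = { 6, 6 };
    int i;

    update_pcie_parameters(gen, lane, 3, 6); /* host caps: Gen3, x16 */
    for (i = 0; i < NUM_LINK_LEVELS; i++)
        printf("level %d: gen %u, lane code %u\n", i, gen[i], lane[i]);
    return 0;
}

On the forced path both levels end up identical (here Gen3 x16), which is
how the patch keeps the SMU from switching link parameters at runtime.
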
Showing 1 changed file with 18 additions and 71 deletions.
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -2081,89 +2081,36 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
     return ret;
 }
 
-static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu,
-                                                      uint32_t *gen_speed_override,
-                                                      uint32_t *lane_width_override)
-{
-    struct amdgpu_device *adev = smu->adev;
-
-    *gen_speed_override = 0xff;
-    *lane_width_override = 0xff;
-
-    switch (adev->pdev->device) {
-    case 0x73A0:
-    case 0x73A1:
-    case 0x73A2:
-    case 0x73A3:
-    case 0x73AB:
-    case 0x73AE:
-        /* Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 */
-        *lane_width_override = 6;
-        break;
-    case 0x73E0:
-    case 0x73E1:
-    case 0x73E3:
-        *lane_width_override = 4;
-        break;
-    case 0x7420:
-    case 0x7421:
-    case 0x7422:
-    case 0x7423:
-    case 0x7424:
-        *lane_width_override = 3;
-        break;
-    default:
-        break;
-    }
-}
-
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
-
 static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
                                          uint32_t pcie_gen_cap,
                                          uint32_t pcie_width_cap)
 {
     struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
     struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
-    uint32_t gen_speed_override, lane_width_override;
     uint8_t *table_member1, *table_member2;
-    uint32_t min_gen_speed, max_gen_speed;
-    uint32_t min_lane_width, max_lane_width;
-    uint32_t smu_pcie_arg;
+    u32 smu_pcie_arg;
     int ret, i;
 
     GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
     GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
 
-    sienna_cichlid_get_override_pcie_settings(smu,
-                                              &gen_speed_override,
-                                              &lane_width_override);
+    /* PCIE gen speed and lane width override */
+    if (!amdgpu_device_pcie_dynamic_switching_supported()) {
+        if (pcie_table->pcie_gen[NUM_LINK_LEVELS - 1] < pcie_gen_cap)
+            pcie_gen_cap = pcie_table->pcie_gen[NUM_LINK_LEVELS - 1];
 
-    /* PCIE gen speed override */
-    if (gen_speed_override != 0xff) {
-        min_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
-        max_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
-    } else {
-        min_gen_speed = MAX(0, table_member1[0]);
-        max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
-        min_gen_speed = min_gen_speed > max_gen_speed ?
-                max_gen_speed : min_gen_speed;
-    }
-    pcie_table->pcie_gen[0] = min_gen_speed;
-    pcie_table->pcie_gen[1] = max_gen_speed;
+        if (pcie_table->pcie_lane[NUM_LINK_LEVELS - 1] < pcie_width_cap)
+            pcie_width_cap = pcie_table->pcie_lane[NUM_LINK_LEVELS - 1];
 
-    /* PCIE lane width override */
-    if (lane_width_override != 0xff) {
-        min_lane_width = MIN(pcie_width_cap, lane_width_override);
-        max_lane_width = MIN(pcie_width_cap, lane_width_override);
+        /* Force all levels to use the same settings */
+        for (i = 0; i < NUM_LINK_LEVELS; i++) {
+            pcie_table->pcie_gen[i] = pcie_gen_cap;
+            pcie_table->pcie_lane[i] = pcie_width_cap;
+        }
     } else {
-        min_lane_width = MAX(1, table_member2[0]);
-        max_lane_width = MIN(pcie_width_cap, table_member2[1]);
-        min_lane_width = min_lane_width > max_lane_width ?
-                 max_lane_width : min_lane_width;
+        for (i = 0; i < NUM_LINK_LEVELS; i++) {
+            if (pcie_table->pcie_gen[i] > pcie_gen_cap)
+                pcie_table->pcie_gen[i] = pcie_gen_cap;
+            if (pcie_table->pcie_lane[i] > pcie_width_cap)
+                pcie_table->pcie_lane[i] = pcie_width_cap;
+        }
     }
-    pcie_table->pcie_lane[0] = min_lane_width;
-    pcie_table->pcie_lane[1] = max_lane_width;
 
     for (i = 0; i < NUM_LINK_LEVELS; i++) {
         smu_pcie_arg = (i << 16 |
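
The hunk is truncated after the first line of the per-level message
argument. Judging from the visible (i << 16 | shift, each link level is
programmed via a single packed u32 sent with SMU_MSG_OverridePcieParameters.
A hedged sketch of such a packing follows; only the level shift is taken
from this page, and the bit positions of the gen-speed and lane-width fields
are assumptions made for illustration. Per the comment removed by the patch,
lane-width codes 1 through 7 correspond to x1 through x32.

#include <stdint.h>
#include <stdio.h>

/* Hedged sketch of the packed argument sent per link level with
 * SMU_MSG_OverridePcieParameters. Only the (i << 16) level shift is
 * visible in the truncated hunk above; putting the gen speed at bits
 * 15:8 and the lane-width code at bits 7:0 is an assumption, not
 * something shown on this page. */
static uint32_t pack_pcie_arg(uint32_t level, uint32_t gen, uint32_t lanes)
{
    return (level << 16) | (gen << 8) | lanes;
}

int main(void)
{
    /* Level 1, Gen4, lane code 6 (x16) -> 0x10406 under these assumptions */
    printf("0x%X\n", pack_pcie_arg(1, 4, 6));
    return 0;
}
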
