drm/amd/display: Add dpia display mode validation logic
[ Upstream commit 59f1622 ]

[Why]
If the bandwidth allocation feature is enabled, the connection manager
won't limit the DP tunnel bandwidth. So, display mode validation needs
to be performed for streams on DPIA links to avoid oversubscribing the
DP tunnel bandwidth.

[How]
- Read the non-reduced link rate and lane count, and update the
  reported link capability.
- Calculate the bandwidth required for streams of DPIA links per host
  router and validate it against the bandwidth allocated to that host
  router (see the sketch after this list).
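
A minimal standalone sketch of the per-host-router check described above;
this is not the kernel implementation, and the struct layout, the
hr_allocated_bw() helper, and all bandwidth figures are invented for
illustration:

```c
#include <stdbool.h>
#include <stdio.h>

#define MAX_HR_NUM 2

struct dpia {
	int hr_index;   /* host router this DPIA belongs to */
	int bw_needed;  /* bandwidth (Kbps) the stream on this DPIA needs */
};

/* hypothetical stand-in for the per-host-router allocated-BW query */
static int hr_allocated_bw(int hr_index)
{
	return hr_index == 0 ? 20000 : 10000;
}

static bool validate_dpia_bw(const struct dpia *dpias, int num_dpias)
{
	int bw_needed_per_hr[MAX_HR_NUM] = { 0 };

	/* accumulate the demand of every DPIA onto its host router */
	for (int i = 0; i < num_dpias; i++)
		bw_needed_per_hr[dpias[i].hr_index] += dpias[i].bw_needed;

	/* reject the mode set if any host router is oversubscribed */
	for (int hr = 0; hr < MAX_HR_NUM; hr++)
		if (bw_needed_per_hr[hr] > hr_allocated_bw(hr))
			return false;
	return true;
}

int main(void)
{
	struct dpia dpias[] = { { 0, 12000 }, { 0, 9000 } };

	/* 12000 + 9000 = 21000 > 20000, so this mode set is rejected */
	printf("%s\n", validate_dpia_bw(dpias, 2) ? "valid" : "rejected");
	return 0;
}
```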

Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Reviewed-by: PeiChen Huang <peichen.huang@amd.com>
Reviewed-by: Aric Cyr <aric.cyr@amd.com>
Acked-by: Rodrigo Siqueira <rodrigo.siqueira@amd.com>
Signed-off-by: Meenakshikumar Somasundaram <meenakshikumar.somasundaram@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Stable-dep-of: 0484e05 ("drm/amd/display: fixed integer types and null check locations")
Signed-off-by: Sasha Levin <sashal@kernel.org>
Meenakshikumar Somasundaram authored and gregkh committed Mar 1, 2024
1 parent b45df83 commit c3682b6
Showing 5 changed files with 104 additions and 40 deletions.
2 changes: 1 addition & 1 deletion drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -497,7 +497,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
link->dc->link_srv->enable_hpd_filter(link, enable);
}

-bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
+bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
{
return dc->link_srv->validate_dpia_bandwidth(streams, count);
}
4 changes: 2 additions & 2 deletions drivers/gpu/drm/amd/display/dc/dc.h
@@ -2116,11 +2116,11 @@ int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
*
* @dc: pointer to dc struct
* @stream: pointer to all possible streams
-* @num_streams: number of valid DPIA streams
+* @count: number of valid DPIA streams
*
* return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE
*/
-bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams,
+bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams,
const unsigned int count);

/* Sink Interfaces - A sink corresponds to a display output device */
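For reference, a hypothetical call site for the renamed entry point; the
wrapper below is invented for illustration and is not part of the patch:

```c
#include <stdbool.h>

struct dc;
struct dc_stream_state;

/* prototype as declared in dc.h */
bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams,
		const unsigned int count);

/* hypothetical caller: gate a mode set on the DPIA bandwidth check */
static bool can_commit_dpia_streams(struct dc *dc,
		const struct dc_stream_state *streams, unsigned int count)
{
	return dc_link_dp_dpia_validate(dc, streams, count);
}
```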
6 changes: 6 additions & 0 deletions drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -1433,6 +1433,12 @@ struct dp_trace {
#ifndef DP_TUNNELING_STATUS
#define DP_TUNNELING_STATUS 0xE0025 /* 1.4a */
#endif
+#ifndef DP_TUNNELING_MAX_LINK_RATE
+#define DP_TUNNELING_MAX_LINK_RATE 0xE0028 /* 1.4a */
+#endif
+#ifndef DP_TUNNELING_MAX_LANE_COUNT
+#define DP_TUNNELING_MAX_LANE_COUNT 0xE0029 /* 1.4a */
+#endif
#ifndef DPTX_BW_ALLOCATION_MODE_CONTROL
#define DPTX_BW_ALLOCATION_MODE_CONTROL 0xE0030 /* 1.4a */
#endif
2 changes: 2 additions & 0 deletions drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -1121,6 +1121,8 @@ struct dc_dpia_bw_alloc {
int bw_granularity; // BW Granularity
bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3: DP-Tx & Dpia & CM
bool response_ready; // Response ready from the CM side
+uint8_t nrd_max_lane_count; // Non-reduced max lane count
+uint8_t nrd_max_link_rate; // Non-reduced max link rate
};

#define MAX_SINKS_PER_LINK 4
130 changes: 93 additions & 37 deletions drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -59,6 +59,7 @@ static void reset_bw_alloc_struct(struct dc_link *link)
link->dpia_bw_alloc_config.estimated_bw = 0;
link->dpia_bw_alloc_config.bw_granularity = 0;
link->dpia_bw_alloc_config.response_ready = false;
+link->dpia_bw_alloc_config.sink_allocated_bw = 0;
}

#define BW_GRANULARITY_0 4 // 0.25 Gbps
@@ -104,20 +105,53 @@ static int get_estimated_bw(struct dc_link *link)
return bw_estimated_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
}

+static int get_non_reduced_max_link_rate(struct dc_link *link)
+{
+uint8_t nrd_max_link_rate = 0;
+
+core_link_read_dpcd(
+link,
+DP_TUNNELING_MAX_LINK_RATE,
+&nrd_max_link_rate,
+sizeof(uint8_t));
+
+return nrd_max_link_rate;
+}
+
+static int get_non_reduced_max_lane_count(struct dc_link *link)
+{
+uint8_t nrd_max_lane_count = 0;
+
+core_link_read_dpcd(
+link,
+DP_TUNNELING_MAX_LANE_COUNT,
+&nrd_max_lane_count,
+sizeof(uint8_t));
+
+return nrd_max_lane_count;
+}

/*
* Read all New BW alloc configuration ex: estimated_bw, allocated_bw,
* granuality, Driver_ID, CM_Group, & populate the BW allocation structs
* for host router and dpia
*/
static void init_usb4_bw_struct(struct dc_link *link)
{
-// Init the known values
+reset_bw_alloc_struct(link);
+
+/* init the known values */
link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link);
link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
+link->dpia_bw_alloc_config.nrd_max_link_rate = get_non_reduced_max_link_rate(link);
+link->dpia_bw_alloc_config.nrd_max_lane_count = get_non_reduced_max_lane_count(link);

DC_LOG_DEBUG("%s: bw_granularity(%d), estimated_bw(%d)\n",
__func__, link->dpia_bw_alloc_config.bw_granularity,
link->dpia_bw_alloc_config.estimated_bw);
DC_LOG_DEBUG("%s: nrd_max_link_rate(%d), nrd_max_lane_count(%d)\n",
__func__, link->dpia_bw_alloc_config.nrd_max_link_rate,
link->dpia_bw_alloc_config.nrd_max_lane_count);
}

static uint8_t get_lowest_dpia_index(struct dc_link *link)
@@ -142,39 +176,50 @@ static uint8_t get_lowest_dpia_index(struct dc_link *link)
}

/*
-* Get the Max Available BW or Max Estimated BW for each Host Router
+* Get the maximum dp tunnel banwidth of host router
*
-* @link: pointer to the dc_link struct instance
-* @type: ESTIMATD BW or MAX AVAILABLE BW
+* @dc: pointer to the dc struct instance
+* @hr_index: host router index
*
-* return: response_ready flag from dc_link struct
+* return: host router maximum dp tunnel bandwidth
*/
-static int get_host_router_total_bw(struct dc_link *link, uint8_t type)
+static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_index)
{
-const struct dc *dc_struct = link->dc;
-uint8_t lowest_dpia_index = get_lowest_dpia_index(link);
-uint8_t idx = (link->link_index - lowest_dpia_index) / 2, idx_temp = 0;
-struct dc_link *link_temp;
+uint8_t lowest_dpia_index = get_lowest_dpia_index(dc->links[0]);
+uint8_t hr_index_temp = 0;
+struct dc_link *link_dpia_primary, *link_dpia_secondary;
int total_bw = 0;
-int i;

-for (i = 0; i < MAX_PIPES * 2; ++i) {

-if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
-continue;
+for (uint8_t i = 0; i < MAX_PIPES * 2; ++i) {

-link_temp = dc_struct->links[i];
-if (!link_temp || !link_temp->hpd_status)
+if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
continue;

-idx_temp = (link_temp->link_index - lowest_dpia_index) / 2;

-if (idx_temp == idx) {

-if (type == HOST_ROUTER_BW_ESTIMATED)
-total_bw += link_temp->dpia_bw_alloc_config.estimated_bw;
-else if (type == HOST_ROUTER_BW_ALLOCATED)
-total_bw += link_temp->dpia_bw_alloc_config.sink_allocated_bw;
+hr_index_temp = (dc->links[i]->link_index - lowest_dpia_index) / 2;

+if (hr_index_temp == hr_index) {
+link_dpia_primary = dc->links[i];
+link_dpia_secondary = dc->links[i + 1];

+/**
+* If BW allocation enabled on both DPIAs, then
+* HR BW = Estimated(dpia_primary) + Allocated(dpia_secondary)
+* otherwise HR BW = Estimated(bw alloc enabled dpia)
+*/
+if ((link_dpia_primary->hpd_status &&
+link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) &&
+(link_dpia_secondary->hpd_status &&
+link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled)) {
+total_bw += link_dpia_primary->dpia_bw_alloc_config.estimated_bw +
+link_dpia_secondary->dpia_bw_alloc_config.sink_allocated_bw;
+} else if (link_dpia_primary->hpd_status &&
+link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) {
+total_bw = link_dpia_primary->dpia_bw_alloc_config.estimated_bw;
+} else if (link_dpia_secondary->hpd_status &&
+link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled) {
+total_bw += link_dpia_secondary->dpia_bw_alloc_config.estimated_bw;
+}
+break;
}
}
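
The block comment in the hunk above states the rule for a host router's
total DP tunnel bandwidth. A standalone sketch of that rule with invented
state and figures (the struct and helper are illustrative, not the kernel
code):

```c
#include <stdbool.h>
#include <stdio.h>

struct dpia_state {
	bool hpd_status;       /* DPIA is connected */
	bool bw_alloc_enabled; /* BW allocation mode is active */
	int estimated_bw;      /* Kbps */
	int sink_allocated_bw; /* Kbps */
};

/* HR BW = Estimated(primary) + Allocated(secondary) if both are active,
 * otherwise the estimated BW of whichever DPIA has BW allocation enabled. */
static int hr_total_dp_tunnel_bw(const struct dpia_state *pri,
				 const struct dpia_state *sec)
{
	bool pri_on = pri->hpd_status && pri->bw_alloc_enabled;
	bool sec_on = sec->hpd_status && sec->bw_alloc_enabled;

	if (pri_on && sec_on)
		return pri->estimated_bw + sec->sink_allocated_bw;
	if (pri_on)
		return pri->estimated_bw;
	if (sec_on)
		return sec->estimated_bw;
	return 0;
}

int main(void)
{
	struct dpia_state pri = { true, true, 15000, 8000 };
	struct dpia_state sec = { true, true, 12000, 5000 };

	/* both active: 15000 + 5000 = 20000 */
	printf("HR BW = %d Kbps\n", hr_total_dp_tunnel_bw(&pri, &sec));
	return 0;
}
```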

Expand All @@ -194,7 +239,6 @@ static void dpia_bw_alloc_unplug(struct dc_link *link)
if (link) {
DC_LOG_DEBUG("%s: resetting bw alloc config for link(%d)\n",
__func__, link->link_index);
-link->dpia_bw_alloc_config.sink_allocated_bw = 0;
reset_bw_alloc_struct(link);
}
}
@@ -397,7 +441,7 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
if (!timeout)
ret = 0;// ERROR TIMEOUT waiting for response for allocating bw
else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0)
-ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED);
+ret = link->dpia_bw_alloc_config.sink_allocated_bw;
}
//2. Cold Unplug
else if (!link->hpd_status)
@@ -439,29 +483,41 @@ bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r
bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias)
{
bool ret = true;
-int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 };
-uint8_t lowest_dpia_index = 0, dpia_index = 0;
-uint8_t i;
+int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 }, host_router_total_dp_bw = 0;
+uint8_t lowest_dpia_index, i, hr_index;

if (!num_dpias || num_dpias > MAX_DPIA_NUM)
return ret;

-//Get total Host Router BW & Validate against each Host Router max BW
+lowest_dpia_index = get_lowest_dpia_index(link[0]);

+/* get total Host Router BW with granularity for the given modes */
for (i = 0; i < num_dpias; ++i) {
+int granularity_Gbps = 0;
+int bw_granularity = 0;

if (!link[i]->dpia_bw_alloc_config.bw_alloc_enabled)
continue;

-lowest_dpia_index = get_lowest_dpia_index(link[i]);
if (link[i]->link_index < lowest_dpia_index)
continue;

-dpia_index = (link[i]->link_index - lowest_dpia_index) / 2;
-bw_needed_per_hr[dpia_index] += bw_needed_per_dpia[i];
-if (bw_needed_per_hr[dpia_index] > get_host_router_total_bw(link[i], HOST_ROUTER_BW_ALLOCATED)) {
+granularity_Gbps = (Kbps_TO_Gbps / link[i]->dpia_bw_alloc_config.bw_granularity);
+bw_granularity = (bw_needed_per_dpia[i] / granularity_Gbps) * granularity_Gbps +
+((bw_needed_per_dpia[i] % granularity_Gbps) ? granularity_Gbps : 0);

-ret = false;
-break;
+hr_index = (link[i]->link_index - lowest_dpia_index) / 2;
+bw_needed_per_hr[hr_index] += bw_granularity;
}

+/* validate against each Host Router max BW */
+for (hr_index = 0; hr_index < MAX_HR_NUM; ++hr_index) {
+if (bw_needed_per_hr[hr_index]) {
+host_router_total_dp_bw = get_host_router_total_dp_tunnel_bw(link[0]->dc, hr_index);
+if (bw_needed_per_hr[hr_index] > host_router_total_dp_bw) {
+ret = false;
+break;
+}
+}
+}

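The last hunk rounds each DPIA's bandwidth demand up to the next
granularity multiple before summing it per host router. A standalone
sketch of that arithmetic, assuming the driver's Kbps_TO_Gbps scale of
(1000 * 1000) and an arbitrary demand value:

```c
#include <stdio.h>

#define Kbps_TO_Gbps (1000 * 1000)

int main(void)
{
	int bw_granularity_code = 4; /* encodes 0.25 Gbps steps */
	int bw_needed = 2345678;     /* Kbps, arbitrary example demand */

	int granularity_Gbps = Kbps_TO_Gbps / bw_granularity_code; /* 250000 Kbps */

	/* round bw_needed up to the next multiple of granularity_Gbps */
	int rounded = (bw_needed / granularity_Gbps) * granularity_Gbps +
		      ((bw_needed % granularity_Gbps) ? granularity_Gbps : 0);

	/* prints: 2345678 Kbps -> 2500000 Kbps */
	printf("%d Kbps -> %d Kbps\n", bw_needed, rounded);
	return 0;
}
```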
