drm/vc4: Increase the core clock based on HVS load
Depending on the load of a given HVS output (HVS to PixelValves) and
input (planes attached to a channel), the HVS core clock needs to be
raised above its boot-time default.

Failing to do so will result in a vblank timeout and a stalled display
pipeline.

Signed-off-by: Maxime Ripard <maxime@cerno.tech>
mripard authored and pelwell committed Jul 9, 2021
1 parent c9ba6cf commit 5146eb7
Showing 3 changed files with 132 additions and 6 deletions.
15 changes: 15 additions & 0 deletions drivers/gpu/drm/vc4/vc4_crtc.c
@@ -644,12 +644,27 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
	struct drm_connector *conn;
	struct drm_connector_state *conn_state;
	struct drm_encoder *encoder;
	int ret, i;

	ret = vc4_hvs_atomic_check(crtc, crtc_state);
	if (ret)
		return ret;

	encoder = vc4_get_crtc_encoder(crtc, crtc_state);
	if (encoder) {
		const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
		struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);

		mode = &crtc_state->adjusted_mode;
		if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) {
			vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 1000,
						  mode->clock * 9 / 10) * 1000;
		} else {
			vc4_state->hvs_load = mode->clock * 1000;
		}
	}

	for_each_new_connector_in_state(crtc_state->state, conn, conn_state,
					i) {
		if (conn_state->crtc != crtc)
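The HDMI0 branch above scales the pixel clock by the active-to-total horizontal ratio, adds a 1 MHz margin, and never lets the result drop below 90% of the pixel clock (mode->clock is in kHz, so the value is converted to Hz at the end). A minimal standalone sketch of that arithmetic; the helper name and the 1080p60 timing are illustrative, not part of the patch:

#include <stdio.h>

/* Model of the HDMI0 branch of vc4_crtc_atomic_check() above. */
static unsigned long hvs_load_hdmi0(unsigned long clock_khz,
				    unsigned long hdisplay,
				    unsigned long htotal)
{
	/* Pixel rate scaled by the active part of the line, plus 1 MHz. */
	unsigned long active = clock_khz * hdisplay / htotal + 1000;
	/* Lower bound: 90% of the pixel clock. */
	unsigned long floor = clock_khz * 9 / 10;

	return (active > floor ? active : floor) * 1000; /* kHz -> Hz */
}

int main(void)
{
	/* 1920x1080@60: 148500 kHz pixel clock, 2200 clocks per line. */
	printf("hvs_load = %lu Hz\n", hvs_load_hdmi0(148500, 1920, 2200));
	/* Prints 133650000: the 90% floor beats 129600 + 1000 kHz. */
	return 0;
}

For the non-HDMI0 branch the load is simply the full pixel clock in Hz.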
3 changes: 3 additions & 0 deletions drivers/gpu/drm/vc4/vc4_drv.h
@@ -324,6 +324,7 @@ struct vc4_hvs {
	u32 __iomem *dlist;

	struct clk *core_clk;
	struct clk_request *core_req;

	/* Memory manager for CRTCs to allocate space in the display
	 * list. Units are dwords.
@@ -535,6 +536,8 @@ struct vc4_crtc_state {
		unsigned int bottom;
	} margins;

	unsigned long hvs_load;

	/* Transitional state below, only valid during atomic commits */
	bool update_muxing;
};
120 changes: 114 additions & 6 deletions drivers/gpu/drm/vc4/vc4_kms.c
@@ -39,9 +39,11 @@ static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)

struct vc4_hvs_state {
	struct drm_private_state base;
	unsigned long core_clock_rate;

	struct {
		unsigned in_use: 1;
		unsigned long fifo_load;
		struct drm_crtc_commit *pending_commit;
	} fifo_state[HVS_NUM_CHANNELS];
};
@@ -342,11 +344,20 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc_state *new_crtc_state;
	struct vc4_hvs_state *new_hvs_state;
	struct drm_crtc *crtc;
	struct vc4_hvs_state *old_hvs_state;
	struct clk_request *core_req;
	int i;

	old_hvs_state = vc4_hvs_get_old_global_state(state);
	if (WARN_ON(!old_hvs_state))
		return;

	new_hvs_state = vc4_hvs_get_new_global_state(state);
	if (WARN_ON(!new_hvs_state))
		return;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state;

@@ -357,12 +368,19 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
		vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
	}

	if (vc4->hvs && vc4->hvs->hvs5)
	if (vc4->hvs && vc4->hvs->hvs5) {
		/*
		 * Do a temporary request on the core clock during the
		 * modeset.
		 */
		core_req = clk_request_start(hvs->core_clk, 500000000);

	old_hvs_state = vc4_hvs_get_old_global_state(state);
	if (!old_hvs_state)
		return;
		/*
		 * And remove the previous one based on the HVS
		 * requirements if any.
		 */
		clk_request_done(hvs->core_req);
	}

	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state =
@@ -413,8 +431,20 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)

	drm_atomic_helper_cleanup_planes(dev, state);

	if (vc4->hvs && vc4->hvs->hvs5)
	if (vc4->hvs && vc4->hvs->hvs5) {
		drm_dbg(dev, "Running the core clock at %lu Hz\n",
			new_hvs_state->core_clock_rate);

		/*
		 * Request a clock rate based on the current HVS
		 * requirements.
		 */
		hvs->core_req = clk_request_start(hvs->core_clk,
						  new_hvs_state->core_clock_rate);

		/* And drop the temporary request */
		clk_request_done(core_req);
	}
}

static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
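Taken together, the two vc4_atomic_commit_tail() hunks implement a request handoff: every commit first takes a temporary 500 MHz request so the modeset itself cannot starve the HVS, drops whatever rate the previous state had requested, and only after the commit installs the rate computed for the new state, at which point the temporary request is released. A standalone sketch of that ordering, with toy stand-ins for the downstream Raspberry Pi clk_request helpers (the stub bodies and example rates are hypothetical):

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the downstream clk_request API used by the patch. */
struct clk_request { unsigned long rate; };

static struct clk_request *clk_request_start(const char *clk, unsigned long rate)
{
	struct clk_request *req = malloc(sizeof(*req));

	if (!req)
		abort();
	req->rate = rate;
	printf("%s: requested at least %lu Hz\n", clk, rate);
	return req;
}

static void clk_request_done(struct clk_request *req)
{
	if (!req)	/* nothing requested yet (first commit) */
		return;
	printf("dropped request for %lu Hz\n", req->rate);
	free(req);
}

/* Request kept across commits, like hvs->core_req in the patch. */
static struct clk_request *core_req;

static void commit_tail(unsigned long new_core_clock_rate)
{
	/* 1. Temporary request so the modeset itself runs at 500 MHz. */
	struct clk_request *tmp = clk_request_start("hvs_core", 500000000);

	/* 2. Drop the rate the previous commit asked for, if any. */
	clk_request_done(core_req);

	/* ... planes and CRTCs are actually committed here ... */

	/* 3. Install the rate computed for the new state ... */
	core_req = clk_request_start("hvs_core", new_core_clock_rate);

	/* 4. ... and release the temporary 500 MHz request. */
	clk_request_done(tmp);
}

int main(void)
{
	commit_tail(133650000);	/* e.g. a single 1080p60 output */
	commit_tail(282150000);	/* e.g. two active outputs */
	return 0;
}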
@@ -683,6 +713,7 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
		state->fifo_state[i].fifo_load = old_state->fifo_state[i].fifo_load;

		if (!old_state->fifo_state[i].pending_commit)
			continue;
@@ -691,6 +722,9 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
		drm_crtc_commit_get(old_state->fifo_state[i].pending_commit);
	}

	state->core_clock_rate = old_state->core_clock_rate;


	return &state->base;
}

@@ -849,6 +883,76 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
	return 0;
}

static int
vc4_core_clock_atomic_check(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;
	struct vc4_hvs_state *hvs_new_state;
	struct vc4_load_tracker_state *load_state;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	unsigned int num_outputs;
	unsigned long pixel_rate;
	unsigned long cob_rate;
	unsigned int i;

	priv_state = drm_atomic_get_private_obj_state(state,
						       &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);

	hvs_new_state = vc4_hvs_get_global_state(state);
	if (!hvs_new_state)
		return -EINVAL;

	for_each_oldnew_crtc_in_state(state, crtc,
				      old_crtc_state,
				      new_crtc_state,
				      i) {
		if (old_crtc_state->active) {
			struct vc4_crtc_state *old_vc4_state =
				to_vc4_crtc_state(old_crtc_state);
			unsigned int channel = old_vc4_state->assigned_channel;

			hvs_new_state->fifo_state[channel].fifo_load = 0;
		}

		if (new_crtc_state->active) {
			struct vc4_crtc_state *new_vc4_state =
				to_vc4_crtc_state(new_crtc_state);
			unsigned int channel = new_vc4_state->assigned_channel;

			hvs_new_state->fifo_state[channel].fifo_load =
				new_vc4_state->hvs_load;
		}
	}

	cob_rate = 0;
	num_outputs = 0;
	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		if (!hvs_new_state->fifo_state[i].in_use)
			continue;

		num_outputs++;
		cob_rate += hvs_new_state->fifo_state[i].fifo_load;
	}

	pixel_rate = load_state->hvs_load;
	if (num_outputs > 1) {
		pixel_rate = (pixel_rate * 40) / 100;
	} else {
		pixel_rate = (pixel_rate * 60) / 100;
	}

	hvs_new_state->core_clock_rate = max(cob_rate, pixel_rate);

	return 0;
}


static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
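vc4_core_clock_atomic_check() sums the per-channel hvs_load values of the active channels into cob_rate, derates the load tracker's plane-load estimate to 40% when several outputs are active or 60% with a single one, and keeps the larger of the two as core_clock_rate. A standalone model of that final selection with made-up example numbers (a zero fifo_load stands in for the in_use flag here):

#include <stdio.h>

/* Model of the rate selection in vc4_core_clock_atomic_check() above. */
static unsigned long core_clock_rate(const unsigned long *fifo_loads,
				     unsigned int nchannels,
				     unsigned long tracker_load)
{
	unsigned long cob_rate = 0;
	unsigned long pixel_rate = tracker_load;
	unsigned int num_outputs = 0;
	unsigned int i;

	for (i = 0; i < nchannels; i++) {
		if (!fifo_loads[i])	/* channel not in use */
			continue;

		num_outputs++;
		cob_rate += fifo_loads[i];
	}

	/* Derate the plane-load estimate: 40% for several outputs, 60% for one. */
	if (num_outputs > 1)
		pixel_rate = pixel_rate * 40 / 100;
	else
		pixel_rate = pixel_rate * 60 / 100;

	return cob_rate > pixel_rate ? cob_rate : pixel_rate;
}

int main(void)
{
	/* One 1080p60 HDMI output (hvs_load from the vc4_crtc.c example). */
	unsigned long single[3] = { 133650000, 0, 0 };
	/* Two outputs: the same HDMI channel plus a 1080p60 non-HDMI one. */
	unsigned long dual[3] = { 133650000, 148500000, 0 };

	/* The 200/400 MHz plane-load figures are purely illustrative. */
	printf("one output:  %lu Hz\n", core_clock_rate(single, 3, 200000000));
	printf("two outputs: %lu Hz\n", core_clock_rate(dual, 3, 400000000));
	return 0;
}

With one output the 133.65 MHz COB figure wins over 60% of the 200 MHz plane load; with two outputs the 282.15 MHz sum wins over 40% of 400 MHz.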
@@ -866,7 +970,11 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
	if (ret)
		return ret;

	return vc4_load_tracker_atomic_check(state);
	ret = vc4_load_tracker_atomic_check(state);
	if (ret)
		return ret;

	return vc4_core_clock_atomic_check(state);
}

static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {