drm/msm/dpu: allow just single IRQ callback
DPU interrupts code allows multiple callbacks per interrupt. In reality
none of the interrupts is shared between blocks (and probably never will
be). Drop support for registering multiple callbacks per interrupt to
simplify the interrupt handling code.

Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
lumag authored and intel-lab-lkp committed Feb 1, 2022
1 parent 7fa2b93 commit c3da64f
Showing 9 changed files with 75 additions and 142 deletions.
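
At a glance, the API change looks like this — a minimal before/after sketch
with signatures taken from the diff below; my_irq_handler and my_ctx are
hypothetical caller-side names used only for illustration:

    /* Before: the caller owns a dpu_irq_callback node that must stay
     * alive until unregistration, and passes it to both calls. */
    struct dpu_irq_callback cb = {
            .func = my_irq_handler,
            .arg = my_ctx,
    };
    dpu_core_irq_register_callback(dpu_kms, irq_idx, &cb);
    /* ... */
    dpu_core_irq_unregister_callback(dpu_kms, irq_idx, &cb);

    /* After: one callback per irq_idx, passed as a plain function
     * pointer plus argument; unregistration needs only the index. */
    dpu_core_irq_register_callback(dpu_kms, irq_idx, my_irq_handler, my_ctx);
    /* ... */
    dpu_core_irq_unregister_callback(dpu_kms, irq_idx);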
16 changes: 5 additions & 11 deletions drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -44,36 +44,30 @@ u32 dpu_core_irq_read(
* interrupt
* @dpu_kms: DPU handle
* @irq_idx: irq index
* @irq_cb: IRQ callback structure, containing callback function
* and argument. Passing NULL for irq_cb will unregister
* the callback for the given irq_idx
* This must exist until un-registration.
* @irq_cb: IRQ callback function.
* @irq_arg: IRQ callback argument.
* @return: 0 for success registering callback, otherwise failure
*
* This function supports registration of multiple callbacks for each interrupt.
*/
int dpu_core_irq_register_callback(
struct dpu_kms *dpu_kms,
int irq_idx,
struct dpu_irq_callback *irq_cb);
void (*irq_cb)(void *arg, int irq_idx),
void *irq_arg);

/**
* dpu_core_irq_unregister_callback - For unregistering callback function on IRQ
* interrupt
* @dpu_kms: DPU handle
* @irq_idx: irq index
* @irq_cb: IRQ callback structure, containing callback function
* and argument. Passing NULL for irq_cb will unregister
* the callback for the given irq_idx
* This must match with registration.
* @return: 0 for success registering callback, otherwise failure
*
* This function supports registration of multiple callbacks for each interrupt.
*/
int dpu_core_irq_unregister_callback(
struct dpu_kms *dpu_kms,
int irq_idx,
struct dpu_irq_callback *irq_cb);
int irq_idx);

/**
* dpu_debugfs_core_irq_init - register core irq debugfs
7 changes: 3 additions & 4 deletions drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -311,7 +311,7 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
local_irq_save(flags);
irq->cb.func(phys_enc, irq->irq_idx);
irq->func(phys_enc, irq->irq_idx);
local_irq_restore(flags);
ret = 0;
} else {
@@ -352,7 +352,7 @@ int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
}

ret = dpu_core_irq_register_callback(phys_enc->dpu_kms, irq->irq_idx,
&irq->cb);
irq->func, phys_enc);
if (ret) {
DPU_ERROR_PHYS(phys_enc,
"failed to register IRQ callback for %s\n",
@@ -383,8 +383,7 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
return 0;
}

ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, irq->irq_idx,
&irq->cb);
ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, irq->irq_idx);
if (ret) {
DRM_ERROR("unreg cb fail id=%u, intr=%d, irq=%d ret=%d",
DRMID(phys_enc->parent), intr_idx,
2 changes: 1 addition & 1 deletion drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -174,7 +174,7 @@ struct dpu_encoder_irq {
const char *name;
enum dpu_intr_idx intr_idx;
int irq_idx;
struct dpu_irq_callback cb;
void (*func)(void *arg, int irq_idx);
};

/**
10 changes: 4 additions & 6 deletions drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -782,30 +782,28 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
phys_enc->enable_state = DPU_ENC_DISABLED;
for (i = 0; i < INTR_IDX_MAX; i++) {
irq = &phys_enc->irq[i];
INIT_LIST_HEAD(&irq->cb.list);
irq->irq_idx = -EINVAL;
irq->cb.arg = phys_enc;
}

irq = &phys_enc->irq[INTR_IDX_CTL_START];
irq->name = "ctl_start";
irq->intr_idx = INTR_IDX_CTL_START;
irq->cb.func = dpu_encoder_phys_cmd_ctl_start_irq;
irq->func = dpu_encoder_phys_cmd_ctl_start_irq;

irq = &phys_enc->irq[INTR_IDX_PINGPONG];
irq->name = "pp_done";
irq->intr_idx = INTR_IDX_PINGPONG;
irq->cb.func = dpu_encoder_phys_cmd_pp_tx_done_irq;
irq->func = dpu_encoder_phys_cmd_pp_tx_done_irq;

irq = &phys_enc->irq[INTR_IDX_RDPTR];
irq->name = "pp_rd_ptr";
irq->intr_idx = INTR_IDX_RDPTR;
irq->cb.func = dpu_encoder_phys_cmd_pp_rd_ptr_irq;
irq->func = dpu_encoder_phys_cmd_pp_rd_ptr_irq;

irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
irq->name = "underrun";
irq->intr_idx = INTR_IDX_UNDERRUN;
irq->cb.func = dpu_encoder_phys_cmd_underrun_irq;
irq->func = dpu_encoder_phys_cmd_underrun_irq;

atomic_set(&phys_enc->vblank_refcount, 0);
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
6 changes: 2 additions & 4 deletions drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -725,20 +725,18 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
phys_enc->enc_spinlock = p->enc_spinlock;
for (i = 0; i < INTR_IDX_MAX; i++) {
irq = &phys_enc->irq[i];
INIT_LIST_HEAD(&irq->cb.list);
irq->irq_idx = -EINVAL;
irq->cb.arg = phys_enc;
}

irq = &phys_enc->irq[INTR_IDX_VSYNC];
irq->name = "vsync_irq";
irq->intr_idx = INTR_IDX_VSYNC;
irq->cb.func = dpu_encoder_phys_vid_vblank_irq;
irq->func = dpu_encoder_phys_vid_vblank_irq;

irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
irq->name = "underrun";
irq->intr_idx = INTR_IDX_UNDERRUN;
irq->cb.func = dpu_encoder_phys_vid_underrun_irq;
irq->func = dpu_encoder_phys_vid_underrun_irq;

atomic_set(&phys_enc->vblank_refcount, 0);
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
140 changes: 47 additions & 93 deletions drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -145,21 +145,17 @@ static const struct dpu_intr_reg dpu_intr_set[] = {
*/
static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
{
struct dpu_irq_callback *cb;

VERB("irq_idx=%d\n", irq_idx);

if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx]))
if (!dpu_kms->hw_intr->irq_tbl[irq_idx].cb)
DRM_ERROR("no registered cb, idx:%d\n", irq_idx);

atomic_inc(&dpu_kms->hw_intr->irq_counts[irq_idx]);
atomic_inc(&dpu_kms->hw_intr->irq_tbl[irq_idx].count);

/*
* Perform registered function callback
*/
list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[irq_idx], list)
if (cb->func)
cb->func(cb->arg, irq_idx);
dpu_kms->hw_intr->irq_tbl[irq_idx].cb(dpu_kms->hw_intr->irq_tbl[irq_idx].arg, irq_idx);
}

irqreturn_t dpu_core_irq(struct msm_kms *kms)
@@ -408,24 +404,18 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
struct dpu_mdss_cfg *m)
{
struct dpu_hw_intr *intr;
int nirq = MDP_INTR_MAX * 32;

if (!addr || !m)
return ERR_PTR(-EINVAL);

intr = kzalloc(sizeof(*intr), GFP_KERNEL);
intr = kzalloc(struct_size(intr, irq_tbl, nirq), GFP_KERNEL);
if (!intr)
return ERR_PTR(-ENOMEM);

__intr_offset(m, addr, &intr->hw);

intr->total_irqs = ARRAY_SIZE(dpu_intr_set) * 32;

intr->cache_irq_mask = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
GFP_KERNEL);
if (intr->cache_irq_mask == NULL) {
kfree(intr);
return ERR_PTR(-ENOMEM);
}
intr->total_irqs = nirq;

intr->irq_mask = m->mdss_irqs;
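
The single kzalloc() above sizes the structure together with its flexible
irq_tbl[] array member via struct_size() from <linux/overflow.h>, which is
why the separate cache_irq_mask allocation (and its error path) disappears.
A standalone sketch of the same pattern in plain C, with the kernel macro's
size computation written out by hand (kernel struct_size() additionally
saturates on overflow rather than wrapping):

    #include <stdlib.h>

    struct entry {
            void (*cb)(void *arg, int irq_idx);
            void *arg;
    };

    struct table {
            int total;
            struct entry tbl[];     /* flexible array member, like irq_tbl[] */
    };

    static struct table *table_alloc(int nirq)
    {
            /* struct_size(t, tbl, nirq) is roughly
             * sizeof(*t) + nirq * sizeof(t->tbl[0]). */
            struct table *t = calloc(1, sizeof(*t) + nirq * sizeof(t->tbl[0]));

            if (t)
                    t->total = nirq;
            return t;
    }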

@@ -436,31 +426,19 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,

void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
{
if (intr) {
kfree(intr->cache_irq_mask);

kfree(intr->irq_cb_tbl);
kfree(intr->irq_counts);

if (intr)
kfree(intr);
}
}

int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
struct dpu_irq_callback *register_irq_cb)
void (*irq_cb)(void *arg, int irq_idx),
void *irq_arg)
{
unsigned long irq_flags;
int ret;

if (!dpu_kms->hw_intr->irq_cb_tbl) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}

if (!register_irq_cb || !register_irq_cb->func) {
DPU_ERROR("invalid irq_cb:%d func:%d\n",
register_irq_cb != NULL,
register_irq_cb ?
register_irq_cb->func != NULL : -1);
if (!irq_cb) {
DPU_ERROR("invalid ird_idx:%d irq_cb:%ps\n", irq_idx, irq_cb);
return -EINVAL;
}

@@ -472,41 +450,32 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
list_del_init(&register_irq_cb->list);
list_add_tail(&register_irq_cb->list,
&dpu_kms->hw_intr->irq_cb_tbl[irq_idx]);
if (list_is_first(&register_irq_cb->list,
&dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
int ret = dpu_hw_intr_enable_irq_locked(

if (unlikely(WARN_ON(dpu_kms->hw_intr->irq_tbl[irq_idx].cb))) {
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

return -EBUSY;
}

trace_dpu_core_irq_register_callback(irq_idx, irq_cb);
dpu_kms->hw_intr->irq_tbl[irq_idx].arg = irq_arg;
dpu_kms->hw_intr->irq_tbl[irq_idx].cb = irq_cb;

ret = dpu_hw_intr_enable_irq_locked(
dpu_kms->hw_intr,
irq_idx);
if (ret)
DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
if (ret)
DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
irq_idx);
}
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

return 0;
}

int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
struct dpu_irq_callback *register_irq_cb)
int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
{
unsigned long irq_flags;

if (!dpu_kms->hw_intr->irq_cb_tbl) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}

if (!register_irq_cb || !register_irq_cb->func) {
DPU_ERROR("invalid irq_cb:%d func:%d\n",
register_irq_cb != NULL,
register_irq_cb ?
register_irq_cb->func != NULL : -1);
return -EINVAL;
}
int ret;

if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
@@ -516,18 +485,16 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
list_del_init(&register_irq_cb->list);
/* empty callback list but interrupt is still enabled */
if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
int ret = dpu_hw_intr_disable_irq_locked(
dpu_kms->hw_intr,
irq_idx);
if (ret)
DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
irq_idx);
VERB("irq_idx=%d ret=%d\n", irq_idx, ret);
}
trace_dpu_core_irq_unregister_callback(irq_idx);

ret = dpu_hw_intr_disable_irq_locked(dpu_kms->hw_intr, irq_idx);
if (ret)
DPU_ERROR("Fail to disable IRQ for irq_idx:%d: %d\n",
irq_idx, ret);

dpu_kms->hw_intr->irq_tbl[irq_idx].cb = NULL;
dpu_kms->hw_intr->irq_tbl[irq_idx].arg = NULL;

spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

return 0;
@@ -537,24 +504,18 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
struct dpu_kms *dpu_kms = s->private;
struct dpu_irq_callback *cb;
unsigned long irq_flags;
int i, irq_count, cb_count;

if (WARN_ON(!dpu_kms->hw_intr->irq_cb_tbl))
return 0;
int i, irq_count;
void *cb;

for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
cb_count = 0;
irq_count = atomic_read(&dpu_kms->hw_intr->irq_counts[i]);
list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[i], list)
cb_count++;
irq_count = atomic_read(&dpu_kms->hw_intr->irq_tbl[i].count);
cb = dpu_kms->hw_intr->irq_tbl[i].cb;
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

if (irq_count || cb_count)
seq_printf(s, "idx:%d irq:%d cb:%d\n",
i, irq_count, cb_count);
if (irq_count || cb)
seq_printf(s, "idx:%d irq:%d cb:%ps\n", i, irq_count, cb);
}

return 0;
@@ -580,15 +541,8 @@ void dpu_core_irq_preinstall(struct msm_kms *kms)
dpu_disable_all_irqs(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);

/* Create irq callbacks for all possible irq_idx */
dpu_kms->hw_intr->irq_cb_tbl = kcalloc(dpu_kms->hw_intr->total_irqs,
sizeof(struct list_head), GFP_KERNEL);
dpu_kms->hw_intr->irq_counts = kcalloc(dpu_kms->hw_intr->total_irqs,
sizeof(atomic_t), GFP_KERNEL);
for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
INIT_LIST_HEAD(&dpu_kms->hw_intr->irq_cb_tbl[i]);
atomic_set(&dpu_kms->hw_intr->irq_counts[i], 0);
}
for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
atomic_set(&dpu_kms->hw_intr->irq_tbl[i].count, 0);
}

void dpu_core_irq_uninstall(struct msm_kms *kms)
@@ -598,7 +552,7 @@ void dpu_core_irq_uninstall(struct msm_kms *kms)

pm_runtime_get_sync(&dpu_kms->pdev->dev);
for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
if (!list_empty(&dpu_kms->hw_intr->irq_cb_tbl[i]))
if (dpu_kms->hw_intr->irq_tbl[i].cb)
DPU_ERROR("irq_idx=%d still enabled/registered\n", i);

dpu_clear_irqs(dpu_kms);
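Note the debugfs change at the end of this file: with a single slot per
interrupt, the summary prints the registered callback symbol via the %ps
format specifier instead of a callback count. Hypothetical output, using
handler names from the encoder files above with made-up index and count
values:

    idx:15 irq:2048 cb:dpu_encoder_phys_vid_vblank_irq
    idx:16 irq:3 cb:dpu_encoder_phys_vid_underrun_irq
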
12 changes: 7 additions & 5 deletions drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
Expand Up @@ -43,19 +43,21 @@ enum dpu_hw_intr_reg {
* @save_irq_status: array of IRQ status reg storage created during init
* @total_irqs: total number of irq_idx mapped in the hw_interrupts
* @irq_lock: spinlock for accessing IRQ resources
* @irq_cb_tbl: array of IRQ callbacks lists
* @irq_counts: array of IRQ counts
* @irq_tbl: array of IRQ callback entries
*/
struct dpu_hw_intr {
struct dpu_hw_blk_reg_map hw;
u32 *cache_irq_mask;
u32 cache_irq_mask[MDP_INTR_MAX];
u32 *save_irq_status;
u32 total_irqs;
spinlock_t irq_lock;
unsigned long irq_mask;

struct list_head *irq_cb_tbl;
atomic_t *irq_counts;
struct {
void (*cb)(void *arg, int irq_idx);
void *arg;
atomic_t count;
} irq_tbl[];
};

/**
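
Putting the struct and the handler together: dispatch over this table is a
guarded single indexed call, as dpu_core_irq_callback_handler() earlier in
the diff shows. A compact restatement of that shape (field names from the
diff; error reporting elided):

    static void dispatch_one(struct dpu_hw_intr *intr, int irq_idx)
    {
            if (!intr->irq_tbl[irq_idx].cb)
                    return;         /* no callback registered for this index */

            atomic_inc(&intr->irq_tbl[irq_idx].count);
            intr->irq_tbl[irq_idx].cb(intr->irq_tbl[irq_idx].arg, irq_idx);
    }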
