Commit d94fc8f3 authored by Dmitry Baryshkov's avatar Dmitry Baryshkov Committed by Rob Clark

drm/msm/dpu: simplify IRQ enabling/disabling

Merge dpu_core_irq_enable() into dpu_core_irq_register_callback() and
dpu_core_irq_disable() into dpu_core_irq_unregister_callback(), because
they are called in pairs. There is no need for a separate
enable/disable pair: we can enable the hardware IRQ when the first
callback is registered and disable it when the last callback is
unregistered.
Signed-off-by: default avatarDmitry Baryshkov <dmitry.baryshkov@linaro.org>
Reviewed-by: default avatarAbhinav Kumar <abhinavk@codeaurora.org>
Link: https://lore.kernel.org/r/20210516202910.2141079-7-dmitry.baryshkov@linaro.org
[fixup tracepoint compile warns]
Signed-off-by: default avatarRob Clark <robdclark@chromium.org>
parent 18b20ac0
...@@ -26,10 +26,8 @@ static void dpu_core_irq_callback_handler(void *arg, int irq_idx) ...@@ -26,10 +26,8 @@ static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
pr_debug("irq_idx=%d\n", irq_idx); pr_debug("irq_idx=%d\n", irq_idx);
if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) { if (list_empty(&irq_obj->irq_cb_tbl[irq_idx]))
DRM_ERROR("no registered cb, idx:%d enable_count:%d\n", irq_idx, DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]));
}
atomic_inc(&irq_obj->irq_counts[irq_idx]); atomic_inc(&irq_obj->irq_counts[irq_idx]);
...@@ -43,127 +41,6 @@ static void dpu_core_irq_callback_handler(void *arg, int irq_idx) ...@@ -43,127 +41,6 @@ static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags); spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
} }
/**
 * _dpu_core_irq_enable - enable core interrupt given by the index
 * @dpu_kms: Pointer to dpu kms context
 * @irq_idx: interrupt index
 *
 * Bumps the per-IRQ reference count; the hardware line is only
 * unmasked on the 0 -> 1 transition. Returns 0 on success or a
 * negative errno on invalid arguments / hardware failure.
 */
static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx)
{
	unsigned long flags;
	int refcount;
	int rc = 0;

	/* Reject calls made before the IRQ bookkeeping was allocated. */
	if (!dpu_kms->hw_intr ||
			!dpu_kms->irq_obj.enable_counts ||
			!dpu_kms->irq_obj.irq_counts) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	/* Snapshot of the count *before* the increment, for tracing only. */
	refcount = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
	DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, refcount);
	trace_dpu_core_irq_enable_idx(irq_idx, refcount);

	/* Only the first enabler actually touches the hardware. */
	if (atomic_inc_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 1) {
		rc = dpu_kms->hw_intr->ops.enable_irq(dpu_kms->hw_intr,
						      irq_idx);
		if (rc)
			DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
					irq_idx);

		DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, rc);

		spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, flags);
		/* empty callback list but interrupt is enabled */
		if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]))
			DPU_ERROR("irq_idx=%d enabled with no callback\n",
					irq_idx);
		spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, flags);
	}

	return rc;
}
/**
 * dpu_core_irq_enable - enable one or more core interrupts
 * @dpu_kms: Pointer to dpu kms context
 * @irq_idxs: Array of interrupt indices
 * @irq_count: Number of entries in @irq_idxs
 *
 * Return: 0 on success, or the first error from _dpu_core_irq_enable().
 * Note that on error some earlier entries may already be enabled.
 */
int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
{
	int i, ret = 0, counts;

	if (!irq_idxs || !irq_count) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	/*
	 * Diagnostic only: warn if the first index is already enabled.
	 * Bound-check before touching the array -- the previous code read
	 * enable_counts[irq_idxs[0]] unconditionally, an out-of-bounds
	 * access for an invalid index (such an index is only rejected
	 * later, inside _dpu_core_irq_enable()).
	 */
	if (dpu_kms->hw_intr && dpu_kms->irq_obj.enable_counts &&
			irq_idxs[0] >= 0 &&
			irq_idxs[0] < dpu_kms->hw_intr->total_irqs) {
		counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
		if (counts)
			DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
	}

	for (i = 0; (i < irq_count) && !ret; i++)
		ret = _dpu_core_irq_enable(dpu_kms, irq_idxs[i]);

	return ret;
}
/**
 * _dpu_core_irq_disable - disable core interrupt given by the index
 * @dpu_kms: Pointer to dpu kms context
 * @irq_idx: interrupt index
 *
 * Drops the per-IRQ reference count; the hardware line is only
 * masked on the 1 -> 0 transition. Returns 0 on success or a
 * negative errno on invalid arguments / hardware failure.
 */
static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx)
{
	int refcount;
	int rc = 0;

	/* Reject calls made before the IRQ bookkeeping was allocated. */
	if (!dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	/* Snapshot of the count *before* the decrement, for tracing only. */
	refcount = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
	DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, refcount);
	trace_dpu_core_irq_disable_idx(irq_idx, refcount);

	/* Only the last disabler actually touches the hardware. */
	if (atomic_dec_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 0) {
		rc = dpu_kms->hw_intr->ops.disable_irq(dpu_kms->hw_intr,
						       irq_idx);
		if (rc)
			DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
					irq_idx);

		DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, rc);
	}

	return rc;
}
/**
 * dpu_core_irq_disable - disable one or more core interrupts
 * @dpu_kms: Pointer to dpu kms context
 * @irq_idxs: Array of interrupt indices
 * @irq_count: Number of entries in @irq_idxs
 *
 * Return: 0 on success, or the first error from _dpu_core_irq_disable().
 * Note that on error some earlier entries may already be disabled.
 */
int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
{
	int i, ret = 0, counts;

	if (!irq_idxs || !irq_count) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	/*
	 * Diagnostic only: warn on an unexpected refcount before disabling.
	 * Bound-check before touching the array -- the previous code read
	 * enable_counts[irq_idxs[0]] unconditionally, an out-of-bounds
	 * access for an invalid index (such an index is only rejected
	 * later, inside _dpu_core_irq_disable()).
	 */
	if (dpu_kms->hw_intr && dpu_kms->irq_obj.enable_counts &&
			irq_idxs[0] >= 0 &&
			irq_idxs[0] < dpu_kms->hw_intr->total_irqs) {
		counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
		if (counts == 2)
			DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
	}

	for (i = 0; (i < irq_count) && !ret; i++)
		ret = _dpu_core_irq_disable(dpu_kms, irq_idxs[i]);

	return ret;
}
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear) u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
{ {
if (!dpu_kms->hw_intr || if (!dpu_kms->hw_intr ||
...@@ -210,6 +87,16 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx, ...@@ -210,6 +87,16 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
list_del_init(&register_irq_cb->list); list_del_init(&register_irq_cb->list);
list_add_tail(&register_irq_cb->list, list_add_tail(&register_irq_cb->list,
&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]); &dpu_kms->irq_obj.irq_cb_tbl[irq_idx]);
if (list_is_first(&register_irq_cb->list,
&dpu_kms->irq_obj.irq_cb_tbl[irq_idx])) {
int ret = dpu_kms->hw_intr->ops.enable_irq(
dpu_kms->hw_intr,
irq_idx);
if (ret)
DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
irq_idx);
}
spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags); spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
return 0; return 0;
...@@ -244,9 +131,15 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx, ...@@ -244,9 +131,15 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb); trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
list_del_init(&register_irq_cb->list); list_del_init(&register_irq_cb->list);
/* empty callback list but interrupt is still enabled */ /* empty callback list but interrupt is still enabled */
if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]) && if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx])) {
atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx])) int ret = dpu_kms->hw_intr->ops.disable_irq(
DPU_ERROR("irq_idx=%d enabled with no callback\n", irq_idx); dpu_kms->hw_intr,
irq_idx);
if (ret)
DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
irq_idx);
DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
}
spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags); spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
return 0; return 0;
...@@ -274,23 +167,22 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v) ...@@ -274,23 +167,22 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
struct dpu_irq *irq_obj = s->private; struct dpu_irq *irq_obj = s->private;
struct dpu_irq_callback *cb; struct dpu_irq_callback *cb;
unsigned long irq_flags; unsigned long irq_flags;
int i, irq_count, enable_count, cb_count; int i, irq_count, cb_count;
if (WARN_ON(!irq_obj->enable_counts || !irq_obj->irq_cb_tbl)) if (WARN_ON(!irq_obj->irq_cb_tbl))
return 0; return 0;
for (i = 0; i < irq_obj->total_irqs; i++) { for (i = 0; i < irq_obj->total_irqs; i++) {
spin_lock_irqsave(&irq_obj->cb_lock, irq_flags); spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
cb_count = 0; cb_count = 0;
irq_count = atomic_read(&irq_obj->irq_counts[i]); irq_count = atomic_read(&irq_obj->irq_counts[i]);
enable_count = atomic_read(&irq_obj->enable_counts[i]);
list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list) list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
cb_count++; cb_count++;
spin_unlock_irqrestore(&irq_obj->cb_lock, irq_flags); spin_unlock_irqrestore(&irq_obj->cb_lock, irq_flags);
if (irq_count || enable_count || cb_count) if (irq_count || cb_count)
seq_printf(s, "idx:%d irq:%d enable:%d cb:%d\n", seq_printf(s, "idx:%d irq:%d cb:%d\n",
i, irq_count, enable_count, cb_count); i, irq_count, cb_count);
} }
return 0; return 0;
...@@ -321,13 +213,10 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms) ...@@ -321,13 +213,10 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->total_irqs; dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->total_irqs;
dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs, dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs,
sizeof(struct list_head), GFP_KERNEL); sizeof(struct list_head), GFP_KERNEL);
dpu_kms->irq_obj.enable_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
sizeof(atomic_t), GFP_KERNEL);
dpu_kms->irq_obj.irq_counts = kcalloc(dpu_kms->irq_obj.total_irqs, dpu_kms->irq_obj.irq_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
sizeof(atomic_t), GFP_KERNEL); sizeof(atomic_t), GFP_KERNEL);
for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) { for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) {
INIT_LIST_HEAD(&dpu_kms->irq_obj.irq_cb_tbl[i]); INIT_LIST_HEAD(&dpu_kms->irq_obj.irq_cb_tbl[i]);
atomic_set(&dpu_kms->irq_obj.enable_counts[i], 0);
atomic_set(&dpu_kms->irq_obj.irq_counts[i], 0); atomic_set(&dpu_kms->irq_obj.irq_counts[i], 0);
} }
} }
...@@ -338,8 +227,7 @@ void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms) ...@@ -338,8 +227,7 @@ void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
pm_runtime_get_sync(&dpu_kms->pdev->dev); pm_runtime_get_sync(&dpu_kms->pdev->dev);
for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
if (atomic_read(&dpu_kms->irq_obj.enable_counts[i]) || if (!list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
!list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
DPU_ERROR("irq_idx=%d still enabled/registered\n", i); DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
dpu_clear_all_irqs(dpu_kms); dpu_clear_all_irqs(dpu_kms);
...@@ -347,10 +235,8 @@ void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms) ...@@ -347,10 +235,8 @@ void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
pm_runtime_put_sync(&dpu_kms->pdev->dev); pm_runtime_put_sync(&dpu_kms->pdev->dev);
kfree(dpu_kms->irq_obj.irq_cb_tbl); kfree(dpu_kms->irq_obj.irq_cb_tbl);
kfree(dpu_kms->irq_obj.enable_counts);
kfree(dpu_kms->irq_obj.irq_counts); kfree(dpu_kms->irq_obj.irq_counts);
dpu_kms->irq_obj.irq_cb_tbl = NULL; dpu_kms->irq_obj.irq_cb_tbl = NULL;
dpu_kms->irq_obj.enable_counts = NULL;
dpu_kms->irq_obj.irq_counts = NULL; dpu_kms->irq_obj.irq_counts = NULL;
dpu_kms->irq_obj.total_irqs = 0; dpu_kms->irq_obj.total_irqs = 0;
} }
......
...@@ -29,36 +29,6 @@ void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms); ...@@ -29,36 +29,6 @@ void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms);
*/ */
irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms); irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms);
/**
* dpu_core_irq_enable - IRQ helper function for enabling one or more IRQs
* @dpu_kms: DPU handle
* @irq_idxs: Array of irq index
* @irq_count: Number of irq_idx provided in the array
* @return: 0 for success enabling IRQ, otherwise failure
*
* This function increments the count on each enable and decrements it on
* each disable. The interrupt is enabled if the count is 0 before the increment.
*/
int dpu_core_irq_enable(
struct dpu_kms *dpu_kms,
int *irq_idxs,
uint32_t irq_count);
/**
* dpu_core_irq_disable - IRQ helper function for disabling one of more IRQs
* @dpu_kms: DPU handle
* @irq_idxs: Array of irq index
* @irq_count: Number of irq_idx provided in the array
* @return: 0 for success disabling IRQ, otherwise failure
*
* This function increments the count on each enable and decrements it on
* each disable. The interrupt is disabled if the count is 0 after the decrement.
*/
int dpu_core_irq_disable(
struct dpu_kms *dpu_kms,
int *irq_idxs,
uint32_t irq_count);
/** /**
* dpu_core_irq_read - IRQ helper function for reading IRQ status * dpu_core_irq_read - IRQ helper function for reading IRQ status
* @dpu_kms: DPU handle * @dpu_kms: DPU handle
......
...@@ -361,17 +361,6 @@ int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc, ...@@ -361,17 +361,6 @@ int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
return ret; return ret;
} }
ret = dpu_core_irq_enable(phys_enc->dpu_kms, &irq->irq_idx, 1);
if (ret) {
DRM_ERROR("enable failed id=%u, intr=%d, irq=%d",
DRMID(phys_enc->parent), intr_idx,
irq->irq_idx);
dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
irq->irq_idx, &irq->cb);
irq->irq_idx = -EINVAL;
return ret;
}
trace_dpu_enc_irq_register_success(DRMID(phys_enc->parent), intr_idx, trace_dpu_enc_irq_register_success(DRMID(phys_enc->parent), intr_idx,
irq->irq_idx); irq->irq_idx);
...@@ -394,13 +383,6 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc, ...@@ -394,13 +383,6 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
return 0; return 0;
} }
ret = dpu_core_irq_disable(phys_enc->dpu_kms, &irq->irq_idx, 1);
if (ret) {
DRM_ERROR("disable failed id=%u, intr=%d, irq=%d ret=%d",
DRMID(phys_enc->parent), intr_idx,
irq->irq_idx, ret);
}
ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, irq->irq_idx, ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, irq->irq_idx,
&irq->cb); &irq->cb);
if (ret) { if (ret) {
......
...@@ -82,14 +82,12 @@ struct dpu_irq_callback { ...@@ -82,14 +82,12 @@ struct dpu_irq_callback {
* struct dpu_irq: IRQ structure contains callback registration info * struct dpu_irq: IRQ structure contains callback registration info
* @total_irq: total number of irq_idx obtained from HW interrupts mapping * @total_irq: total number of irq_idx obtained from HW interrupts mapping
* @irq_cb_tbl: array of IRQ callbacks setting * @irq_cb_tbl: array of IRQ callbacks setting
* @enable_counts array of IRQ enable counts
* @cb_lock: callback lock * @cb_lock: callback lock
* @debugfs_file: debugfs file for irq statistics * @debugfs_file: debugfs file for irq statistics
*/ */
struct dpu_irq { struct dpu_irq {
u32 total_irqs; u32 total_irqs;
struct list_head *irq_cb_tbl; struct list_head *irq_cb_tbl;
atomic_t *enable_counts;
atomic_t *irq_counts; atomic_t *irq_counts;
spinlock_t cb_lock; spinlock_t cb_lock;
}; };
......
...@@ -875,29 +875,6 @@ TRACE_EVENT(dpu_pp_connect_ext_te, ...@@ -875,29 +875,6 @@ TRACE_EVENT(dpu_pp_connect_ext_te,
TP_printk("pp:%d cfg:%u", __entry->pp, __entry->cfg) TP_printk("pp:%d cfg:%u", __entry->pp, __entry->cfg)
); );
/*
 * Template for tracepoints that record an IRQ index together with its
 * current enable refcount (as read before the increment/decrement).
 */
DECLARE_EVENT_CLASS(dpu_core_irq_idx_cnt_template,
TP_PROTO(int irq_idx, int enable_count),
TP_ARGS(irq_idx, enable_count),
TP_STRUCT__entry(
__field( int, irq_idx )
__field( int, enable_count )
),
TP_fast_assign(
__entry->irq_idx = irq_idx;
__entry->enable_count = enable_count;
),
TP_printk("irq_idx:%d enable_count:%u", __entry->irq_idx,
__entry->enable_count)
);
/* Emitted from _dpu_core_irq_enable() before the refcount increment. */
DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_enable_idx,
TP_PROTO(int irq_idx, int enable_count),
TP_ARGS(irq_idx, enable_count)
);
/* Emitted from _dpu_core_irq_disable() before the refcount decrement. */
DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_disable_idx,
TP_PROTO(int irq_idx, int enable_count),
TP_ARGS(irq_idx, enable_count)
);
DECLARE_EVENT_CLASS(dpu_core_irq_callback_template, DECLARE_EVENT_CLASS(dpu_core_irq_callback_template,
TP_PROTO(int irq_idx, struct dpu_irq_callback *callback), TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
TP_ARGS(irq_idx, callback), TP_ARGS(irq_idx, callback),
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment