Revert performance-critical patches
44d1c4a9965f1 Revert "kernel: Add API to mark IRQs and kthreads as performance critical"
b1b575c6d4a5f Revert "msm: kgsl: Mark IRQ and worker thread as performance critical"
d4d12c3b9cf54 Revert "msm: mdss: Mark IRQ and important kthreads as performance critical"
fe7f76fa3130c Revert "input: fpc1020: Mark IRQ as performance critical"
b6fbffce4cea1 Revert "devfreq_boost: Mark boost kthreads as performance critical"
5f2cb6454489d Revert "msm: mdss: Mark display-wake kthread as performance critical"
d4b1ecfc5c9c6 Revert "input: touchscreen: wahoo: Mark IRQs as performance critical"
461d4ab28ff55 Revert "simple_lmk: Mark reclaim kthread as performance critical"

We can't tolerate excess IRQs on benchmark CPUs, and these patches prevent us
from turning off entire clusters.

Signed-off-by: Danny Lin <danny@kdrag0n.dev>
kdrag0n committed Dec 6, 2020
1 parent 21e76d0 commit 29b315c
Showing 16 changed files with 14 additions and 195 deletions.
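In driver terms, the revert reduces every call site below to the stock kernel APIs. A rough sketch of the pattern (hedged: my_thread_fn, my_handler, my_dev, and the "my_worker" name are placeholders, not identifiers from this tree):

	/* before: kthread bound to the big cluster, IRQ marked perf-critical */
	task = kthread_run_perf_critical(my_thread_fn, my_dev, "my_worker");
	err = devm_request_irq(dev, irq, my_handler,
			       IRQF_TRIGGER_HIGH | IRQF_PERF_CRITICAL,
			       "my_dev", my_dev);

	/* after: stock helpers; placement falls back to the scheduler and the
	 * default IRQ affinity, so whole clusters can be hotplugged off */
	task = kthread_run(my_thread_fn, my_dev, "my_worker");
	err = devm_request_irq(dev, irq, my_handler, IRQF_TRIGGER_HIGH,
			       "my_dev", my_dev);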
4 changes: 2 additions & 2 deletions drivers/android/simple_lmk.c
@@ -296,8 +296,8 @@ static int simple_lmk_init_set(const char *val, const struct kernel_param *kp)
 	struct task_struct *thread;
 
 	if (!atomic_cmpxchg(&init_done, 0, 1)) {
-		thread = kthread_run_perf_critical(simple_lmk_reclaim_thread,
-						   NULL, "simple_lmkd");
+		thread = kthread_run(simple_lmk_reclaim_thread, NULL,
+				     "simple_lmkd");
 		BUG_ON(IS_ERR(thread));
 		BUG_ON(vmpressure_notifier_register(&vmpressure_notif));
 	}
4 changes: 2 additions & 2 deletions drivers/devfreq/devfreq_boost.c
@@ -295,8 +295,8 @@ static int __init devfreq_boost_init(void)
 	for (i = 0; i < DEVFREQ_MAX; i++) {
 		struct boost_dev *b = d->devices + i;
 
-		thread[i] = kthread_run_perf_critical(devfreq_boost_thread, b,
-						      "devfreq_boostd/%d", i);
+		thread[i] = kthread_run(devfreq_boost_thread, b,
+					"devfreq_boostd/%d", i);
 		if (IS_ERR(thread[i])) {
 			ret = PTR_ERR(thread[i]);
 			pr_err("Failed to create kthread, err: %d\n", ret);
5 changes: 2 additions & 3 deletions drivers/gpu/msm/kgsl.c
@@ -4731,8 +4731,7 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
 	}
 
 	status = devm_request_irq(device->dev, device->pwrctrl.interrupt_num,
-				  kgsl_irq_handler,
-				  IRQF_TRIGGER_HIGH | IRQF_PERF_CRITICAL,
+				  kgsl_irq_handler, IRQF_TRIGGER_HIGH,
 				  device->name, device);
 	if (status) {
 		KGSL_DRV_ERR(device, "request_irq(%d) failed: %d\n",
@@ -4955,7 +4954,7 @@ static int __init kgsl_core_init(void)
 
 	init_kthread_worker(&kgsl_driver.worker);
 
-	kgsl_driver.worker_thread = kthread_run_perf_critical(kthread_worker_fn,
+	kgsl_driver.worker_thread = kthread_run(kthread_worker_fn,
 		&kgsl_driver.worker, "kgsl_worker_thread");
 
 	if (IS_ERR(kgsl_driver.worker_thread)) {
2 changes: 0 additions & 2 deletions drivers/input/touchscreen/stm/ftm4_ts.c
@@ -1955,7 +1955,6 @@ static int fts_probe(struct i2c_client *client, const struct i2c_device_id *idp)
 	}
 
 	ctrl = client->dev.parent->driver_data;
-	irq_set_perf_affinity(ctrl->rsrcs.irq);
 
 	info->pm_i2c_req.type = PM_QOS_REQ_AFFINE_IRQ;
 	info->pm_i2c_req.irq = ctrl->rsrcs.irq;
@@ -1967,7 +1966,6 @@ static int fts_probe(struct i2c_client *client, const struct i2c_device_id *idp)
 	pm_qos_add_request(&info->pm_touch_req, PM_QOS_CPU_DMA_LATENCY,
 			   PM_QOS_DEFAULT_VALUE);
 
-	info->board->irq_type |= IRQF_PERF_CRITICAL;
 	retval = request_threaded_irq(info->irq, fts_hard_interrupt_handler,
 				      fts_interrupt_handler, info->board->irq_type,
 				      FTS_TS_DRV_NAME, info);
@@ -5707,7 +5707,6 @@ static int synaptics_rmi4_probe(struct platform_device *pdev)
 			"tp_direct_interrupt");
 
 	i2c_irq = synaptics_rmi4_i2c_irq();
-	irq_set_perf_affinity(i2c_irq);
 
 	rmi4_data->pm_i2c_req.type = PM_QOS_REQ_AFFINE_IRQ;
 	rmi4_data->pm_i2c_req.irq = i2c_irq;
@@ -5722,7 +5721,7 @@ static int synaptics_rmi4_probe(struct platform_device *pdev)
 #if IS_ENABLED(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_HTC)
 	retval = request_threaded_irq(rmi4_data->irq, synaptics_rmi4_hardirq,
 				      synaptics_rmi4_irq,
-				      IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQF_PERF_CRITICAL,
+				      IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
 				      PLATFORM_DRIVER_NAME,
 				      rmi4_data);
 	if (retval < 0) {
2 changes: 1 addition & 1 deletion drivers/misc/fpr_FingerprintCard/fpc1020_platform_tee.c
@@ -558,7 +558,7 @@ static int fpc1020_probe(struct platform_device *pdev)
 
 	atomic_set(&fpc1020->wakeup_enabled, 0);
 
-	irqf = IRQF_TRIGGER_RISING | IRQF_ONESHOT | IRQF_PERF_CRITICAL;
+	irqf = IRQF_TRIGGER_RISING | IRQF_ONESHOT;
 	if (of_property_read_bool(dev->of_node, "fpc,enable-wakeup")) {
 		irqf |= IRQF_NO_SUSPEND;
 		device_init_wakeup(dev, 1);
5 changes: 2 additions & 3 deletions drivers/video/fbdev/msm/mdss_dsi.c
@@ -3566,9 +3566,8 @@ static int mdss_dsi_ctrl_probe(struct platform_device *pdev)
 
 	init_completion(&ctrl_pdata->wake_comp);
 	init_waitqueue_head(&ctrl_pdata->wake_waitq);
-	ctrl_pdata->wake_thread =
-		kthread_run_perf_critical(mdss_dsi_disp_wake_thread,
-					  ctrl_pdata, "mdss_display_wake");
+	ctrl_pdata->wake_thread = kthread_run(mdss_dsi_disp_wake_thread,
+					      ctrl_pdata, "mdss_display_wake");
 	if (IS_ERR(ctrl_pdata->wake_thread)) {
 		rc = PTR_ERR(ctrl_pdata->wake_thread);
 		pr_err("%s: Failed to start display wake thread, rc=%d\n",
2 changes: 1 addition & 1 deletion drivers/video/fbdev/msm/mdss_fb.c
@@ -1820,7 +1820,7 @@ static int mdss_fb_start_disp_thread(struct msm_fb_data_type *mfd)
 	mdss_fb_get_split(mfd);
 
 	atomic_set(&mfd->commits_pending, 0);
-	mfd->disp_thread = kthread_run_perf_critical(__mdss_fb_display_thread,
+	mfd->disp_thread = kthread_run(__mdss_fb_display_thread,
 			mfd, "mdss_fb%d", mfd->index);
 
 	if (IS_ERR(mfd->disp_thread)) {
3 changes: 1 addition & 2 deletions drivers/video/fbdev/msm/mdss_mdp.c
@@ -1861,8 +1861,7 @@ static int mdss_mdp_irq_clk_setup(struct mdss_data_type *mdata)
 	pr_debug("max mdp clk rate=%d\n", mdata->max_mdp_clk_rate);
 
 	ret = devm_request_irq(&mdata->pdev->dev, mdss_mdp_hw.irq_info->irq,
-			       mdss_irq_handler,
-			       IRQF_PERF_CRITICAL, "MDSS", mdata);
+			       mdss_irq_handler, 0x0, "MDSS", mdata);
 	if (ret) {
 		pr_err("mdp request_irq() failed!\n");
 		return ret;
2 changes: 1 addition & 1 deletion drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -6197,7 +6197,7 @@ static int __vsync_retire_setup(struct msm_fb_data_type *mfd)
 	init_kthread_worker(&mdp5_data->worker);
 	init_kthread_work(&mdp5_data->vsync_work, __vsync_retire_work_handler);
 
-	mdp5_data->thread = kthread_run_perf_critical(kthread_worker_fn,
+	mdp5_data->thread = kthread_run(kthread_worker_fn,
 				&mdp5_data->worker, "vsync_retire_work");
 
 	if (IS_ERR(mdp5_data->thread)) {
6 changes: 0 additions & 6 deletions include/linux/interrupt.h
@@ -62,8 +62,6 @@
  * interrupt handler after suspending interrupts. For system
  * wakeup devices users need to implement wakeup detection in
  * their interrupt handlers.
- * IRQF_PERF_CRITICAL - Interrupt is critical to the overall performance of the
- *                      system and should be processed on a fast CPU.
  */
 #define IRQF_SHARED		0x00000080
 #define IRQF_PROBE_SHARED	0x00000100
@@ -77,7 +75,6 @@
 #define IRQF_NO_THREAD		0x00010000
 #define IRQF_EARLY_RESUME	0x00020000
 #define IRQF_COND_SUSPEND	0x00040000
-#define IRQF_PERF_CRITICAL	0x00080000
 
 #define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
 
@@ -200,13 +197,10 @@ extern void disable_percpu_irq(unsigned int irq);
 extern void enable_irq(unsigned int irq);
 extern void enable_percpu_irq(unsigned int irq, unsigned int type);
 extern void irq_wake_thread(unsigned int irq, void *dev_id);
-extern void irq_set_perf_affinity(unsigned int irq);
 
 /* The following three functions are for the core kernel use only. */
 extern void suspend_device_irqs(void);
 extern void resume_device_irqs(void);
-extern void unaffine_perf_irqs(void);
-extern void reaffine_perf_irqs(void);
 
 /**
  * struct irq_affinity_notify - context for notification of IRQ affinity changes
17 changes: 0 additions & 17 deletions include/linux/kthread.h
@@ -37,23 +37,6 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
 	__k;								\
 })
 
-/**
- * kthread_run_perf_critical - create and wake a performance-critical thread.
- *
- * Same as kthread_create().
- */
-#define kthread_run_perf_critical(threadfn, data, namefmt, ...)	\
-({									\
-	struct task_struct *__k						\
-		= kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \
-	if (!IS_ERR(__k)) {						\
-		__k->flags |= PF_PERF_CRITICAL;				\
-		kthread_bind_mask(__k, cpu_perf_mask);			\
-		wake_up_process(__k);					\
-	}								\
-	__k;								\
-})
-
 void kthread_bind(struct task_struct *k, unsigned int cpu);
 void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
 int kthread_stop(struct task_struct *k);
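Note that, despite the "Same as kthread_create()" doc line, the removed macro behaved like kthread_run() plus a hard bind to the big cluster. Open-coded, it was roughly the following sketch (my_thread_fn, my_data, and "my_worker" are placeholder names):

	struct task_struct *t;

	t = kthread_create(my_thread_fn, my_data, "my_worker");
	if (!IS_ERR(t)) {
		t->flags |= PF_PERF_CRITICAL;		/* mark thread as perf-critical */
		kthread_bind_mask(t, cpu_perf_mask);	/* restrict to big-cluster CPUs */
		wake_up_process(t);			/* kthread_create() leaves it stopped */
	}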
1 change: 0 additions & 1 deletion include/linux/sched.h
@@ -2320,7 +2320,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 #define PF_KTHREAD		0x00200000	/* I am a kernel thread */
 #define PF_RANDOMIZE		0x00400000	/* randomize virtual address space */
 #define PF_SWAPWRITE		0x00800000	/* Allowed to write to swap */
-#define PF_PERF_CRITICAL	0x01000000	/* Thread is performance-critical */
 #define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
 #define PF_MCE_EARLY		0x08000000	/* Early kill for mce process policy */
 #define PF_MUTEX_TESTER		0x20000000	/* Thread belongs to the rt mutex tester */
10 changes: 0 additions & 10 deletions kernel/cpu.c
@@ -435,16 +435,8 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
 
 int cpu_down(unsigned int cpu)
 {
-	struct cpumask newmask;
 	int err;
 
-	cpumask_andnot(&newmask, cpu_online_mask, cpumask_of(cpu));
-
-	/* One big cluster CPU and one little cluster CPU must remain online */
-	if (!cpumask_intersects(&newmask, cpu_perf_mask) ||
-	    !cpumask_intersects(&newmask, cpu_lp_mask))
-		return -EINVAL;
-
 	cpu_maps_update_begin();
 
 	if (cpu_hotplug_disabled) {
@@ -626,7 +618,6 @@ int disable_nonboot_cpus(void)
 	int cpu, first_cpu, error = 0;
 
 	cpu_maps_update_begin();
-	unaffine_perf_irqs();
 	first_cpu = cpumask_first(cpu_online_mask);
 	/*
 	 * We take down all of the non-boot CPUs in one shot to avoid races
@@ -701,7 +692,6 @@ void enable_nonboot_cpus(void)
 	arch_enable_nonboot_cpus_end();
 
 	cpumask_clear(frozen_cpus);
-	reaffine_perf_irqs();
 out:
 	cpu_maps_update_done();
 }
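To see why the reverted cpu_down() guard blocked cluster hotplug, consider an 8-CPU big.LITTLE layout. A minimal userspace simulation of the removed check (the masks and CPU numbering are illustrative; the real code uses cpumask_intersects() on cpu_perf_mask/cpu_lp_mask):

#include <stdio.h>

/* Little cluster = CPUs 0-3, big cluster = CPUs 4-7 (illustrative). */
#define CPU_LP_MASK   0x0fu
#define CPU_PERF_MASK 0xf0u

static int can_offline(unsigned int online, int cpu)
{
	unsigned int newmask = online & ~(1u << cpu);

	/* One big cluster CPU and one little cluster CPU must remain online */
	return (newmask & CPU_PERF_MASK) && (newmask & CPU_LP_MASK);
}

int main(void)
{
	unsigned int online = 0xffu;
	int cpu;

	/* Try to take the whole big cluster offline: the last big CPU is
	 * always rejected, so the cluster can never be fully powered off. */
	for (cpu = 4; cpu < 8; cpu++) {
		if (can_offline(online, cpu)) {
			online &= ~(1u << cpu);
			printf("cpu%d: offlined\n", cpu);
		} else {
			printf("cpu%d: rejected (-EINVAL)\n", cpu);
		}
	}
	return 0;
}

CPUs 4-6 go down, CPU 7 is rejected — exactly the behavior the benchmark setup can't tolerate, hence the revert.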
139 changes: 1 addition & 138 deletions kernel/irq/manage.c
@@ -18,20 +18,9 @@
 #include <linux/sched.h>
 #include <linux/sched/rt.h>
 #include <linux/task_work.h>
-#include <linux/cpu.h>
 
 #include "internals.h"
 
-struct irq_desc_list {
-	struct list_head list;
-	struct irq_desc *desc;
-} perf_crit_irqs = {
-	.list = LIST_HEAD_INIT(perf_crit_irqs.list)
-};
-
-static DEFINE_RAW_SPINLOCK(perf_irqs_lock);
-static int perf_cpu_index = -1;
-
 #ifdef CONFIG_IRQ_FORCED_THREADING
 __read_mostly bool force_irqthreads;
 
@@ -1146,112 +1135,6 @@ setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
 	return 0;
 }
 
-static void add_desc_to_perf_list(struct irq_desc *desc)
-{
-	struct irq_desc_list *item;
-
-	item = kmalloc(sizeof(*item), GFP_ATOMIC | __GFP_NOFAIL);
-	item->desc = desc;
-
-	raw_spin_lock(&perf_irqs_lock);
-	list_add(&item->list, &perf_crit_irqs.list);
-	raw_spin_unlock(&perf_irqs_lock);
-}
-
-static void affine_one_perf_thread(struct task_struct *t)
-{
-	t->flags |= PF_PERF_CRITICAL;
-	set_cpus_allowed_ptr(t, cpu_perf_mask);
-}
-
-static void unaffine_one_perf_thread(struct task_struct *t)
-{
-	t->flags &= ~PF_PERF_CRITICAL;
-	set_cpus_allowed_ptr(t, cpu_all_mask);
-}
-
-static void affine_one_perf_irq(struct irq_desc *desc)
-{
-	int cpu;
-
-	/* Balance the performance-critical IRQs across all perf CPUs */
-	while (1) {
-		cpu = cpumask_next_and(perf_cpu_index, cpu_perf_mask,
-				       cpu_online_mask);
-		if (cpu < nr_cpu_ids)
-			break;
-		perf_cpu_index = -1;
-	}
-	irq_set_affinity_locked(&desc->irq_data, cpumask_of(cpu), true);
-
-	perf_cpu_index = cpu;
-}
-
-static void setup_perf_irq_locked(struct irq_desc *desc)
-{
-	add_desc_to_perf_list(desc);
-	irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
-	raw_spin_lock(&perf_irqs_lock);
-	affine_one_perf_irq(desc);
-	raw_spin_unlock(&perf_irqs_lock);
-}
-
-void irq_set_perf_affinity(unsigned int irq)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	struct irqaction *action;
-	unsigned long flags;
-
-	if (!desc)
-		return;
-
-	raw_spin_lock_irqsave(&desc->lock, flags);
-	action = desc->action;
-	while (action) {
-		action->flags |= IRQF_PERF_CRITICAL;
-		action = action->next;
-	}
-	setup_perf_irq_locked(desc);
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-void unaffine_perf_irqs(void)
-{
-	struct irq_desc_list *data;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&perf_irqs_lock, flags);
-	list_for_each_entry(data, &perf_crit_irqs.list, list) {
-		struct irq_desc *desc = data->desc;
-
-		raw_spin_lock(&desc->lock);
-		irq_set_affinity_locked(&desc->irq_data, cpu_all_mask, true);
-		if (desc->action->thread)
-			unaffine_one_perf_thread(desc->action->thread);
-		raw_spin_unlock(&desc->lock);
-	}
-	perf_cpu_index = -1;
-	raw_spin_unlock_irqrestore(&perf_irqs_lock, flags);
-}
-
-void reaffine_perf_irqs(void)
-{
-	struct irq_desc_list *data;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&perf_irqs_lock, flags);
-	list_for_each_entry(data, &perf_crit_irqs.list, list) {
-		struct irq_desc *desc = data->desc;
-
-		raw_spin_lock(&desc->lock);
-		affine_one_perf_irq(desc);
-		if (desc->action->thread)
-			affine_one_perf_thread(desc->action->thread);
-		raw_spin_unlock(&desc->lock);
-	}
-	raw_spin_unlock_irqrestore(&perf_irqs_lock, flags);
-}
-
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
@@ -1312,9 +1195,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 			if (ret)
 				goto out_thread;
 		}
-
-		if (new->flags & IRQF_PERF_CRITICAL)
-			affine_one_perf_thread(new->thread);
 	}
 
 	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
@@ -1477,10 +1357,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		}
 
 		/* Set default affinity mask once everything is setup */
-		if (new->flags & IRQF_PERF_CRITICAL)
-			setup_perf_irq_locked(desc);
-		else
-			setup_affinity(desc, mask);
+		setup_affinity(desc, mask);
 
 	} else if (new->flags & IRQF_TRIGGER_MASK) {
 		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
@@ -1621,20 +1498,6 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 		action_ptr = &action->next;
 	}
 
-	if (action->flags & IRQF_PERF_CRITICAL) {
-		struct irq_desc_list *data;
-
-		raw_spin_lock(&perf_irqs_lock);
-		list_for_each_entry(data, &perf_crit_irqs.list, list) {
-			if (data->desc == desc) {
-				list_del(&data->list);
-				kfree(data);
-				break;
-			}
-		}
-		raw_spin_unlock(&perf_irqs_lock);
-	}
-
 	/* Found it - now remove it from the list of entries: */
 	*action_ptr = action->next;
 
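For context, the removed affine_one_perf_irq() spread perf-critical IRQs round-robin over the online big-cluster CPUs via the static perf_cpu_index cursor. A minimal userspace simulation of that selection loop (CPU numbering and masks are illustrative; like the original, it assumes at least one big CPU is online and would loop forever otherwise):

#include <stdio.h>

#define NR_CPUS 8

static const int cpu_perf_mask = 0xf0;	/* big cluster = CPUs 4-7 */
static int perf_cpu_index = -1;		/* mirrors the removed static cursor */

static int next_perf_cpu(int online_mask)
{
	int cpu;

	for (;;) {
		/* scan upward from the cursor for an online big CPU */
		for (cpu = perf_cpu_index + 1; cpu < NR_CPUS; cpu++) {
			if ((cpu_perf_mask & online_mask) & (1 << cpu)) {
				perf_cpu_index = cpu;
				return cpu;
			}
		}
		perf_cpu_index = -1;	/* exhausted the mask: wrap and retry */
	}
}

int main(void)
{
	int i;

	/* Six perf-critical IRQs with all CPUs online land on 4,5,6,7,4,5 */
	for (i = 0; i < 6; i++)
		printf("irq %d -> cpu%d\n", i, next_perf_cpu(0xff));
	return 0;
}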
