MIPS, Perf-events: Work with the new PMU interface
This is the MIPS part of the following commits by Peter Zijlstra:

- a4eaf7f
    perf: Rework the PMU methods

    Replace pmu::{enable,disable,start,stop,unthrottle} with
    pmu::{add,del,start,stop}, all of which take a flags argument.

    The new interface extends the capability to stop a counter while
    keeping it scheduled on the PMU. We replace the throttled state with
    the generic stopped state.

    This also allows us to efficiently stop/start counters over certain
    code paths (like IRQ handlers).

    It also allows scheduling a counter without it starting, allowing for
    a generic frozen state (useful for rotating stopped counters).

    The stopped state is implemented in two different ways, depending on
    how the architecture implemented the throttled state:

     1) We disable the counter:
        a) if the PMU has per-counter enable bits, we flip that bit
        b) otherwise we program a NOP event, preserving the counter state

     2) We store the counter state and ignore all read/overflow events

For MIPSXX, the stopped state is implemented via option 1.b above.
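
For reference, the reworked interface looks roughly like this (a condensed
sketch of the post-series <linux/perf_event.h> definitions; only the
callbacks and flag bits relevant to this patch are shown):

    struct pmu {
            ...
            void (*pmu_enable)  (struct pmu *pmu);
            void (*pmu_disable) (struct pmu *pmu);
            int  (*event_init)  (struct perf_event *event);
            int  (*add)         (struct perf_event *event, int flags);
            void (*del)         (struct perf_event *event, int flags);
            void (*start)       (struct perf_event *event, int flags);
            void (*stop)        (struct perf_event *event, int flags);
            void (*read)        (struct perf_event *event);
            ...
    };

    #define PERF_EF_START  0x01  /* start the counter when adding    */
    #define PERF_EF_RELOAD 0x02  /* reload the counter when starting */
    #define PERF_EF_UPDATE 0x04  /* update the counter when stopping */

    #define PERF_HES_STOPPED  0x01  /* the counter is stopped  */
    #define PERF_HES_UPTODATE 0x02  /* event->count up-to-date */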

- 33696fc
    perf: Per PMU disable

    Changes perf_disable() into perf_pmu_disable().
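
The per-PMU variant takes the affected struct pmu explicitly. The bracketing
pattern, as used by mipspmu_add() in this patch, is:

    perf_pmu_disable(event->pmu);
    /* ... allocate a counter and (re)program it ... */
    perf_pmu_enable(event->pmu);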

- 24cd7f5
    perf: Reduce perf_disable() usage

    Since the current perf_disable() usage is only an optimization,
    remove it for now. This eases the removal of the __weak
    hw_perf_enable() interface.

- b0a873e
    perf: Register PMU implementations

    A simple registration interface for struct pmu; this provides the
    infrastructure for removing all the weak functions.
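
For MIPS, that registration is done in init_hw_perf_events() (see the
perf_event_mipsxx.c hunk at the end of this diff):

    perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);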

- 51b0fe3
    perf: Deconstify struct pmu

    sed -ie 's/const struct pmu\>/struct pmu/g' `git grep -l "const struct pmu\>"`

Reported-by: Wu Zhangjin <wuzhangjin@gmail.com>
Acked-by: David Daney <ddaney@caviumnetworks.com>
Signed-off-by: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
To: a.p.zijlstra@chello.nl
To: fweisbec@gmail.com
To: will.deacon@arm.com
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Cc: wuzhangjin@gmail.com
Cc: paulus@samba.org
Cc: mingo@elte.hu
Cc: acme@redhat.com
Cc: dengcheng.zhu@gmail.com
Cc: matt@console-pimps.org
Cc: sshtylyov@mvista.com
Cc: ddaney@caviumnetworks.com
Patchwork: http://patchwork.linux-mips.org/patch/2012/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
dczhu authored and ralfbaechle committed Mar 14, 2011
1 parent 91f0173 commit 404ff63
Showing 2 changed files with 158 additions and 119 deletions.
arch/mips/kernel/perf_event.c: 156 additions & 119 deletions
@@ -161,41 +161,6 @@ mipspmu_event_set_period(struct perf_event *event,
return ret;
}

static int mipspmu_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx;
int err = 0;

/* To look for a free counter for this event. */
idx = mipspmu->alloc_counter(cpuc, hwc);
if (idx < 0) {
err = idx;
goto out;
}

/*
* If there is an event in the counter we are going to use then
* make sure it is disabled.
*/
event->hw.idx = idx;
mipspmu->disable_event(idx);
cpuc->events[idx] = event;

/* Set the period for the event. */
mipspmu_event_set_period(event, hwc, idx);

/* Enable the event. */
mipspmu->enable_event(hwc, idx);

/* Propagate our changes to the userspace mapping. */
perf_event_update_userpage(event);

out:
return err;
}

static void mipspmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
int idx)
@@ -231,32 +196,90 @@ static void mipspmu_event_update(struct perf_event *event,
return;
}

static void mipspmu_disable(struct perf_event *event)
static void mipspmu_start(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;

if (!mipspmu)
return;

if (flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

hwc->state = 0;

/* Set the period for the event. */
mipspmu_event_set_period(event, hwc, hwc->idx);

/* Enable the event. */
mipspmu->enable_event(hwc, hwc->idx);
}

static void mipspmu_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;

if (!mipspmu)
return;

if (!(hwc->state & PERF_HES_STOPPED)) {
/* We are working on a local event. */
mipspmu->disable_event(hwc->idx);
barrier();
mipspmu_event_update(event, hwc, hwc->idx);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
}

static int mipspmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
int idx;
int err = 0;

perf_pmu_disable(event->pmu);

WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
/* To look for a free counter for this event. */
idx = mipspmu->alloc_counter(cpuc, hwc);
if (idx < 0) {
err = idx;
goto out;
}

/* We are working on a local event. */
/*
* If there is an event in the counter we are going to use then
* make sure it is disabled.
*/
event->hw.idx = idx;
mipspmu->disable_event(idx);
cpuc->events[idx] = event;

barrier();

mipspmu_event_update(event, hwc, idx);
cpuc->events[idx] = NULL;
clear_bit(idx, cpuc->used_mask);
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
if (flags & PERF_EF_START)
mipspmu_start(event, PERF_EF_RELOAD);

/* Propagate our changes to the userspace mapping. */
perf_event_update_userpage(event);

out:
perf_pmu_enable(event->pmu);
return err;
}

static void mipspmu_unthrottle(struct perf_event *event)
static void mipspmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;

mipspmu->enable_event(hwc, hwc->idx);
WARN_ON(idx < 0 || idx >= mipspmu->num_counters);

mipspmu_stop(event, PERF_EF_UPDATE);
cpuc->events[idx] = NULL;
clear_bit(idx, cpuc->used_mask);

perf_event_update_userpage(event);
}

static void mipspmu_read(struct perf_event *event)
@@ -270,12 +293,17 @@ static void mipspmu_read(struct perf_event *event)
mipspmu_event_update(event, hwc, hwc->idx);
}

static struct pmu pmu = {
.enable = mipspmu_enable,
.disable = mipspmu_disable,
.unthrottle = mipspmu_unthrottle,
.read = mipspmu_read,
};
static void mipspmu_enable(struct pmu *pmu)
{
if (mipspmu)
mipspmu->start();
}

static void mipspmu_disable(struct pmu *pmu)
{
if (mipspmu)
mipspmu->stop();
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
@@ -318,6 +346,82 @@ static void mipspmu_free_irq(void)
perf_irq = save_perf_irq;
}

/*
* mipsxx/rm9000/loongson2 have different performance counters, they have
* specific low-level init routines.
*/
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
{
if (atomic_dec_and_mutex_lock(&active_events,
&pmu_reserve_mutex)) {
/*
* We must not call the destroy function with interrupts
* disabled.
*/
on_each_cpu(reset_counters,
(void *)(long)mipspmu->num_counters, 1);
mipspmu_free_irq();
mutex_unlock(&pmu_reserve_mutex);
}
}

static int mipspmu_event_init(struct perf_event *event)
{
int err = 0;

switch (event->attr.type) {
case PERF_TYPE_RAW:
case PERF_TYPE_HARDWARE:
case PERF_TYPE_HW_CACHE:
break;

default:
return -ENOENT;
}

if (!mipspmu || event->cpu >= nr_cpumask_bits ||
(event->cpu >= 0 && !cpu_online(event->cpu)))
return -ENODEV;

if (!atomic_inc_not_zero(&active_events)) {
if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
atomic_dec(&active_events);
return -ENOSPC;
}

mutex_lock(&pmu_reserve_mutex);
if (atomic_read(&active_events) == 0)
err = mipspmu_get_irq();

if (!err)
atomic_inc(&active_events);
mutex_unlock(&pmu_reserve_mutex);
}

if (err)
return err;

err = __hw_perf_event_init(event);
if (err)
hw_perf_event_destroy(event);

return err;
}

static struct pmu pmu = {
.pmu_enable = mipspmu_enable,
.pmu_disable = mipspmu_disable,
.event_init = mipspmu_event_init,
.add = mipspmu_add,
.del = mipspmu_del,
.start = mipspmu_start,
.stop = mipspmu_stop,
.read = mipspmu_read,
};

static inline unsigned int
mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
@@ -409,73 +513,6 @@ static int validate_group(struct perf_event *event)
return 0;
}

/*
* mipsxx/rm9000/loongson2 have different performance counters, they have
* specific low-level init routines.
*/
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
{
if (atomic_dec_and_mutex_lock(&active_events,
&pmu_reserve_mutex)) {
/*
* We must not call the destroy function with interrupts
* disabled.
*/
on_each_cpu(reset_counters,
(void *)(long)mipspmu->num_counters, 1);
mipspmu_free_irq();
mutex_unlock(&pmu_reserve_mutex);
}
}

const struct pmu *hw_perf_event_init(struct perf_event *event)
{
int err = 0;

if (!mipspmu || event->cpu >= nr_cpumask_bits ||
(event->cpu >= 0 && !cpu_online(event->cpu)))
return ERR_PTR(-ENODEV);

if (!atomic_inc_not_zero(&active_events)) {
if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
atomic_dec(&active_events);
return ERR_PTR(-ENOSPC);
}

mutex_lock(&pmu_reserve_mutex);
if (atomic_read(&active_events) == 0)
err = mipspmu_get_irq();

if (!err)
atomic_inc(&active_events);
mutex_unlock(&pmu_reserve_mutex);
}

if (err)
return ERR_PTR(err);

err = __hw_perf_event_init(event);
if (err)
hw_perf_event_destroy(event);

return err ? ERR_PTR(err) : &pmu;
}

void hw_perf_enable(void)
{
if (mipspmu)
mipspmu->start();
}

void hw_perf_disable(void)
{
if (mipspmu)
mipspmu->stop();
}

/* This is needed by specific irq handlers in perf_event_*.c */
static void
handle_associated_event(struct cpu_hw_events *cpuc,
arch/mips/kernel/perf_event_mipsxx.c: 2 additions & 0 deletions
@@ -1045,6 +1045,8 @@ init_hw_perf_events(void)
"CPU, irq %d%s\n", mipspmu->name, counters, irq,
irq < 0 ? " (share with timer interrupt)" : "");

perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

return 0;
}
early_initcall(init_hw_perf_events);
