perf: Register PMU implementations
Simple registration interface for struct pmu; this provides the
infrastructure for removing all the weak functions.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra authored and Ingo Molnar committed Sep 9, 2010
1 parent 51b0fe3 commit b0a873e
Showing 10 changed files with 488 additions and 412 deletions.
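For reference, the pattern each architecture follows after this change: the weak hw_perf_event_init() hook that returned a struct pmu * is replaced by an .event_init callback on the struct pmu itself, which returns -ENOENT for event types the PMU does not handle, and the pmu is then handed to the core with perf_pmu_register(). A minimal sketch of that pattern, assuming a hypothetical architecture (the my_pmu_* names and stub bodies are illustrative stand-ins, not part of this commit):

#include <linux/perf_event.h>
#include <linux/errno.h>
#include <linux/init.h>

/* Illustrative stub callbacks; a real arch does counter programming here. */
static int  my_pmu_enable(struct perf_event *event)     { return 0; }
static void my_pmu_disable(struct perf_event *event)    { }
static void my_pmu_read(struct perf_event *event)       { }
static void my_pmu_unthrottle(struct perf_event *event) { }

static int my_pmu_event_init(struct perf_event *event)
{
	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;
	default:
		/* Not this PMU's event type; the core tries the next registered pmu. */
		return -ENOENT;
	}

	/* Arch-specific event setup would go here; 0 means the event is usable. */
	return 0;
}

static struct pmu my_pmu = {
	.event_init	= my_pmu_event_init,
	.enable		= my_pmu_enable,
	.disable	= my_pmu_disable,
	.read		= my_pmu_read,
	.unthrottle	= my_pmu_unthrottle,
};

static int __init my_init_hw_perf_events(void)
{
	/* Probe/describe the hardware first, then hand the pmu to the core. */
	perf_pmu_register(&my_pmu);
	return 0;
}
arch_initcall(my_init_hw_perf_events);

The per-architecture diffs below apply exactly this transformation to the alpha, ARM, powerpc and powerpc/fsl_emb PMU code.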
37 changes: 22 additions & 15 deletions arch/alpha/kernel/perf_event.c
@@ -642,34 +642,39 @@ static int __hw_perf_event_init(struct perf_event *event)
return 0;
}

static struct pmu pmu = {
.enable = alpha_pmu_enable,
.disable = alpha_pmu_disable,
.read = alpha_pmu_read,
.unthrottle = alpha_pmu_unthrottle,
};


/*
* Main entry point to initialise a HW performance event.
*/
struct pmu *hw_perf_event_init(struct perf_event *event)
static int alpha_pmu_event_init(struct perf_event *event)
{
int err;

switch (event->attr.type) {
case PERF_TYPE_RAW:
case PERF_TYPE_HARDWARE:
case PERF_TYPE_HW_CACHE:
break;

default:
return -ENOENT;
}

if (!alpha_pmu)
return ERR_PTR(-ENODEV);
return -ENODEV;

/* Do the real initialisation work. */
err = __hw_perf_event_init(event);

if (err)
return ERR_PTR(err);

return &pmu;
return err;
}


static struct pmu pmu = {
.event_init = alpha_pmu_event_init,
.enable = alpha_pmu_enable,
.disable = alpha_pmu_disable,
.read = alpha_pmu_read,
.unthrottle = alpha_pmu_unthrottle,
};

/*
* Main entry point - enable HW performance counters.
@@ -838,5 +843,7 @@ void __init init_hw_perf_events(void)
/* And set up PMU specification */
alpha_pmu = &ev67_pmu;
perf_max_events = alpha_pmu->num_pmcs;

perf_pmu_register(&pmu);
}

38 changes: 26 additions & 12 deletions arch/arm/kernel/perf_event.c
@@ -306,12 +306,7 @@ armpmu_enable(struct perf_event *event)
return err;
}

static struct pmu pmu = {
.enable = armpmu_enable,
.disable = armpmu_disable,
.unthrottle = armpmu_unthrottle,
.read = armpmu_read,
};
static struct pmu pmu;

static int
validate_event(struct cpu_hw_events *cpuc,
@@ -491,20 +486,29 @@ __hw_perf_event_init(struct perf_event *event)
return err;
}

struct pmu *
hw_perf_event_init(struct perf_event *event)
static int armpmu_event_init(struct perf_event *event)
{
int err = 0;

switch (event->attr.type) {
case PERF_TYPE_RAW:
case PERF_TYPE_HARDWARE:
case PERF_TYPE_HW_CACHE:
break;

default:
return -ENOENT;
}

if (!armpmu)
return ERR_PTR(-ENODEV);
return -ENODEV;

event->destroy = hw_perf_event_destroy;

if (!atomic_inc_not_zero(&active_events)) {
if (atomic_read(&active_events) > perf_max_events) {
atomic_dec(&active_events);
return ERR_PTR(-ENOSPC);
return -ENOSPC;
}

mutex_lock(&pmu_reserve_mutex);
@@ -518,15 +522,23 @@ hw_perf_event_init(struct perf_event *event)
}

if (err)
return ERR_PTR(err);
return err;

err = __hw_perf_event_init(event);
if (err)
hw_perf_event_destroy(event);

return err ? ERR_PTR(err) : &pmu;
return err;
}

static struct pmu pmu = {
.event_init = armpmu_event_init,
.enable = armpmu_enable,
.disable = armpmu_disable,
.unthrottle = armpmu_unthrottle,
.read = armpmu_read,
};

void
hw_perf_enable(void)
{
@@ -2994,6 +3006,8 @@ init_hw_perf_events(void)
perf_max_events = -1;
}

perf_pmu_register(&pmu);

return 0;
}
arch_initcall(init_hw_perf_events);
46 changes: 24 additions & 22 deletions arch/powerpc/kernel/perf_event.c
@@ -904,16 +904,6 @@ int power_pmu_commit_txn(struct pmu *pmu)
return 0;
}

struct pmu power_pmu = {
.enable = power_pmu_enable,
.disable = power_pmu_disable,
.read = power_pmu_read,
.unthrottle = power_pmu_unthrottle,
.start_txn = power_pmu_start_txn,
.cancel_txn = power_pmu_cancel_txn,
.commit_txn = power_pmu_commit_txn,
};

/*
* Return 1 if we might be able to put event on a limited PMC,
* or 0 if not.
@@ -1014,7 +1004,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
return 0;
}

struct pmu *hw_perf_event_init(struct perf_event *event)
static int power_pmu_event_init(struct perf_event *event)
{
u64 ev;
unsigned long flags;
@@ -1026,25 +1016,27 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
struct cpu_hw_events *cpuhw;

if (!ppmu)
return ERR_PTR(-ENXIO);
return -ENOENT;

switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
ev = event->attr.config;
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
return ERR_PTR(-EOPNOTSUPP);
return -EOPNOTSUPP;
ev = ppmu->generic_events[ev];
break;
case PERF_TYPE_HW_CACHE:
err = hw_perf_cache_event(event->attr.config, &ev);
if (err)
return ERR_PTR(err);
return err;
break;
case PERF_TYPE_RAW:
ev = event->attr.config;
break;
default:
return ERR_PTR(-EINVAL);
return -ENOENT;
}

event->hw.config_base = ev;
event->hw.idx = 0;

@@ -1081,7 +1073,7 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
*/
ev = normal_pmc_alternative(ev, flags);
if (!ev)
return ERR_PTR(-EINVAL);
return -EINVAL;
}
}

@@ -1095,19 +1087,19 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
n = collect_events(event->group_leader, ppmu->n_counter - 1,
ctrs, events, cflags);
if (n < 0)
return ERR_PTR(-EINVAL);
return -EINVAL;
}
events[n] = ev;
ctrs[n] = event;
cflags[n] = flags;
if (check_excludes(ctrs, cflags, n, 1))
return ERR_PTR(-EINVAL);
return -EINVAL;

cpuhw = &get_cpu_var(cpu_hw_events);
err = power_check_constraints(cpuhw, events, cflags, n + 1);
put_cpu_var(cpu_hw_events);
if (err)
return ERR_PTR(-EINVAL);
return -EINVAL;

event->hw.config = events[n];
event->hw.event_base = cflags[n];
@@ -1132,11 +1124,20 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
}
event->destroy = hw_perf_event_destroy;

if (err)
return ERR_PTR(err);
return &power_pmu;
return err;
}

struct pmu power_pmu = {
.event_init = power_pmu_event_init,
.enable = power_pmu_enable,
.disable = power_pmu_disable,
.read = power_pmu_read,
.unthrottle = power_pmu_unthrottle,
.start_txn = power_pmu_start_txn,
.cancel_txn = power_pmu_cancel_txn,
.commit_txn = power_pmu_commit_txn,
};

/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
@@ -1342,6 +1343,7 @@ int register_power_pmu(struct power_pmu *pmu)
freeze_events_kernel = MMCR0_FCHV;
#endif /* CONFIG_PPC64 */

perf_pmu_register(&power_pmu);
perf_cpu_notifier(power_pmu_notifier);

return 0;
37 changes: 19 additions & 18 deletions arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -378,13 +378,6 @@ static void fsl_emb_pmu_unthrottle(struct perf_event *event)
local_irq_restore(flags);
}

static struct pmu fsl_emb_pmu = {
.enable = fsl_emb_pmu_enable,
.disable = fsl_emb_pmu_disable,
.read = fsl_emb_pmu_read,
.unthrottle = fsl_emb_pmu_unthrottle,
};

/*
* Release the PMU if this is the last perf_event.
*/
@@ -428,7 +421,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
return 0;
}

struct pmu *hw_perf_event_init(struct perf_event *event)
static int fsl_emb_pmu_event_init(struct perf_event *event)
{
u64 ev;
struct perf_event *events[MAX_HWEVENTS];
@@ -441,27 +434,27 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
case PERF_TYPE_HARDWARE:
ev = event->attr.config;
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
return ERR_PTR(-EOPNOTSUPP);
return -EOPNOTSUPP;
ev = ppmu->generic_events[ev];
break;

case PERF_TYPE_HW_CACHE:
err = hw_perf_cache_event(event->attr.config, &ev);
if (err)
return ERR_PTR(err);
return err;
break;

case PERF_TYPE_RAW:
ev = event->attr.config;
break;

default:
return ERR_PTR(-EINVAL);
return -ENOENT;
}

event->hw.config = ppmu->xlate_event(ev);
if (!(event->hw.config & FSL_EMB_EVENT_VALID))
return ERR_PTR(-EINVAL);
return -EINVAL;

/*
* If this is in a group, check if it can go on with all the
Expand All @@ -473,7 +466,7 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
n = collect_events(event->group_leader,
ppmu->n_counter - 1, events);
if (n < 0)
return ERR_PTR(-EINVAL);
return -EINVAL;
}

if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
@@ -484,7 +477,7 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
}

if (num_restricted >= ppmu->n_restricted)
return ERR_PTR(-EINVAL);
return -EINVAL;
}

event->hw.idx = -1;
@@ -497,7 +490,7 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
if (event->attr.exclude_kernel)
event->hw.config_base |= PMLCA_FCS;
if (event->attr.exclude_idle)
return ERR_PTR(-ENOTSUPP);
return -ENOTSUPP;

event->hw.last_period = event->hw.sample_period;
local64_set(&event->hw.period_left, event->hw.last_period);
@@ -523,11 +516,17 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
}
event->destroy = hw_perf_event_destroy;

if (err)
return ERR_PTR(err);
return &fsl_emb_pmu;
return err;
}

static struct pmu fsl_emb_pmu = {
.event_init = fsl_emb_pmu_event_init,
.enable = fsl_emb_pmu_enable,
.disable = fsl_emb_pmu_disable,
.read = fsl_emb_pmu_read,
.unthrottle = fsl_emb_pmu_unthrottle,
};

/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
@@ -651,5 +650,7 @@ int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
pr_info("%s performance monitor hardware support registered\n",
pmu->name);

perf_pmu_register(&fsl_emb_pmu);

return 0;
}
