Commit 08047c4 (parent: 454ede7)

x86: Move calibrate_cpu to tsc.c

Move the code to where its only user is. Also we need to look at whether
this hardwired hackery might interfere with perfcounters.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
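For context, what the moved function does: calibrate_cpu() claims an AMD
K7-family performance counter, programs it to count event 0x76 ("CPU clocks
not halted"), busy-waits until the TSC has advanced by TICK_COUNT ticks, and
then scales tsc_khz by the ratio of core clocks counted to TSC ticks elapsed.
Below is a minimal user-space sketch of just that ratio arithmetic, with
assumed sample numbers; it is not part of the commit.

	/* freq_ratio.c - the frequency computation from calibrate_cpu(),
	 * isolated. All input values are illustrative; build for a 64-bit
	 * target so the intermediate product does not overflow. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long tsc_khz   = 2200000;	/* assumed TSC rate: 2.2 GHz */
		unsigned long tsc_delta = 100000000;	/* TICK_COUNT elapsed TSC ticks */
		unsigned long pmc_now   = 100000000;	/* core clocks counted meanwhile */

		/* core kHz = core clocks / elapsed ms,
		 * where elapsed ms = tsc_delta / tsc_khz */
		unsigned long cpu_khz = pmc_now * tsc_khz / tsc_delta;

		printf("calibrated cpu_khz = %lu\n", cpu_khz);	/* prints 2200000 */
		return 0;
	}

Because the perfctr counts actual core cycles while the fixed-rate TSC
supplies the timebase, the ratio converts the known TSC rate into the core's
effective frequency.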

File tree: 4 files changed, +55 -56 lines

arch/x86/include/asm/time.h (0 additions, 2 deletions)

@@ -57,6 +57,4 @@ extern void time_init(void);
 
 #endif /* CONFIG_PARAVIRT */
 
-extern unsigned long __init calibrate_cpu(void);
-
 #endif /* _ASM_X86_TIME_H */

arch/x86/kernel/time_32.c (0 additions, 1 deletion)

@@ -21,7 +21,6 @@
 #include <asm/timer.h>
 #include <asm/hpet.h>
 #include <asm/time.h>
-#include <asm/nmi.h>
 
 #if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC)
 int timer_ack;

arch/x86/kernel/time_64.c (0 additions, 51 deletions)

@@ -21,7 +21,6 @@
 #include <asm/timer.h>
 #include <asm/hpet.h>
 #include <asm/time.h>
-#include <asm/nmi.h>
 
 #if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC)
 int timer_ack;
@@ -84,56 +83,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-/*
- * calibrate_cpu is used on systems with fixed rate TSCs to determine
- * processor frequency
- */
-#define TICK_COUNT 100000000
-unsigned long __init calibrate_cpu(void)
-{
-	int tsc_start, tsc_now;
-	int i, no_ctr_free;
-	unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
-	unsigned long flags;
-
-	for (i = 0; i < 4; i++)
-		if (avail_to_resrv_perfctr_nmi_bit(i))
-			break;
-	no_ctr_free = (i == 4);
-	if (no_ctr_free) {
-		WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
-		     "cpu_khz value may be incorrect.\n");
-		i = 3;
-		rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
-		wrmsrl(MSR_K7_EVNTSEL3, 0);
-		rdmsrl(MSR_K7_PERFCTR3, pmc3);
-	} else {
-		reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-		reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-	}
-	local_irq_save(flags);
-	/* start measuring cycles, incrementing from 0 */
-	wrmsrl(MSR_K7_PERFCTR0 + i, 0);
-	wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
-	rdtscl(tsc_start);
-	do {
-		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
-		tsc_now = get_cycles();
-	} while ((tsc_now - tsc_start) < TICK_COUNT);
-
-	local_irq_restore(flags);
-	if (no_ctr_free) {
-		wrmsrl(MSR_K7_EVNTSEL3, 0);
-		wrmsrl(MSR_K7_PERFCTR3, pmc3);
-		wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
-	} else {
-		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-	}
-
-	return pmc_now * tsc_khz / (tsc_now - tsc_start);
-}
-
 static struct irqaction irq0 = {
 	.handler = timer_interrupt,
 	.flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER,
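A detail worth noting in the measurement loop above: tsc_start and tsc_now are
plain 32-bit ints fed from rdtscl()/get_cycles() (the latter truncated on
assignment), so the TSC low word can wrap during the wait. In the kernel's
two's-complement build environment the subtraction still yields the right
delta via modular arithmetic, as long as the interval fits in 31 bits
(TICK_COUNT is 1e8, so it does). A standalone sketch of that idiom, with
made-up values chosen to straddle the wrap point:

	/* wrap.c - timestamp deltas survive 32-bit wraparound.
	 * All values are illustrative. */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t start = 0xffff0000u;	/* TSC low word before the wrap */
		uint32_t now   = 0x00010000u;	/* TSC low word after the wrap  */

		/* unsigned subtraction is modulo 2^32, so the delta is exact */
		int32_t delta = (int32_t)(now - start);

		printf("delta = %d\n", delta);	/* prints 131072 (0x20000) */
		return 0;
	}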

arch/x86/kernel/tsc.c (55 additions, 2 deletions)

@@ -17,6 +17,7 @@
 #include <asm/time.h>
 #include <asm/delay.h>
 #include <asm/hypervisor.h>
+#include <asm/nmi.h>
 
 unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -852,6 +853,60 @@ static void __init init_tsc_clocksource(void)
 	clocksource_register(&clocksource_tsc);
 }
 
+#ifdef CONFIG_X86_64
+/*
+ * calibrate_cpu is used on systems with fixed rate TSCs to determine
+ * processor frequency
+ */
+#define TICK_COUNT 100000000
+static unsigned long __init calibrate_cpu(void)
+{
+	int tsc_start, tsc_now;
+	int i, no_ctr_free;
+	unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
+	unsigned long flags;
+
+	for (i = 0; i < 4; i++)
+		if (avail_to_resrv_perfctr_nmi_bit(i))
+			break;
+	no_ctr_free = (i == 4);
+	if (no_ctr_free) {
+		WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
+		     "cpu_khz value may be incorrect.\n");
+		i = 3;
+		rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
+		wrmsrl(MSR_K7_EVNTSEL3, 0);
+		rdmsrl(MSR_K7_PERFCTR3, pmc3);
+	} else {
+		reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+		reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
+	}
+	local_irq_save(flags);
+	/* start measuring cycles, incrementing from 0 */
+	wrmsrl(MSR_K7_PERFCTR0 + i, 0);
+	wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
+	rdtscl(tsc_start);
+	do {
+		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
+		tsc_now = get_cycles();
+	} while ((tsc_now - tsc_start) < TICK_COUNT);
+
+	local_irq_restore(flags);
+	if (no_ctr_free) {
+		wrmsrl(MSR_K7_EVNTSEL3, 0);
+		wrmsrl(MSR_K7_PERFCTR3, pmc3);
+		wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
+	} else {
+		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
+	}
+
+	return pmc_now * tsc_khz / (tsc_now - tsc_start);
+}
+#else
+static inline unsigned long calibrate_cpu(void) { return cpu_khz; }
+#endif
+
 void __init tsc_init(void)
 {
 	u64 lpj;
@@ -870,11 +925,9 @@ void __init tsc_init(void)
 		return;
 	}
 
-#ifdef CONFIG_X86_64
 	if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
 	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
 		cpu_khz = calibrate_cpu();
-#endif
 
 	printk("Detected %lu.%03lu MHz processor.\n",
 	       (unsigned long)cpu_khz / 1000,
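
The one magic number in the moved code is the event-select value written to
MSR_K7_EVNTSEL0 + i, `1 << 22 | 3 << 16 | 0x76`. Decoded against the AMD
K7/K8 EVNTSEL MSR layout (my reading of the architecture manuals; the symbolic
names below are illustrative, not kernel definitions):

	/* evntsel sketch - hypothetical names for the raw constant */
	#define K7_EVNTSEL_EVENT_CPU_CLK_UNHALTED	0x76		/* event: CPU clocks not halted */
	#define K7_EVNTSEL_USR				(1UL << 16)	/* count in user mode (CPL > 0) */
	#define K7_EVNTSEL_OS				(1UL << 17)	/* count in kernel mode (CPL 0) */
	#define K7_EVNTSEL_ENABLE			(1UL << 22)	/* enable the counter */

	/* ENABLE | OS | USR | EVENT == 1 << 22 | 3 << 16 | 0x76 */

The fallback path that commandeers counter 3 when all four counters are busy
(the raw MSR_K7_EVNTSEL3/PERFCTR3 save, zero, and restore sequence) is
presumably the "hardwired hackery" the changelog wants checked against
perfcounters.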
