From 8aec53690eb9034d8971dd86c887c7e8be6adc45 Mon Sep 17 00:00:00 2001
From: Jan Kiszka <jan.kiszka@siemens.com>
Date: Wed, 16 Sep 2015 09:22:23 +0200
Subject: [PATCH] inmates: x86: Add support for TSC-based timing

Provide a service to calibrate the TSC against the PM timer and read out
the current time in nanoseconds. This service is much faster than the
slow PM timer, and it's also not affected by chipset-induced delays.

Note that the simplistic algorithm only supports measuring relative time
spans of a couple of seconds.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
---
 inmates/lib/x86/inmate.h |  5 ++++
 inmates/lib/x86/timing.c | 52 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 57 insertions(+)

diff --git a/inmates/lib/x86/inmate.h b/inmates/lib/x86/inmate.h
index 1ab988970..cbe663b09 100644
--- a/inmates/lib/x86/inmate.h
+++ b/inmates/lib/x86/inmate.h
@@ -230,7 +230,12 @@ void inmate_main(void);
 void hypercall_init(void);
 
 unsigned long pm_timer_read(void);
+
+unsigned long tsc_read(void);
+unsigned long tsc_init(void);
+
 void delay_us(unsigned long microsecs);
+
 unsigned long apic_timer_init(unsigned int vector);
 void apic_timer_set(unsigned long timeout_ns);
 
diff --git a/inmates/lib/x86/timing.c b/inmates/lib/x86/timing.c
index ca211b2b9..9dbf028e6 100644
--- a/inmates/lib/x86/timing.c
+++ b/inmates/lib/x86/timing.c
@@ -23,6 +23,58 @@ static unsigned long divided_apic_freq;
 static unsigned long pm_timer_last[SMP_MAX_CPUS];
 static unsigned long pm_timer_overflows[SMP_MAX_CPUS];
+static unsigned long tsc_freq, tsc_overflow;
+static unsigned long tsc_last[SMP_MAX_CPUS];
+static unsigned long tsc_overflows[SMP_MAX_CPUS];
+
+static u64 rdtsc(void)
+{
+#ifdef __x86_64__
+	u32 lo, hi;
+
+	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
+	return (u64)lo | (((u64)hi) << 32);
+#else
+	u64 v;
+
+	asm volatile("rdtsc" : "=A" (v));
+	return v;
+#endif
+}
+
+unsigned long tsc_read(void)
+{
+	unsigned int cpu = cpu_id();
+	unsigned long tmr;
+
+	tmr = ((rdtsc() & 0xffffffffLL) * NS_PER_SEC) / tsc_freq;
+	if (tmr < tsc_last[cpu])
+		tsc_overflows[cpu] += tsc_overflow;
+	tsc_last[cpu] = tmr;
+	return tmr + tsc_overflows[cpu];
+}
+
+unsigned long tsc_init(void)
+{
+	unsigned long start_pm, end_pm;
+	u64 start_tsc, end_tsc;
+
+	start_pm = pm_timer_read();
+	start_tsc = rdtsc();
+	asm volatile("mfence" : : : "memory");
+
+	while (pm_timer_read() - start_pm < 100 * NS_PER_MSEC)
+		cpu_relax();
+
+	end_pm = pm_timer_read();
+	end_tsc = rdtsc();
+	asm volatile("mfence" : : : "memory");
+
+	tsc_freq = (end_tsc - start_tsc) * NS_PER_SEC / (end_pm - start_pm);
+	tsc_overflow = (0x100000000L * NS_PER_SEC) / tsc_freq;
+
+	return tsc_freq;
+}
 
 unsigned long pm_timer_read(void)
 {