EXP timers: Debugging for last-resort jiffies update
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
paulmckrcu committed Jan 6, 2022
1 parent 3cda34a commit feba9f8
Showing 6 changed files with 76 additions and 0 deletions.
2 changes: 2 additions & 0 deletions arch/x86/kernel/kvm.c
@@ -315,6 +315,8 @@ static void kvm_register_steal_time(void)
        wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
        pr_info("stealtime: cpu %d, msr %llx\n", cpu,
                (unsigned long long) slow_virt_to_phys(st));
        cpu_hp_check_delay("", kvm_register_steal_time);
        cpu_hp_stop_now();
}

static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
2 changes: 2 additions & 0 deletions arch/x86/kernel/kvmclock.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/set_memory.h>
#include <linux/cc_platform.h>
#include <linux/cpu.h>

#include <asm/hypervisor.h>
#include <asm/x86_init.h>
@@ -190,6 +191,7 @@ static void kvm_restore_sched_clock_state(void)
static void kvm_setup_secondary_clock(void)
{
        kvm_register_clock("secondary cpu clock");
        cpu_hp_start_now();
}
#endif

4 changes: 4 additions & 0 deletions include/linux/cpu.h
@@ -128,6 +128,10 @@ void clear_tasks_mm_cpumask(int cpu);
int remove_cpu(unsigned int cpu);
int cpu_device_down(struct device *dev);
extern void smp_shutdown_nonboot_cpus(unsigned int primary_cpu);
extern void cpu_hp_start_now(void);
extern void cpu_hp_stop_now(void);
extern bool cpu_hp_check_delay(const char *s, const void *func);
extern void tick_setup_sched_timer_dump(void);

#else /* CONFIG_HOTPLUG_CPU */

51 changes: 51 additions & 0 deletions kernel/cpu.c
@@ -137,6 +137,57 @@ struct cpuhp_step {
        bool multi_instance;
};

#ifdef CONFIG_SMP

static u64 cpu_hp_start_time;
static bool cpu_hp_start_time_valid;

void cpu_hp_start_now(void)
{
        if (!rcu_inkernel_boot_has_ended())
                return;
        WRITE_ONCE(cpu_hp_start_time, ktime_get());
        smp_store_release(&cpu_hp_start_time_valid, true);
        pr_info("%s invoked on CPU %d, cpu_hp_start_time: %llu milliseconds.\n", __func__, raw_smp_processor_id(), cpu_hp_start_time / NSEC_PER_MSEC);
}

void cpu_hp_stop_now(void)
{
        u64 t;

        if (!rcu_inkernel_boot_has_ended())
                return;
        t = ktime_get();
        pr_info("%s invoked, %llu - %llu = %llu milliseconds elapsed with flag %s.\n", __func__, t / NSEC_PER_MSEC, cpu_hp_start_time / NSEC_PER_MSEC, (t - cpu_hp_start_time) / NSEC_PER_MSEC, cpu_hp_start_time_valid ? "set" : "clear");
        smp_store_release(&cpu_hp_start_time_valid, false);
}

/* Return true if a time-delay anomaly was detected. */
bool cpu_hp_check_delay(const char *s, const void *func)
{
        bool ret = false;
        u64 t, t1;

        if (!smp_load_acquire(&cpu_hp_start_time_valid))
                return false;
        t = READ_ONCE(cpu_hp_start_time);
        smp_mb();
        if (!READ_ONCE(cpu_hp_start_time_valid))
                return false;
        t1 = ktime_get();
        if (WARN_ONCE(time_after64(t1, t + 100 * NSEC_PER_SEC), "%s %ps took %llu milliseconds\n", s, func, (t1 - t) / NSEC_PER_MSEC)) {
                WRITE_ONCE(cpu_hp_start_time, t1);
                ret = true;
        }
        if (WARN_ONCE(time_before64(t1, t - 25 * NSEC_PER_MSEC), "%s %ps clock went backwards %llu milliseconds\n", s, func, (t - t1) / NSEC_PER_MSEC)) {
                WRITE_ONCE(cpu_hp_start_time, t1);
                ret = true;
        }
        return ret;
}

#endif /* #ifdef CONFIG_SMP */

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_hp_states[];

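Taken together, the three helpers above bracket a timed window: cpu_hp_start_now() records a start timestamp and releases a validity flag, cpu_hp_check_delay() acquires that flag (and rechecks it after a full barrier so a racing cpu_hp_stop_now() makes it bail out) before warning if the elapsed time looks anomalous, and cpu_hp_stop_now() reports the elapsed time and clears the flag. Below is a minimal sketch of how the call sites elsewhere in this commit pair them up; the wrapper function and its name are illustrative only, not part of the commit.

/* Illustrative sketch only, not part of this commit. */
static void example_instrumented_bringup(void)
{
        cpu_hp_start_now();             /* Record start time, mark it valid. */

        /* ... the work being timed, e.g. secondary-CPU clock setup ... */

        /* Warn and dump per-CPU tick-setup state if the delay is anomalous. */
        if (cpu_hp_check_delay("example in", example_instrumented_bringup))
                tick_setup_sched_timer_dump();

        cpu_hp_stop_now();              /* Report elapsed time, clear the flag. */
}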
3 changes: 3 additions & 0 deletions kernel/stop_machine.c
@@ -251,6 +251,9 @@ static int multi_cpu_stop(void *data)
                         */
                        touch_nmi_watchdog();
                }
                if (cpu_is_offline(smp_processor_id()) &&
                    cpu_hp_check_delay("MULTI_STOP_RUN in", multi_cpu_stop))
                        tick_setup_sched_timer_dump();
                rcu_momentary_dyntick_idle();
        } while (curstate != MULTI_STOP_EXIT);

14 changes: 14 additions & 0 deletions kernel/time/tick-sched.c
@@ -1452,6 +1452,20 @@ static DEFINE_PER_CPU(unsigned long, tick_setup_sched_timer_jiffies);
static DEFINE_PER_CPU(int, tick_setup_sched_timer_jiffies_count);
DEFINE_PER_CPU(bool, tick_setup_sched_timer_help_needed);

void tick_setup_sched_timer_dump(void)
{
        int cpu;
        int j = jiffies;

        pr_alert("%s state", __func__);
        for_each_possible_cpu(cpu)
                pr_cont(" j/c %x/%d %c",
                        (int)(j - per_cpu(tick_setup_sched_timer_jiffies, cpu)) & 0xfff,
                        per_cpu(tick_setup_sched_timer_jiffies_count, cpu),
                        ".H"[per_cpu(tick_setup_sched_timer_help_needed, cpu)]);
        pr_cont("\n");
}

/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
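For reference, tick_setup_sched_timer_dump() emits a single line: the function name followed by one "j/c <delta>/<count> <flag>" triple per possible CPU, where <delta> is the low 12 bits (in hex) of jiffies minus that CPU's tick_setup_sched_timer_jiffies value, <count> is its tick_setup_sched_timer_jiffies_count, and the flag is 'H' when tick_setup_sched_timer_help_needed is set ('.' otherwise). With purely illustrative values, a two-CPU dump might look like:

tick_setup_sched_timer_dump state j/c 3/1 . j/c a2c/0 H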
