Skip to content
Permalink
Browse files
KVM: Consider SMT idle status when halt polling
SMT siblings share caches and other hardware resources; halt polling
on one sibling will degrade the performance of the other sibling if
that sibling is busy.

Signed-off-by: Li RongQing <lirongqing@baidu.com>
  • Loading branch information
lrq-max authored and intel-lab-lkp committed Jul 22, 2021
1 parent 031e3bd commit df205655d183524ffe8547bcc0f230a6f3217566
Show file tree
Hide file tree
Showing 3 changed files with 21 additions and 18 deletions.
@@ -268,7 +268,10 @@ static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)

/*
 * kvm_vcpu_can_poll - decide whether halt polling may continue.
 *
 * Polling is allowed only while this task is the sole runnable task on
 * the CPU, no reschedule is pending, the poll deadline @stop has not
 * passed, and every SMT sibling of this CPU is idle (polling on a busy
 * core would steal cache/execution resources from the sibling).
 *
 * NOTE(review): the scraped diff left both the old single-condition
 * return and the new four-condition return in place; only the new
 * return is kept here.
 */
static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
{
	return single_task_running() &&
	       !need_resched() &&
	       ktime_before(cur, stop) &&
	       is_core_idle(raw_smp_processor_id());
}

/*
@@ -34,6 +34,7 @@
#include <linux/rseq.h>
#include <linux/seqlock.h>
#include <linux/kcsan.h>
#include <linux/topology.h>
#include <asm/kmap_size.h>

/* task_struct member predeclarations (sorted alphabetically): */
@@ -2190,6 +2191,22 @@ int sched_trace_rq_nr_running(struct rq *rq);

const struct cpumask *sched_trace_rd_span(struct root_domain *rd);

/*
 * is_core_idle - check whether all SMT siblings of @cpu are idle.
 *
 * Returns true when every hardware thread sharing the core with @cpu
 * is idle (the caller's own CPU is skipped).  Without CONFIG_SCHED_SMT
 * there are no siblings, so the core is trivially considered idle.
 */
static inline bool is_core_idle(int cpu)
{
#ifdef CONFIG_SCHED_SMT
	int sibling;

	for_each_cpu(sibling, cpu_smt_mask(cpu)) {
		if (cpu == sibling)
			continue;

		/* Must test the sibling, not @cpu itself, which is running. */
		if (!idle_cpu(sibling))
			return false;
	}
#endif
	return true;
}

#ifdef CONFIG_SCHED_CORE
extern void sched_core_free(struct task_struct *tsk);
extern void sched_core_fork(struct task_struct *p);
@@ -1477,23 +1477,6 @@ struct numa_stats {
int idle_cpu;
};

/*
 * is_core_idle - check whether all SMT siblings of @cpu are idle.
 *
 * True when no hardware thread sharing the core with @cpu is busy;
 * @cpu itself is excluded from the check.  Trivially true when
 * CONFIG_SCHED_SMT is not configured.
 */
static inline bool is_core_idle(int cpu)
{
#ifdef CONFIG_SCHED_SMT
	int sibling;

	for_each_cpu(sibling, cpu_smt_mask(cpu)) {
		if (cpu == sibling)
			continue;

		/* Check the sibling's idle state, not @cpu's. */
		if (!idle_cpu(sibling))
			return false;
	}
#endif

	return true;
}

struct task_numa_env {
struct task_struct *p;

0 comments on commit df20565

Please sign in to comment.