
Commit 3db6d5a

anadav authored and Ingo Molnar committed
x86/mm/tlb: Remove 'struct flush_tlb_info' from the stack
Move flush_tlb_info variables off the stack. This allows aligning flush_tlb_info to a cache line and avoids potentially unnecessary cache-line movements. It also allows a fixed virtual-to-physical translation of the variables, which reduces TLB misses.

Use a per-CPU struct for flush_tlb_mm_range() and flush_tlb_kernel_range(). Add debug assertions to ensure there are no nested TLB flushes that might overwrite the per-CPU data. For arch_tlbbatch_flush(), use a const struct.

Results when running a microbenchmark that performs 10^6 MADV_DONTNEED operations and touches a page after each one, while 3 additional threads run a busy-wait loop (5 runs, PTI and retpolines turned off):

                        base            off-stack
                        ----            ---------
  avg (usec/op)         1.629           1.570 (-3%)
  stddev                0.014           0.009

Signed-off-by: Nadav Amit <namit@vmware.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20190425230143.7008-1-namit@vmware.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
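For context, the pattern at the heart of this change can be shown in a few lines of plain C. The sketch below is not kernel code: names such as range_info, get_range_info() and put_range_info() are invented for illustration, and C11 thread-local storage stands in for the kernel's per-CPU variables. It shows a fixed, cache-line-aligned scratch descriptor replacing a per-call stack allocation, plus a counter that asserts uses never nest, mirroring the per-CPU flush_tlb_info and flush_tlb_info_idx introduced in the diff below.

#include <assert.h>
#include <stdalign.h>
#include <stdio.h>

struct range_info {
        unsigned long start;
        unsigned long end;
};

/* One fixed descriptor per thread, aligned to a typical 64-byte cache line. */
static _Thread_local alignas(64) struct range_info scratch_info;
static _Thread_local unsigned int scratch_info_idx;     /* reentrancy guard */

static struct range_info *get_range_info(unsigned long start, unsigned long end)
{
        /* Mirrors BUG_ON(this_cpu_inc_return(flush_tlb_info_idx) != 1). */
        scratch_info_idx++;
        assert(scratch_info_idx == 1);

        scratch_info.start = start;
        scratch_info.end = end;
        return &scratch_info;
}

static void put_range_info(void)
{
        scratch_info_idx--;
}

static void flush_range(unsigned long start, unsigned long end)
{
        struct range_info *info = get_range_info(start, end);

        /* The descriptor's address (and cache line) is the same on every call. */
        printf("flush [%#lx, %#lx) via %p\n", info->start, info->end,
               (void *)info);
        put_range_info();
}

int main(void)
{
        flush_range(0x1000, 0x5000);
        flush_range(0x8000, 0x9000);
        return 0;
}

In the kernel the guard is compiled in only under CONFIG_DEBUG_VM, and preemption (via get_cpu() or preempt_disable()) must be held while the descriptor is in use so the CPU cannot change underneath it.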
1 parent: da398db; commit: 3db6d5a

1 file changed: +82 -34 lines

arch/x86/mm/tlb.c

Lines changed: 82 additions & 34 deletions
@@ -634,7 +634,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
         this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
 }
 
-static void flush_tlb_func_local(void *info, enum tlb_flush_reason reason)
+static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
 {
         const struct flush_tlb_info *f = info;

@@ -722,43 +722,81 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
  */
 unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
 
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct flush_tlb_info, flush_tlb_info);
+
+#ifdef CONFIG_DEBUG_VM
+static DEFINE_PER_CPU(unsigned int, flush_tlb_info_idx);
+#endif
+
+static inline struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
+                        unsigned long start, unsigned long end,
+                        unsigned int stride_shift, bool freed_tables,
+                        u64 new_tlb_gen)
+{
+        struct flush_tlb_info *info = this_cpu_ptr(&flush_tlb_info);
+
+#ifdef CONFIG_DEBUG_VM
+        /*
+         * Ensure that the following code is non-reentrant and flush_tlb_info
+         * is not overwritten. This means no TLB flushing is initiated by
+         * interrupt handlers and machine-check exception handlers.
+         */
+        BUG_ON(this_cpu_inc_return(flush_tlb_info_idx) != 1);
+#endif
+
+        info->start = start;
+        info->end = end;
+        info->mm = mm;
+        info->stride_shift = stride_shift;
+        info->freed_tables = freed_tables;
+        info->new_tlb_gen = new_tlb_gen;
+
+        return info;
+}
+
+static inline void put_flush_tlb_info(void)
+{
+#ifdef CONFIG_DEBUG_VM
+        /* Complete reentrancy prevention checks */
+        barrier();
+        this_cpu_dec(flush_tlb_info_idx);
+#endif
+}
+
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                                 unsigned long end, unsigned int stride_shift,
                                 bool freed_tables)
 {
+        struct flush_tlb_info *info;
+        u64 new_tlb_gen;
         int cpu;
 
-        struct flush_tlb_info info = {
-                .mm = mm,
-                .stride_shift = stride_shift,
-                .freed_tables = freed_tables,
-        };
-
         cpu = get_cpu();
 
-        /* This is also a barrier that synchronizes with switch_mm(). */
-        info.new_tlb_gen = inc_mm_tlb_gen(mm);
-
         /* Should we flush just the requested range? */
-        if ((end != TLB_FLUSH_ALL) &&
-            ((end - start) >> stride_shift) <= tlb_single_page_flush_ceiling) {
-                info.start = start;
-                info.end = end;
-        } else {
-                info.start = 0UL;
-                info.end = TLB_FLUSH_ALL;
+        if ((end == TLB_FLUSH_ALL) ||
+            ((end - start) >> stride_shift) > tlb_single_page_flush_ceiling) {
+                start = 0;
+                end = TLB_FLUSH_ALL;
         }
 
+        /* This is also a barrier that synchronizes with switch_mm(). */
+        new_tlb_gen = inc_mm_tlb_gen(mm);
+
+        info = get_flush_tlb_info(mm, start, end, stride_shift, freed_tables,
+                                  new_tlb_gen);
+
         if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
-                VM_WARN_ON(irqs_disabled());
+                lockdep_assert_irqs_enabled();
                 local_irq_disable();
-                flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
+                flush_tlb_func_local(info, TLB_LOCAL_MM_SHOOTDOWN);
                 local_irq_enable();
         }
 
         if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
-                flush_tlb_others(mm_cpumask(mm), &info);
+                flush_tlb_others(mm_cpumask(mm), info);
 
+        put_flush_tlb_info();
         put_cpu();
 }

@@ -787,38 +825,48 @@ static void do_kernel_range_flush(void *info)
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-
         /* Balance as user space task's flush, a bit conservative */
         if (end == TLB_FLUSH_ALL ||
             (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
                 on_each_cpu(do_flush_tlb_all, NULL, 1);
         } else {
-                struct flush_tlb_info info;
-                info.start = start;
-                info.end = end;
-                on_each_cpu(do_kernel_range_flush, &info, 1);
+                struct flush_tlb_info *info;
+
+                preempt_disable();
+                info = get_flush_tlb_info(NULL, start, end, 0, false, 0);
+
+                on_each_cpu(do_kernel_range_flush, info, 1);
+
+                put_flush_tlb_info();
+                preempt_enable();
         }
 }
 
+/*
+ * arch_tlbbatch_flush() performs a full TLB flush regardless of the active mm.
+ * This means that the 'struct flush_tlb_info' that describes which mappings to
+ * flush is actually fixed. We therefore set a single fixed struct and use it in
+ * arch_tlbbatch_flush().
+ */
+static const struct flush_tlb_info full_flush_tlb_info = {
+        .mm = NULL,
+        .start = 0,
+        .end = TLB_FLUSH_ALL,
+};
+
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 {
-        struct flush_tlb_info info = {
-                .mm = NULL,
-                .start = 0UL,
-                .end = TLB_FLUSH_ALL,
-        };
-
         int cpu = get_cpu();
 
         if (cpumask_test_cpu(cpu, &batch->cpumask)) {
-                VM_WARN_ON(irqs_disabled());
+                lockdep_assert_irqs_enabled();
                 local_irq_disable();
-                flush_tlb_func_local(&info, TLB_LOCAL_SHOOTDOWN);
+                flush_tlb_func_local(&full_flush_tlb_info, TLB_LOCAL_SHOOTDOWN);
                 local_irq_enable();
         }
 
         if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
-                flush_tlb_others(&batch->cpumask, &info);
+                flush_tlb_others(&batch->cpumask, &full_flush_tlb_info);
 
         cpumask_clear(&batch->cpumask);
824872
