
Commit 9de7d83

Martin Schwidefsky authored and Ingo Molnar committed
s390/tlb: Convert to generic mmu_gather
No change in behavior intended.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: aneesh.kumar@linux.vnet.ibm.com
Cc: heiko.carstens@de.ibm.com
Cc: linux@armlinux.org.uk
Cc: npiggin@gmail.com
Cc: will.deacon@arm.com
Link: http://lkml.kernel.org/r/20180918125151.31744-3-schwidefsky@de.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent 952a31c commit 9de7d83
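
For context, the generic mmu_gather that s390 converts to here is driven entirely by the core MM; the architecture only supplies the hooks this patch defines. A minimal sketch of the caller flow, assuming the v4.19-era generic API (example_unmap is a hypothetical illustration, not code from this commit):

#include <linux/mm.h>
#include <asm/tlb.h>

/* Hypothetical example: the generic mmu_gather lifecycle as the core
 * MM drives it. The arch no longer defines struct mmu_gather itself. */
static void example_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
                          unsigned long start, unsigned long end)
{
        struct mmu_gather tlb;

        tlb_gather_mmu(&tlb, mm, start, end);  /* generic init, no arch copy */
        unmap_vmas(&tlb, vma, start, end);     /* clears PTEs; calls the s390
                                                * __tlb_remove_page_size() and
                                                * *_free_tlb() hooks below */
        tlb_finish_mmu(&tlb, start, end);      /* ends up in s390 tlb_flush()
                                                * -> __tlb_flush_mm_lazy(mm) */
}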

3 files changed: +42 -151 lines


arch/s390/Kconfig

Lines changed: 2 additions & 0 deletions
@@ -164,11 +164,13 @@ config S390
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_MEMBLOCK_PHYS_MAP
+	select HAVE_MMU_GATHER_NO_GATHER
 	select HAVE_MOD_ARCH_SPECIFIC
 	select HAVE_NOP_MCOUNT
 	select HAVE_OPROFILE
 	select HAVE_PCI
 	select HAVE_PERF_EVENTS
+	select HAVE_RCU_TABLE_FREE
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RSEQ
 	select HAVE_SYSCALL_TRACEPOINTS
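
The two selects pick the generic flavor s390 needs: HAVE_RCU_TABLE_FREE enables the RCU-batched tlb_remove_table() in generic code (replacing the s390-private copy removed from pgalloc.c below), and HAVE_MMU_GATHER_NO_GATHER compiles out the generic page batching, since s390 frees pages immediately in __tlb_remove_page_size(). Roughly, the generic struct then looks like this (an abridged paraphrase of include/asm-generic/tlb.h after this series, from memory, not the verbatim header):

/* Abridged paraphrase of the generic struct; see include/asm-generic/tlb.h. */
struct mmu_gather {
        struct mm_struct        *mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
        struct mmu_table_batch  *batch;         /* RCU-freed page tables */
#endif
        unsigned long           start, end;
        unsigned int            fullmm : 1,
                                freed_tables : 1,
                                cleared_ptes : 1,
                                cleared_pmds : 1,
                                cleared_puds : 1,
                                cleared_p4ds : 1;
#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
        /* page-batching state (active/local batches) lives here;
         * compiled out for s390 by the new Kconfig select */
#endif
};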

arch/s390/include/asm/tlb.h

Lines changed: 39 additions & 89 deletions
@@ -22,107 +22,57 @@
  * Pages used for the page tables is a different story. FIXME: more
  */
 
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/swap.h>
-#include <asm/processor.h>
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-
-struct mmu_gather {
-	struct mm_struct *mm;
-	struct mmu_table_batch *batch;
-	unsigned int fullmm;
-	unsigned long start, end;
-};
-
-struct mmu_table_batch {
-	struct rcu_head rcu;
-	unsigned int nr;
-	void *tables[0];
-};
-
-#define MAX_TABLE_BATCH \
-	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
-
-extern void tlb_table_flush(struct mmu_gather *tlb);
-extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-
-static inline void
-arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-		    unsigned long start, unsigned long end)
-{
-	tlb->mm = mm;
-	tlb->start = start;
-	tlb->end = end;
-	tlb->fullmm = !(start | (end+1));
-	tlb->batch = NULL;
-}
-
-static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
-	__tlb_flush_mm_lazy(tlb->mm);
-}
-
-static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
-	tlb_table_flush(tlb);
-}
+void __tlb_remove_table(void *_table);
+static inline void tlb_flush(struct mmu_gather *tlb);
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+					  struct page *page, int page_size);
 
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
-	tlb_flush_mmu_tlbonly(tlb);
-	tlb_flush_mmu_free(tlb);
-}
+#define tlb_start_vma(tlb, vma)			do { } while (0)
+#define tlb_end_vma(tlb, vma)			do { } while (0)
 
-static inline void
-arch_tlb_finish_mmu(struct mmu_gather *tlb,
-		unsigned long start, unsigned long end, bool force)
-{
-	if (force) {
-		tlb->start = start;
-		tlb->end = end;
-	}
+#define tlb_flush tlb_flush
+#define pte_free_tlb pte_free_tlb
+#define pmd_free_tlb pmd_free_tlb
+#define p4d_free_tlb p4d_free_tlb
+#define pud_free_tlb pud_free_tlb
 
-	tlb_flush_mmu(tlb);
-}
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm-generic/tlb.h>
 
 /*
  * Release the page cache reference for a pte removed by
  * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
  * has already been freed, so just do free_page_and_swap_cache.
  */
-static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	free_page_and_swap_cache(page);
-	return false; /* avoid calling tlb_flush_mmu */
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	free_page_and_swap_cache(page);
-}
-
 static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
 					  struct page *page, int page_size)
 {
-	return __tlb_remove_page(tlb, page);
+	free_page_and_swap_cache(page);
+	return false;
 }
 
-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
-					struct page *page, int page_size)
+static inline void tlb_flush(struct mmu_gather *tlb)
 {
-	return tlb_remove_page(tlb, page);
+	__tlb_flush_mm_lazy(tlb->mm);
 }
 
 /*
  * pte_free_tlb frees a pte table and clears the CRSTE for the
  * page table from the tlb.
  */
 static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
-				unsigned long address)
+                                unsigned long address)
 {
+	__tlb_adjust_range(tlb, address, PAGE_SIZE);
+	tlb->mm->context.flush_mm = 1;
+	tlb->freed_tables = 1;
+	tlb->cleared_ptes = 1;
+	/*
+	 * page_table_free_rcu takes care of the allocation bit masks
+	 * of the 2K table fragments in the 4K page table page,
+	 * then calls tlb_remove_table.
+	 */
 	page_table_free_rcu(tlb, (unsigned long *) pte, address);
 }
 
@@ -139,6 +89,10 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 	if (mm_pmd_folded(tlb->mm))
 		return;
 	pgtable_pmd_page_dtor(virt_to_page(pmd));
+	__tlb_adjust_range(tlb, address, PAGE_SIZE);
+	tlb->mm->context.flush_mm = 1;
+	tlb->freed_tables = 1;
+	tlb->cleared_puds = 1;
 	tlb_remove_table(tlb, pmd);
 }
 
@@ -154,6 +108,10 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
 {
 	if (mm_p4d_folded(tlb->mm))
 		return;
+	__tlb_adjust_range(tlb, address, PAGE_SIZE);
+	tlb->mm->context.flush_mm = 1;
+	tlb->freed_tables = 1;
+	tlb->cleared_p4ds = 1;
 	tlb_remove_table(tlb, p4d);
 }
 
@@ -169,19 +127,11 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 {
 	if (mm_pud_folded(tlb->mm))
 		return;
+	tlb->mm->context.flush_mm = 1;
+	tlb->freed_tables = 1;
+	tlb->cleared_puds = 1;
 	tlb_remove_table(tlb, pud);
 }
 
-#define tlb_start_vma(tlb, vma)			do { } while (0)
-#define tlb_end_vma(tlb, vma)			do { } while (0)
-#define tlb_remove_tlb_entry(tlb, ptep, addr)	do { } while (0)
-#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr)	do { } while (0)
-#define tlb_migrate_finish(mm)			do { } while (0)
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
-	tlb_remove_tlb_entry(tlb, ptep, address)
-
-static inline void tlb_change_page_size(struct mmu_gather *tlb, unsigned int page_size)
-{
-}
 
 #endif /* _S390_TLB_H */
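
The new hook bodies lean on the generic __tlb_adjust_range() helper to grow the range that the eventual flush covers. Its effect is roughly the following (a paraphrase of the helper in include/asm-generic/tlb.h, not part of this diff):

/* Paraphrase of the generic range-tracking helper. */
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
                                      unsigned long address,
                                      unsigned int range_size)
{
        tlb->start = min(tlb->start, address);
        tlb->end   = max(tlb->end, address + range_size);
}

On s390 the exact range matters little, since tlb_flush() falls back to __tlb_flush_mm_lazy() for the whole mm; but the generic code skips tlb_flush() entirely when the tracked range is empty, so keeping it non-empty ensures the flush actually happens when only a page table was freed.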

arch/s390/mm/pgalloc.c

Lines changed: 1 addition & 62 deletions
@@ -290,7 +290,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
 	tlb_remove_table(tlb, table);
 }
 
-static void __tlb_remove_table(void *_table)
+void __tlb_remove_table(void *_table)
 {
 	unsigned int mask = (unsigned long) _table & 3;
 	void *table = (void *)((unsigned long) _table ^ mask);
@@ -316,67 +316,6 @@ static void __tlb_remove_table(void *_table)
 	}
 }
 
-static void tlb_remove_table_smp_sync(void *arg)
-{
-	/* Simply deliver the interrupt */
-}
-
-static void tlb_remove_table_one(void *table)
-{
-	/*
-	 * This isn't an RCU grace period and hence the page-tables cannot be
-	 * assumed to be actually RCU-freed.
-	 *
-	 * It is however sufficient for software page-table walkers that rely
-	 * on IRQ disabling. See the comment near struct mmu_table_batch.
-	 */
-	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
-	__tlb_remove_table(table);
-}
-
-static void tlb_remove_table_rcu(struct rcu_head *head)
-{
-	struct mmu_table_batch *batch;
-	int i;
-
-	batch = container_of(head, struct mmu_table_batch, rcu);
-
-	for (i = 0; i < batch->nr; i++)
-		__tlb_remove_table(batch->tables[i]);
-
-	free_page((unsigned long)batch);
-}
-
-void tlb_table_flush(struct mmu_gather *tlb)
-{
-	struct mmu_table_batch **batch = &tlb->batch;
-
-	if (*batch) {
-		call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
-		*batch = NULL;
-	}
-}
-
-void tlb_remove_table(struct mmu_gather *tlb, void *table)
-{
-	struct mmu_table_batch **batch = &tlb->batch;
-
-	tlb->mm->context.flush_mm = 1;
-	if (*batch == NULL) {
-		*batch = (struct mmu_table_batch *)
-			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
-		if (*batch == NULL) {
-			__tlb_flush_mm_lazy(tlb->mm);
-			tlb_remove_table_one(table);
-			return;
-		}
-		(*batch)->nr = 0;
-	}
-	(*batch)->tables[(*batch)->nr++] = table;
-	if ((*batch)->nr == MAX_TABLE_BATCH)
-		tlb_flush_mmu(tlb);
-}
-
 /*
  * Base infrastructure required to generate basic asces, region, segment,
  * and page tables that do not make use of enhanced features like EDAT1.
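
None of the batching machinery deleted here is lost: an almost identical implementation lives in the generic code that CONFIG_HAVE_RCU_TABLE_FREE now enables (mm/mmu_gather.c in this era). A paraphrased sketch of the generic replacement follows; the visible difference from the removed s390 copy is that setting mm->context.flush_mm has moved into the *_free_tlb() hooks in tlb.h:

/* Paraphrased from the generic CONFIG_HAVE_RCU_TABLE_FREE code,
 * not the verbatim source. */
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
        struct mmu_table_batch **batch = &tlb->batch;

        if (*batch == NULL) {
                *batch = (struct mmu_table_batch *)
                        __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
                if (*batch == NULL) {
                        /* no memory: IPI-synchronize and free directly */
                        tlb_remove_table_one(table);
                        return;
                }
                (*batch)->nr = 0;
        }
        (*batch)->tables[(*batch)->nr++] = table;
        if ((*batch)->nr == MAX_TABLE_BATCH)
                tlb_table_flush(tlb);   /* queues the RCU callback */
}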
