 * Pages used for the page tables is a different story. FIXME: more
 */
24
24
25
- #include <linux/mm.h>
26
- #include <linux/pagemap.h>
27
- #include <linux/swap.h>
28
- #include <asm/processor.h>
29
- #include <asm/pgalloc.h>
30
- #include <asm/tlbflush.h>
31
-
32
- struct mmu_gather {
33
- struct mm_struct * mm ;
34
- struct mmu_table_batch * batch ;
35
- unsigned int fullmm ;
36
- unsigned long start , end ;
37
- };
38
-
39
- struct mmu_table_batch {
40
- struct rcu_head rcu ;
41
- unsigned int nr ;
42
- void * tables [0 ];
43
- };
44
-
45
- #define MAX_TABLE_BATCH \
46
- ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
47
-
48
- extern void tlb_table_flush (struct mmu_gather * tlb );
49
- extern void tlb_remove_table (struct mmu_gather * tlb , void * table );
50
-
51
- static inline void
52
- arch_tlb_gather_mmu (struct mmu_gather * tlb , struct mm_struct * mm ,
53
- unsigned long start , unsigned long end )
54
- {
55
- tlb -> mm = mm ;
56
- tlb -> start = start ;
57
- tlb -> end = end ;
58
- tlb -> fullmm = !(start | (end + 1 ));
59
- tlb -> batch = NULL ;
60
- }
61
-
62
- static inline void tlb_flush_mmu_tlbonly (struct mmu_gather * tlb )
63
- {
64
- __tlb_flush_mm_lazy (tlb -> mm );
65
- }
66
-
67
- static inline void tlb_flush_mmu_free (struct mmu_gather * tlb )
68
- {
69
- tlb_table_flush (tlb );
70
- }
71
-
25
+ void __tlb_remove_table (void * _table );
26
+ static inline void tlb_flush (struct mmu_gather * tlb );
27
+ static inline bool __tlb_remove_page_size (struct mmu_gather * tlb ,
28
+ struct page * page , int page_size );
72
29
73
- static inline void tlb_flush_mmu (struct mmu_gather * tlb )
74
- {
75
- tlb_flush_mmu_tlbonly (tlb );
76
- tlb_flush_mmu_free (tlb );
77
- }
30
+ #define tlb_start_vma (tlb , vma ) do { } while (0)
31
+ #define tlb_end_vma (tlb , vma ) do { } while (0)
78
32
79
- static inline void
80
- arch_tlb_finish_mmu (struct mmu_gather * tlb ,
81
- unsigned long start , unsigned long end , bool force )
82
- {
83
- if (force ) {
84
- tlb -> start = start ;
85
- tlb -> end = end ;
86
- }
33
+ #define tlb_flush tlb_flush
34
+ #define pte_free_tlb pte_free_tlb
35
+ #define pmd_free_tlb pmd_free_tlb
36
+ #define p4d_free_tlb p4d_free_tlb
37
+ #define pud_free_tlb pud_free_tlb
87
38
88
- tlb_flush_mmu (tlb );
89
- }
39
+ #include <asm/pgalloc.h>
40
+ #include <asm/tlbflush.h>
41
+ #include <asm-generic/tlb.h>
90
42
91
43
/*
 * Release the page cache reference for a pte removed by
 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
 * has already been freed, so just do free_page_and_swap_cache.
 */
96
- static inline bool __tlb_remove_page (struct mmu_gather * tlb , struct page * page )
97
- {
98
- free_page_and_swap_cache (page );
99
- return false; /* avoid calling tlb_flush_mmu */
100
- }
101
-
102
- static inline void tlb_remove_page (struct mmu_gather * tlb , struct page * page )
103
- {
104
- free_page_and_swap_cache (page );
105
- }
106
-
107
48
static inline bool __tlb_remove_page_size (struct mmu_gather * tlb ,
108
49
struct page * page , int page_size )
109
50
{
110
- return __tlb_remove_page (tlb , page );
51
+ free_page_and_swap_cache (page );
52
+ return false;
111
53
}
112
54
113
- static inline void tlb_remove_page_size (struct mmu_gather * tlb ,
114
- struct page * page , int page_size )
55
+ static inline void tlb_flush (struct mmu_gather * tlb )
115
56
{
116
- return tlb_remove_page (tlb , page );
57
+ __tlb_flush_mm_lazy (tlb -> mm );
117
58
}
118
59
119
60
/*
 * pte_free_tlb frees a pte table and clears the CRSTE for the
 * page table from the tlb.
 */
123
64
static inline void pte_free_tlb (struct mmu_gather * tlb , pgtable_t pte ,
124
- unsigned long address )
65
+ unsigned long address )
125
66
{
67
+ __tlb_adjust_range (tlb , address , PAGE_SIZE );
68
+ tlb -> mm -> context .flush_mm = 1 ;
69
+ tlb -> freed_tables = 1 ;
70
+ tlb -> cleared_ptes = 1 ;
71
+ /*
72
+ * page_table_free_rcu takes care of the allocation bit masks
73
+ * of the 2K table fragments in the 4K page table page,
74
+ * then calls tlb_remove_table.
75
+ */
126
76
page_table_free_rcu (tlb , (unsigned long * ) pte , address );
127
77
}
128
78
@@ -139,6 +89,10 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
139
89
if (mm_pmd_folded (tlb -> mm ))
140
90
return ;
141
91
pgtable_pmd_page_dtor (virt_to_page (pmd ));
92
+ __tlb_adjust_range (tlb , address , PAGE_SIZE );
93
+ tlb -> mm -> context .flush_mm = 1 ;
94
+ tlb -> freed_tables = 1 ;
95
+ tlb -> cleared_puds = 1 ;
142
96
tlb_remove_table (tlb , pmd );
143
97
}
144
98
@@ -154,6 +108,10 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
154
108
{
155
109
if (mm_p4d_folded (tlb -> mm ))
156
110
return ;
111
+ __tlb_adjust_range (tlb , address , PAGE_SIZE );
112
+ tlb -> mm -> context .flush_mm = 1 ;
113
+ tlb -> freed_tables = 1 ;
114
+ tlb -> cleared_p4ds = 1 ;
157
115
tlb_remove_table (tlb , p4d );
158
116
}
159
117
@@ -169,19 +127,11 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
169
127
{
170
128
if (mm_pud_folded (tlb -> mm ))
171
129
return ;
130
+ tlb -> mm -> context .flush_mm = 1 ;
131
+ tlb -> freed_tables = 1 ;
132
+ tlb -> cleared_puds = 1 ;
172
133
tlb_remove_table (tlb , pud );
173
134
}
174
135
175
- #define tlb_start_vma (tlb , vma ) do { } while (0)
176
- #define tlb_end_vma (tlb , vma ) do { } while (0)
177
- #define tlb_remove_tlb_entry (tlb , ptep , addr ) do { } while (0)
178
- #define tlb_remove_pmd_tlb_entry (tlb , pmdp , addr ) do { } while (0)
179
- #define tlb_migrate_finish (mm ) do { } while (0)
180
- #define tlb_remove_huge_tlb_entry (h , tlb , ptep , address ) \
181
- tlb_remove_tlb_entry(tlb, ptep, address)
182
-
183
- static inline void tlb_change_page_size (struct mmu_gather * tlb , unsigned int page_size )
184
- {
185
- }
186
136
187
137
#endif /* _S390_TLB_H */
0 commit comments