|
7 | 7 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
8 | 8 | */
9 | 9 |
|
10 | | -#include <linux/kernel.h> |
11 | | -#include <linux/errno.h> |
12 | | -#include <linux/types.h> |
13 | 10 | #include <linux/mm.h> |
14 | | -#include <linux/memblock.h> |
15 | | -#include <linux/gfp.h> |
16 | | -#include <linux/init.h> |
17 | | -#include <asm/asm-extable.h> |
18 | | -#include <asm/facility.h> |
19 | 11 | #include <asm/page-states.h> |
| 12 | +#include <asm/sections.h> |
| 13 | +#include <asm/page.h> |
20 | 14 |
|
21 | 15 | int __bootdata_preserved(cmma_flag); |
22 | 16 |
|
23 | | -static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end) |
24 | | -{ |
25 | | - unsigned long next; |
26 | | - struct page *page; |
27 | | - pmd_t *pmd; |
28 | | - |
29 | | - pmd = pmd_offset(pud, addr); |
30 | | - do { |
31 | | - next = pmd_addr_end(addr, end); |
32 | | - if (pmd_none(*pmd) || pmd_large(*pmd)) |
33 | | - continue; |
34 | | - page = phys_to_page(pmd_val(*pmd)); |
35 | | - set_bit(PG_arch_1, &page->flags); |
36 | | - } while (pmd++, addr = next, addr != end); |
37 | | -} |
38 | | - |
39 | | -static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end) |
40 | | -{ |
41 | | - unsigned long next; |
42 | | - struct page *page; |
43 | | - pud_t *pud; |
44 | | - int i; |
45 | | - |
46 | | - pud = pud_offset(p4d, addr); |
47 | | - do { |
48 | | - next = pud_addr_end(addr, end); |
49 | | - if (pud_none(*pud) || pud_large(*pud)) |
50 | | - continue; |
51 | | - if (!pud_folded(*pud)) { |
52 | | - page = phys_to_page(pud_val(*pud)); |
53 | | - for (i = 0; i < 4; i++) |
54 | | - set_bit(PG_arch_1, &page[i].flags); |
55 | | - } |
56 | | - mark_kernel_pmd(pud, addr, next); |
57 | | - } while (pud++, addr = next, addr != end); |
58 | | -} |
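
A note on the set_bit() loops: on s390 a pte table is 2 KB (256 eight-byte entries), so mark_kernel_pmd() above tags the single 4 KB page that contains it, while region and segment tables hold 2048 eight-byte entries each, i.e. 16 KB spanning four consecutive pages, hence the i < 4 loops here and in the walkers below. A minimal sketch of the arithmetic, with illustrative macro names rather than the kernel's own:

    #define CRST_ENTRIES  2048                     /* entries per region/segment table */
    #define CRST_SIZE     (CRST_ENTRIES * 8)       /* 16 KB per table */
    #define CRST_PAGES    (CRST_SIZE / PAGE_SIZE)  /* = 4 with 4 KB pages, hence i < 4 */
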
59 | | - |
60 | | -static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end) |
61 | | -{ |
62 | | - unsigned long next; |
63 | | - struct page *page; |
64 | | - p4d_t *p4d; |
65 | | - int i; |
66 | | - |
67 | | - p4d = p4d_offset(pgd, addr); |
68 | | - do { |
69 | | - next = p4d_addr_end(addr, end); |
70 | | - if (p4d_none(*p4d)) |
71 | | - continue; |
72 | | - if (!p4d_folded(*p4d)) { |
73 | | - page = phys_to_page(p4d_val(*p4d)); |
74 | | - for (i = 0; i < 4; i++) |
75 | | - set_bit(PG_arch_1, &page[i].flags); |
76 | | - } |
77 | | - mark_kernel_pud(p4d, addr, next); |
78 | | - } while (p4d++, addr = next, addr != end); |
79 | | -} |
80 | | - |
81 | | -static void mark_kernel_pgd(void) |
82 | | -{ |
83 | | - unsigned long addr, next, max_addr; |
84 | | - struct page *page; |
85 | | - pgd_t *pgd; |
86 | | - int i; |
87 | | - |
88 | | - addr = 0; |
89 | | - /* |
90 | | - * Figure out maximum virtual address accessible with the |
91 | | - * kernel ASCE. This is required to keep the page table walker |
92 | | - * from accessing non-existent entries. |
93 | | - */ |
94 | | - max_addr = (S390_lowcore.kernel_asce.val & _ASCE_TYPE_MASK) >> 2; |
95 | | - max_addr = 1UL << (max_addr * 11 + 31); |
96 | | - pgd = pgd_offset_k(addr); |
97 | | - do { |
98 | | - next = pgd_addr_end(addr, max_addr); |
99 | | - if (pgd_none(*pgd)) |
100 | | - continue; |
101 | | - if (!pgd_folded(*pgd)) { |
102 | | - page = phys_to_page(pgd_val(*pgd)); |
103 | | - for (i = 0; i < 4; i++) |
104 | | - set_bit(PG_arch_1, &page[i].flags); |
105 | | - } |
106 | | - mark_kernel_p4d(pgd, addr, next); |
107 | | - } while (pgd++, addr = next, addr != max_addr); |
108 | | -} |
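
The max_addr derivation above condenses the architected s390 address-space sizes into one expression: the ASCE designation type (0..3, extracted with _ASCE_TYPE_MASK >> 2) selects the top-level table type, and each table level multiplies the reachable space by 2^11, since every region/segment table holds 2048 entries. Expanded as a worked table:

    /* type 0: segment table       -> 1UL << 31, 2 GB              */
    /* type 1: region-third table  -> 1UL << 42, 4 TB              */
    /* type 2: region-second table -> 1UL << 53, 8 PB              */
    /* type 3: region-first table  -> 1UL << 64, full 64-bit space */
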
109 | | - |
110 | | -void __init cmma_init_nodat(void) |
111 | | -{ |
112 | | - struct page *page; |
113 | | - unsigned long start, end, ix; |
114 | | - int i; |
115 | | - |
116 | | - if (cmma_flag < 2) |
117 | | - return; |
118 | | - /* Mark pages used in kernel page tables */ |
119 | | - mark_kernel_pgd(); |
120 | | - page = virt_to_page(&swapper_pg_dir); |
121 | | - for (i = 0; i < 4; i++) |
122 | | - set_bit(PG_arch_1, &page[i].flags); |
123 | | - page = virt_to_page(&invalid_pg_dir); |
124 | | - for (i = 0; i < 4; i++) |
125 | | - set_bit(PG_arch_1, &page[i].flags); |
126 | | - |
127 | | - /* Set all kernel pages not used for page tables to stable/no-dat */ |
128 | | - for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { |
129 | | - page = pfn_to_page(start); |
130 | | - for (ix = start; ix < end; ix++, page++) { |
131 | | - if (__test_and_clear_bit(PG_arch_1, &page->flags)) |
132 | | - continue; /* skip page table pages */ |
133 | | - if (!list_empty(&page->lru)) |
134 | | - continue; /* skip free pages */ |
135 | | - __set_page_stable_nodat(page_to_virt(page), 1); |
136 | | - } |
137 | | - } |
138 | | -} |
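
The removed cmma_init_nodat() is a two-pass scheme: pass one tags every page that backs a kernel page table with PG_arch_1 via the walkers above, pass two sweeps all present memory, clears the tag, and marks every remaining in-use page stable/no-dat. Page-table pages must stay DAT-capable, and free pages (still on an lru list) are skipped because the page allocator hooks set their state on (de)allocation. A condensed sketch of the pattern; for_each_present_page() is an illustrative stand-in for the memblock pfn walk:

    mark_kernel_pgd();                       /* pass 1: tag page-table pages */
    for_each_present_page(page) {            /* pass 2: illustrative iterator */
        if (__test_and_clear_bit(PG_arch_1, &page->flags))
            continue;                        /* page-table page, keep DAT-capable */
        if (!list_empty(&page->lru))
            continue;                        /* free page, state set on allocation */
        __set_page_stable_nodat(page_to_virt(page), 1);
    }
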
139 | | - |
140 | 17 | void arch_free_page(struct page *page, int order) |
141 | 18 | { |
142 | 19 | if (!cmma_flag) |
|