Skip to content

Commit 74d2aaa

Browse files
Steven Price authored and torvalds committed
x86: mm: point to struct seq_file from struct pg_state
mm/dump_pagetables.c passes both struct seq_file and struct pg_state down the chain of walk_*_level() functions to be passed to note_page(). Instead place the struct seq_file in struct pg_state and access it from struct pg_state (which is private to this file) in note_page(). Link: http://lkml.kernel.org/r/20191218162402.45610-17-steven.price@arm.com Signed-off-by: Steven Price <steven.price@arm.com> Cc: Albert Ou <aou@eecs.berkeley.edu> Cc: Alexandre Ghiti <alex@ghiti.fr> Cc: Andy Lutomirski <luto@kernel.org> Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Christian Borntraeger <borntraeger@de.ibm.com> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: David S. Miller <davem@davemloft.net> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: James Hogan <jhogan@kernel.org> Cc: James Morse <james.morse@arm.com> Cc: Jerome Glisse <jglisse@redhat.com> Cc: "Liang, Kan" <kan.liang@linux.intel.com> Cc: Mark Rutland <mark.rutland@arm.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Paul Burton <paul.burton@mips.com> Cc: Paul Mackerras <paulus@samba.org> Cc: Paul Walmsley <paul.walmsley@sifive.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Ralf Baechle <ralf@linux-mips.org> Cc: Russell King <linux@armlinux.org.uk> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Vineet Gupta <vgupta@synopsys.com> Cc: Will Deacon <will@kernel.org> Cc: Zong Li <zong.li@sifive.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent b7a16c7 commit 74d2aaa

File tree

1 file changed

+35
-34
lines changed

1 file changed

+35
-34
lines changed

arch/x86/mm/dump_pagetables.c

Lines changed: 35 additions & 34 deletions
Original file line number | Diff line number | Diff line change
@@ -36,6 +36,7 @@ struct pg_state {
3636
bool to_dmesg;
3737
bool check_wx;
3838
unsigned long wx_pages;
39+
struct seq_file *seq;
3940
};
4041

4142
struct addr_marker {
@@ -265,11 +266,12 @@ static void note_wx(struct pg_state *st)
265266
* of PTE entries; the next one is different so we need to
266267
* print what we collected so far.
267268
*/
268-
static void note_page(struct seq_file *m, struct pg_state *st,
269-
pgprot_t new_prot, pgprotval_t new_eff, int level)
269+
static void note_page(struct pg_state *st, pgprot_t new_prot,
270+
pgprotval_t new_eff, int level)
270271
{
271272
pgprotval_t prot, cur, eff;
272273
static const char units[] = "BKMGTPE";
274+
struct seq_file *m = st->seq;
273275

274276
/*
275277
* If we have a "break" in the series, we need to flush the state that
@@ -354,8 +356,8 @@ static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2)
354356
((prot1 | prot2) & _PAGE_NX);
355357
}
356358

357-
static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
358-
pgprotval_t eff_in, unsigned long P)
359+
static void walk_pte_level(struct pg_state *st, pmd_t addr, pgprotval_t eff_in,
360+
unsigned long P)
359361
{
360362
int i;
361363
pte_t *pte;
@@ -366,7 +368,7 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
366368
pte = pte_offset_map(&addr, st->current_address);
367369
prot = pte_flags(*pte);
368370
eff = effective_prot(eff_in, prot);
369-
note_page(m, st, __pgprot(prot), eff, 5);
371+
note_page(st, __pgprot(prot), eff, 5);
370372
pte_unmap(pte);
371373
}
372374
}
@@ -379,30 +381,28 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
379381
* us dozens of seconds (minutes for 5-level config) while checking for
380382
* W+X mapping or reading kernel_page_tables debugfs file.
381383
*/
382-
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
383-
void *pt)
384+
static inline bool kasan_page_table(struct pg_state *st, void *pt)
384385
{
385386
if (__pa(pt) == __pa(kasan_early_shadow_pmd) ||
386387
(pgtable_l5_enabled() &&
387388
__pa(pt) == __pa(kasan_early_shadow_p4d)) ||
388389
__pa(pt) == __pa(kasan_early_shadow_pud)) {
389390
pgprotval_t prot = pte_flags(kasan_early_shadow_pte[0]);
390-
note_page(m, st, __pgprot(prot), 0, 5);
391+
note_page(st, __pgprot(prot), 0, 5);
391392
return true;
392393
}
393394
return false;
394395
}
395396
#else
396-
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
397-
void *pt)
397+
static inline bool kasan_page_table(struct pg_state *st, void *pt)
398398
{
399399
return false;
400400
}
401401
#endif
402402

403403
#if PTRS_PER_PMD > 1
404404

405-
static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
405+
static void walk_pmd_level(struct pg_state *st, pud_t addr,
406406
pgprotval_t eff_in, unsigned long P)
407407
{
408408
int i;
@@ -416,27 +416,27 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
416416
prot = pmd_flags(*start);
417417
eff = effective_prot(eff_in, prot);
418418
if (pmd_large(*start) || !pmd_present(*start)) {
419-
note_page(m, st, __pgprot(prot), eff, 4);
420-
} else if (!kasan_page_table(m, st, pmd_start)) {
421-
walk_pte_level(m, st, *start, eff,
419+
note_page(st, __pgprot(prot), eff, 4);
420+
} else if (!kasan_page_table(st, pmd_start)) {
421+
walk_pte_level(st, *start, eff,
422422
P + i * PMD_LEVEL_MULT);
423423
}
424424
} else
425-
note_page(m, st, __pgprot(0), 0, 4);
425+
note_page(st, __pgprot(0), 0, 4);
426426
start++;
427427
}
428428
}
429429

430430
#else
431-
#define walk_pmd_level(m,s,a,e,p) walk_pte_level(m,s,__pmd(pud_val(a)),e,p)
431+
#define walk_pmd_level(s,a,e,p) walk_pte_level(s,__pmd(pud_val(a)),e,p)
432432
#define pud_large(a) pmd_large(__pmd(pud_val(a)))
433433
#define pud_none(a) pmd_none(__pmd(pud_val(a)))
434434
#endif
435435

436436
#if PTRS_PER_PUD > 1
437437

438-
static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr,
439-
pgprotval_t eff_in, unsigned long P)
438+
static void walk_pud_level(struct pg_state *st, p4d_t addr, pgprotval_t eff_in,
439+
unsigned long P)
440440
{
441441
int i;
442442
pud_t *start, *pud_start;
@@ -450,33 +450,33 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr,
450450
prot = pud_flags(*start);
451451
eff = effective_prot(eff_in, prot);
452452
if (pud_large(*start) || !pud_present(*start)) {
453-
note_page(m, st, __pgprot(prot), eff, 3);
454-
} else if (!kasan_page_table(m, st, pud_start)) {
455-
walk_pmd_level(m, st, *start, eff,
453+
note_page(st, __pgprot(prot), eff, 3);
454+
} else if (!kasan_page_table(st, pud_start)) {
455+
walk_pmd_level(st, *start, eff,
456456
P + i * PUD_LEVEL_MULT);
457457
}
458458
} else
459-
note_page(m, st, __pgprot(0), 0, 3);
459+
note_page(st, __pgprot(0), 0, 3);
460460

461461
start++;
462462
}
463463
}
464464

465465
#else
466-
#define walk_pud_level(m,s,a,e,p) walk_pmd_level(m,s,__pud(p4d_val(a)),e,p)
466+
#define walk_pud_level(s,a,e,p) walk_pmd_level(s,__pud(p4d_val(a)),e,p)
467467
#define p4d_large(a) pud_large(__pud(p4d_val(a)))
468468
#define p4d_none(a) pud_none(__pud(p4d_val(a)))
469469
#endif
470470

471-
static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
472-
pgprotval_t eff_in, unsigned long P)
471+
static void walk_p4d_level(struct pg_state *st, pgd_t addr, pgprotval_t eff_in,
472+
unsigned long P)
473473
{
474474
int i;
475475
p4d_t *start, *p4d_start;
476476
pgprotval_t prot, eff;
477477

478478
if (PTRS_PER_P4D == 1)
479-
return walk_pud_level(m, st, __p4d(pgd_val(addr)), eff_in, P);
479+
return walk_pud_level(st, __p4d(pgd_val(addr)), eff_in, P);
480480

481481
p4d_start = start = (p4d_t *)pgd_page_vaddr(addr);
482482

@@ -486,13 +486,13 @@ static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
486486
prot = p4d_flags(*start);
487487
eff = effective_prot(eff_in, prot);
488488
if (p4d_large(*start) || !p4d_present(*start)) {
489-
note_page(m, st, __pgprot(prot), eff, 2);
490-
} else if (!kasan_page_table(m, st, p4d_start)) {
491-
walk_pud_level(m, st, *start, eff,
489+
note_page(st, __pgprot(prot), eff, 2);
490+
} else if (!kasan_page_table(st, p4d_start)) {
491+
walk_pud_level(st, *start, eff,
492492
P + i * P4D_LEVEL_MULT);
493493
}
494494
} else
495-
note_page(m, st, __pgprot(0), 0, 2);
495+
note_page(st, __pgprot(0), 0, 2);
496496

497497
start++;
498498
}
@@ -529,6 +529,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
529529
}
530530

531531
st.check_wx = checkwx;
532+
st.seq = m;
532533
if (checkwx)
533534
st.wx_pages = 0;
534535

@@ -542,21 +543,21 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
542543
eff = prot;
543544
#endif
544545
if (pgd_large(*start) || !pgd_present(*start)) {
545-
note_page(m, &st, __pgprot(prot), eff, 1);
546+
note_page(&st, __pgprot(prot), eff, 1);
546547
} else {
547-
walk_p4d_level(m, &st, *start, eff,
548+
walk_p4d_level(&st, *start, eff,
548549
i * PGD_LEVEL_MULT);
549550
}
550551
} else
551-
note_page(m, &st, __pgprot(0), 0, 1);
552+
note_page(&st, __pgprot(0), 0, 1);
552553

553554
cond_resched();
554555
start++;
555556
}
556557

557558
/* Flush out the last page */
558559
st.current_address = normalize_addr(PTRS_PER_PGD*PGD_LEVEL_MULT);
559-
note_page(m, &st, __pgprot(0), 0, 0);
560+
note_page(&st, __pgprot(0), 0, 0);
560561
if (!checkwx)
561562
return;
562563
if (st.wx_pages)

0 commit comments

Comments
 (0)