m68k: mm: Extend table allocator for multiple sizes
In addition to the PGD/PMD table size (128*4) add a PTE table size
(64*4) to the table allocator. This completely removes the pte-table
overhead compared to the old code, even for dense tables.
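
As a quick illustration of the sizing involved (a standalone user-space
sketch, not part of the patch; it assumes the usual 4 KiB m68k page size
and mirrors the ptable_shift/ptable_size/ptable_mask helpers added in
motorola.c below):

#include <stdio.h>

#define PAGE_SIZE 4096u

static const int ptable_shift[2] = {
	7 + 2,	/* PGD/PMD: 128 entries * 4 bytes = 512 bytes */
	6 + 2,	/* PTE:      64 entries * 4 bytes = 256 bytes */
};

#define ptable_size(type)	(1u << ptable_shift[type])
#define ptable_mask(type)	((1u << (PAGE_SIZE / ptable_size(type))) - 1)

int main(void)
{
	int type;

	/* PGD/PMD: 8 tables per page  -> mask 0xff
	 * PTE:    16 tables per page  -> mask 0xffff */
	for (type = 0; type < 2; type++)
		printf("type %d: %3u bytes/table, %2u per page, mask 0x%x\n",
		       type, ptable_size(type),
		       PAGE_SIZE / ptable_size(type), ptable_mask(type));
	return 0;
}

With 16 PTE tables per page instead of 8, the free mask no longer fits in
the unsigned char the old code kept in page->index, which is why the diff
widens PD_MARKBITS() to unsigned int.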

Notes:

 - the allocator gained a list_empty() check to deal with there not
   being any pages at all (see the allocation sketch after these notes).

 - the free mask is extended to cover more than the 8 bits required
   for the (512 byte) PGD/PMD tables.

 - NR_PAGETABLE accounting is restored.
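
To make the first two notes concrete, here is a minimal user-space sketch
of the per-page bookkeeping (illustrative only; the slot_offset() helper
is hypothetical and not part of the patch). Each page of tables carries a
bitmask of free slots; allocation scans for the lowest set bit, much like
the for (tmp = 1, ...) loop in get_pointer_table(), and a zero mask, which
is what the new list_empty() check yields when no pages are queued at all,
means a fresh page has to be allocated first.

#include <assert.h>
#include <stdio.h>

/* Find the lowest free slot in a page-wide free mask, mark it used and
 * return its byte offset inside the page. */
static unsigned int slot_offset(unsigned int *mask, unsigned int slot_size)
{
	unsigned int tmp, off;

	/* Zero mask: no free slot here; the caller must grab a new page. */
	assert(*mask != 0);

	for (tmp = 1, off = 0; (*mask & tmp) == 0; tmp <<= 1, off += slot_size)
		;
	*mask &= ~tmp;
	return off;
}

int main(void)
{
	unsigned int mask = 0xfffc;	/* 16 PTE slots, slots 0 and 1 in use */

	printf("next free PTE table at offset %u\n", slot_offset(&mask, 256));
	return 0;
}

The third note is covered by the pgtable_pte_page_ctor()/_dtor() calls
visible in the motorola.c hunks, which keep the NR_PAGETABLE accounting in
step now that PTE tables come from this allocator as well.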

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Greg Ungerer <gerg@linux-m68k.org>
Tested-by: Michael Schmitz <schmitzmic@gmail.com>
Tested-by: Greg Ungerer <gerg@linux-m68k.org>
Link: https://lore.kernel.org/r/20200131125403.882175409@infradead.org
Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
Peter Zijlstra authored and geertu committed Feb 10, 2020
1 parent 61c64a2 commit 0e071ee
Showing 3 changed files with 70 additions and 42 deletions.
31 changes: 19 additions & 12 deletions arch/m68k/include/asm/motorola_pgalloc.h
@@ -8,61 +8,68 @@
 extern void mmu_page_ctor(void *page);
 extern void mmu_page_dtor(void *page);
 
-extern pmd_t *get_pointer_table(void);
-extern int free_pointer_table(pmd_t *);
+enum m68k_table_types {
+	TABLE_PGD = 0,
+	TABLE_PMD = 0, /* same size as PGD */
+	TABLE_PTE = 1,
+};
+
+extern void init_pointer_table(void *table, int type);
+extern void *get_pointer_table(int type);
+extern int free_pointer_table(void *table, int type);
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 {
-	return (pte_t *)get_pointer_table();
+	return get_pointer_table(TABLE_PTE);
 }
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	free_pointer_table((void *)pte);
+	free_pointer_table(pte, TABLE_PTE);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
-	return (pte_t *)get_pointer_table();
+	return get_pointer_table(TABLE_PTE);
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable)
 {
-	free_pointer_table((void *)pgtable);
+	free_pointer_table(pgtable, TABLE_PTE);
 }
 
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
				  unsigned long address)
 {
-	free_pointer_table((void *)pgtable);
+	free_pointer_table(pgtable, TABLE_PTE);
 }
 
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	return get_pointer_table();
+	return get_pointer_table(TABLE_PMD);
 }
 
 static inline int pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-	return free_pointer_table(pmd);
+	return free_pointer_table(pmd, TABLE_PMD);
 }
 
 static inline int __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				 unsigned long address)
 {
-	return free_pointer_table(pmd);
+	return free_pointer_table(pmd, TABLE_PMD);
 }
 
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-	pmd_free(mm, (pmd_t *)pgd);
+	free_pointer_table(pgd, TABLE_PGD);
 }
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	return (pgd_t *)get_pointer_table();
+	return get_pointer_table(TABLE_PGD);
 }
 
 
16 changes: 8 additions & 8 deletions arch/m68k/mm/init.c
@@ -40,10 +40,6 @@
 void *empty_zero_page;
 EXPORT_SYMBOL(empty_zero_page);
 
-#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
-extern void init_pointer_table(unsigned long ptable);
-#endif
-
 #ifdef CONFIG_MMU
 
 pg_data_t pg_data_map[MAX_NUMNODES];
@@ -127,12 +123,16 @@ static inline void init_pointer_tables(void)
 	int i;
 
 	/* insert pointer tables allocated so far into the tablelist */
-	init_pointer_table((unsigned long)kernel_pg_dir);
+	init_pointer_table(kernel_pg_dir, TABLE_PGD);
 	for (i = 0; i < PTRS_PER_PGD; i++) {
-		pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);
+		pud_t *pud = (pud_t *)&kernel_pg_dir[i];
+		pmd_t *pmd_dir;
+
+		if (!pud_present(*pud))
+			continue;
 
-		if (pud_present(*pud))
-			init_pointer_table(pgd_page_vaddr(kernel_pg_dir[i]));
+		pmd_dir = (pmd_t *)pgd_page_vaddr(kernel_pg_dir[i]);
+		init_pointer_table(pmd_dir, TABLE_PMD);
 	}
 #endif
 }
65 changes: 43 additions & 22 deletions arch/m68k/mm/motorola.c
@@ -72,24 +72,35 @@ void mmu_page_dtor(void *page)
    arch/sparc/mm/srmmu.c ... */
 
 typedef struct list_head ptable_desc;
-static LIST_HEAD(ptable_list);
+
+static struct list_head ptable_list[2] = {
+	LIST_HEAD_INIT(ptable_list[0]),
+	LIST_HEAD_INIT(ptable_list[1]),
+};
 
 #define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
 #define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
-#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)
+#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)
+
+static const int ptable_shift[2] = {
+	7+2, /* PGD, PMD */
+	6+2, /* PTE */
+};
 
-#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
+#define ptable_size(type) (1U << ptable_shift[type])
+#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)
 
-void __init init_pointer_table(unsigned long ptable)
+void __init init_pointer_table(void *table, int type)
 {
 	ptable_desc *dp;
+	unsigned long ptable = (unsigned long)table;
 	unsigned long page = ptable & PAGE_MASK;
-	unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);
+	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));
 
 	dp = PD_PTABLE(page);
 	if (!(PD_MARKBITS(dp) & mask)) {
-		PD_MARKBITS(dp) = 0xff;
-		list_add(dp, &ptable_list);
+		PD_MARKBITS(dp) = ptable_mask(type);
+		list_add(dp, &ptable_list[type]);
 	}
 
 	PD_MARKBITS(dp) &= ~mask;
@@ -102,12 +113,11 @@ void __init init_pointer_table(unsigned long ptable)
 	return;
 }
 
-pmd_t *get_pointer_table (void)
+void *get_pointer_table(int type)
 {
-	ptable_desc *dp = ptable_list.next;
-	unsigned char mask = PD_MARKBITS (dp);
-	unsigned char tmp;
-	unsigned int off;
+	ptable_desc *dp = ptable_list[type].next;
+	unsigned int mask = list_empty(&ptable_list[type]) ? 0 : PD_MARKBITS(dp);
+	unsigned int tmp, off;
 
 	/*
 	 * For a pointer table for a user process address space, a
Expand All @@ -122,49 +132,60 @@ pmd_t *get_pointer_table (void)
if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
return NULL;

if (type == TABLE_PTE) {
/*
* m68k doesn't have SPLIT_PTE_PTLOCKS for not having
* SMP.
*/
pgtable_pte_page_ctor(virt_to_page(page));
}

mmu_page_ctor(page);

new = PD_PTABLE(page);
PD_MARKBITS(new) = 0xfe;
PD_MARKBITS(new) = ptable_mask(type) - 1;
list_add_tail(new, dp);

return (pmd_t *)page;
}

for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(type))
;
PD_MARKBITS(dp) = mask & ~tmp;
if (!PD_MARKBITS(dp)) {
/* move to end of list */
list_move_tail(dp, &ptable_list);
list_move_tail(dp, &ptable_list[type]);
}
return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
return page_address(PD_PAGE(dp)) + off;
}

int free_pointer_table (pmd_t *ptable)
int free_pointer_table(void *table, int type)
{
ptable_desc *dp;
unsigned long page = (unsigned long)ptable & PAGE_MASK;
unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);
unsigned long ptable = (unsigned long)table;
unsigned long page = ptable & PAGE_MASK;
unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

dp = PD_PTABLE(page);
if (PD_MARKBITS (dp) & mask)
panic ("table already free!");

PD_MARKBITS (dp) |= mask;

if (PD_MARKBITS(dp) == 0xff) {
if (PD_MARKBITS(dp) == ptable_mask(type)) {
/* all tables in page are free, free page */
list_del(dp);
mmu_page_dtor((void *)page);
if (type == TABLE_PTE)
pgtable_pte_page_dtor(virt_to_page(page));
free_page (page);
return 1;
} else if (ptable_list.next != dp) {
} else if (ptable_list[type].next != dp) {
/*
* move this descriptor to the front of the list, since
* it has one or more free tables.
*/
list_move(dp, &ptable_list);
list_move(dp, &ptable_list[type]);
}
return 0;
}
