mm: stack based kmap_atomic()
Keep the current interface but ignore the km_type argument and use a
stack-based approach.

The advantage is that we get rid of crappy code like:

	#define __KM_PTE			\
		(in_nmi() ? KM_NMI_PTE : 	\
		 in_irq() ? KM_IRQ_PTE :	\
		 KM_PTE0)

and in general can stop worrying about what context we're in and what kmap
slots might be appropriate for that.

The downside is that FRV kmap_atomic() gets more expensive.

For now we use a CPP trick suggested by Andrew:

  #define kmap_atomic(page, args...) __kmap_atomic(page)

to avoid having to touch all kmap_atomic() users in a single patch.
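
For reference, the helpers these per-arch implementations push to and
pop from -- kmap_atomic_idx_push()/kmap_atomic_idx_pop(), added to
include/linux/highmem.h, a file not among the hunks shown below --
amount to a per-CPU stack of slot indices; roughly (a sketch, details
may differ):

	/* one counter of live atomic kmaps per CPU */
	DECLARE_PER_CPU(int, __kmap_atomic_idx);

	static inline int kmap_atomic_idx_push(void)
	{
		int idx = __get_cpu_var(__kmap_atomic_idx)++;
	#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(in_irq() && !irqs_disabled());
		BUG_ON(idx > KM_TYPE_NR);
	#endif
		return idx;
	}

	static inline int kmap_atomic_idx_pop(void)
	{
		int idx = --__get_cpu_var(__kmap_atomic_idx);
	#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(idx < 0);
	#endif
		return idx;
	}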

[ not compiled on:
  - mn10300: the arch doesn't actually build with highmem to begin with ]

[akpm@linux-foundation.org: coding-style fixes]
[akpm@linux-foundation.org: fix up drivers/gpu/drm/i915/intel_overlay.c]
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Chris Metcalf <cmetcalf@tilera.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dave Airlie <airlied@linux.ie>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Peter Zijlstra authored and torvalds committed Oct 26, 2010
1 parent 61ecdb8 · commit 3e4d3af
Showing 28 changed files with 367 additions and 372 deletions.
6 changes: 3 additions & 3 deletions arch/arm/include/asm/highmem.h
@@ -35,9 +35,9 @@ extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
 #ifdef CONFIG_HIGHMEM
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
-extern void *kmap_atomic(struct page *page, enum km_type type);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
 extern struct page *kmap_atomic_to_page(const void *ptr);
 #endif
 
23 changes: 14 additions & 9 deletions arch/arm/mm/highmem.c
@@ -36,18 +36,17 @@ void kunmap(struct page *page)
 }
 EXPORT_SYMBOL(kunmap);
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
	unsigned int idx;
	unsigned long vaddr;
	void *kmap;
+	int type;
 
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);
 
-	debug_kmap_atomic(type);
-
 #ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * There is no cache coherency issue when non VIVT, so force the
@@ -61,6 +60,8 @@ void *kmap_atomic(struct page *page, enum km_type type)
	if (kmap)
		return kmap;
 
+	type = kmap_atomic_idx_push();
+
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -80,14 +81,17 @@ void *kmap_atomic(struct page *page, enum km_type type)
 
	return (void *)vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
+	int idx, type;
 
	if (kvaddr >= (void *)FIXADDR_START) {
+		type = kmap_atomic_idx_pop();
+		idx = type + KM_TYPE_NR * smp_processor_id();
+
		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -103,15 +107,16 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
	}
	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	unsigned int idx;
	unsigned long vaddr;
+	int idx, type;
 
	pagefault_disable();
 
+	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
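
(Caller-visible effect of the above -- an illustration, not a hunk from
this patch: with a per-CPU stack of slots, nested atomic kmaps only
have to be released in reverse order, and no KM_* constant has to be
picked to match the context; the args... macro from the changelog
swallows whatever type argument old callers still pass.  src and dst
are hypothetical highmem pages.)

	void *from = kmap_atomic(src);	/* pushes slot 0 on this CPU */
	void *to   = kmap_atomic(dst);	/* pushes slot 1 */

	copy_page(to, from);

	kunmap_atomic(to);		/* pops slot 1 */
	kunmap_atomic(from);		/* pops slot 0 */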
25 changes: 5 additions & 20 deletions arch/frv/include/asm/highmem.h
@@ -112,27 +112,18 @@ extern struct page *kmap_atomic_to_page(void *ptr);
	(void *) damlr; \
 })
 
-static inline void *kmap_atomic(struct page *page, enum km_type type)
+static inline void *kmap_atomic_primary(struct page *page, enum km_type type)
 {
	unsigned long paddr;
 
	pagefault_disable();
-	debug_kmap_atomic(type);
	paddr = page_to_phys(page);
 
	switch (type) {
	case 0:		return __kmap_atomic_primary(0, paddr, 2);
	case 1:		return __kmap_atomic_primary(1, paddr, 3);
	case 2:		return __kmap_atomic_primary(2, paddr, 4);
	case 3:		return __kmap_atomic_primary(3, paddr, 5);
-	case 4:		return __kmap_atomic_primary(4, paddr, 6);
-	case 5:		return __kmap_atomic_primary(5, paddr, 7);
-	case 6:		return __kmap_atomic_primary(6, paddr, 8);
-	case 7:		return __kmap_atomic_primary(7, paddr, 9);
-	case 8:		return __kmap_atomic_primary(8, paddr, 10);
-
-	case 9 ... 9 + NR_TLB_LINES - 1:
-		return __kmap_atomic_secondary(type - 9, paddr);
 
	default:
		BUG();
@@ -152,29 +143,23 @@ do { \
	asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory"); \
 } while(0)
 
-static inline void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+static inline void kunmap_atomic_primary(void *kvaddr, enum km_type type)
 {
	switch (type) {
	case 0:		__kunmap_atomic_primary(0, 2);	break;
	case 1:		__kunmap_atomic_primary(1, 3);	break;
	case 2:		__kunmap_atomic_primary(2, 4);	break;
	case 3:		__kunmap_atomic_primary(3, 5);	break;
-	case 4:		__kunmap_atomic_primary(4, 6);	break;
-	case 5:		__kunmap_atomic_primary(5, 7);	break;
-	case 6:		__kunmap_atomic_primary(6, 8);	break;
-	case 7:		__kunmap_atomic_primary(7, 9);	break;
-	case 8:		__kunmap_atomic_primary(8, 10);	break;
-
-	case 9 ... 9 + NR_TLB_LINES - 1:
-		__kunmap_atomic_secondary(type - 9, kvaddr);
-		break;
 
	default:
		BUG();
	}
	pagefault_enable();
 }
 
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
4 changes: 2 additions & 2 deletions arch/frv/mb93090-mb00/pci-dma.c
@@ -61,14 +61,14 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	dampr2 = __get_DAMPR(2);
 
	for (i = 0; i < nents; i++) {
-		vaddr = kmap_atomic(sg_page(&sg[i]), __KM_CACHE);
+		vaddr = kmap_atomic_primary(sg_page(&sg[i]), __KM_CACHE);
 
		frv_dcache_writeback((unsigned long) vaddr,
				     (unsigned long) vaddr + PAGE_SIZE);
 
	}
 
-	kunmap_atomic(vaddr, __KM_CACHE);
+	kunmap_atomic_primary(vaddr, __KM_CACHE);
	if (dampr2) {
		__set_DAMPR(2, dampr2);
		__set_IAMPR(2, dampr2);
8 changes: 4 additions & 4 deletions arch/frv/mm/cache-page.c
@@ -26,11 +26,11 @@ void flush_dcache_page(struct page *page)
 
	dampr2 = __get_DAMPR(2);
 
-	vaddr = kmap_atomic(page, __KM_CACHE);
+	vaddr = kmap_atomic_primary(page, __KM_CACHE);
 
	frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE);
 
-	kunmap_atomic(vaddr, __KM_CACHE);
+	kunmap_atomic_primary(vaddr, __KM_CACHE);
 
	if (dampr2) {
		__set_DAMPR(2, dampr2);
@@ -54,12 +54,12 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 
	dampr2 = __get_DAMPR(2);
 
-	vaddr = kmap_atomic(page, __KM_CACHE);
+	vaddr = kmap_atomic_primary(page, __KM_CACHE);
 
	start = (start & ~PAGE_MASK) | (unsigned long) vaddr;
	frv_cache_wback_inv(start, start + len);
 
-	kunmap_atomic(vaddr, __KM_CACHE);
+	kunmap_atomic_primary(vaddr, __KM_CACHE);
 
	if (dampr2) {
		__set_DAMPR(2, dampr2);
50 changes: 50 additions & 0 deletions arch/frv/mm/highmem.c
@@ -36,3 +36,53 @@ struct page *kmap_atomic_to_page(void *ptr)
 {
	return virt_to_page(ptr);
 }
+
+void *__kmap_atomic(struct page *page)
+{
+	unsigned long paddr;
+	int type;
+
+	pagefault_disable();
+	type = kmap_atomic_idx_push();
+	paddr = page_to_phys(page);
+
+	switch (type) {
+	/*
+	 * The first 4 primary maps are reserved for architecture code
+	 */
+	case 0:		return __kmap_atomic_primary(4, paddr, 6);
+	case 1:		return __kmap_atomic_primary(5, paddr, 7);
+	case 2:		return __kmap_atomic_primary(6, paddr, 8);
+	case 3:		return __kmap_atomic_primary(7, paddr, 9);
+	case 4:		return __kmap_atomic_primary(8, paddr, 10);
+
+	case 5 ... 5 + NR_TLB_LINES - 1:
+		return __kmap_atomic_secondary(type - 5, paddr);
+
+	default:
+		BUG();
+		return NULL;
+	}
+}
+EXPORT_SYMBOL(__kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+	int type = kmap_atomic_idx_pop();
+	switch (type) {
+	case 0:		__kunmap_atomic_primary(4, 6);	break;
+	case 1:		__kunmap_atomic_primary(5, 7);	break;
+	case 2:		__kunmap_atomic_primary(6, 8);	break;
+	case 3:		__kunmap_atomic_primary(7, 9);	break;
+	case 4:		__kunmap_atomic_primary(8, 10);	break;
+
+	case 5 ... 5 + NR_TLB_LINES - 1:
+		__kunmap_atomic_secondary(type - 5, kvaddr);
+		break;
+
+	default:
+		BUG();
+	}
+	pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
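
(FRV slot layout, as reconstructed from the hunks above: generic stack
indices 0-4 dispatch to primary maps 4-8, indices 5 and up to secondary
TLB lines, while primary maps 0-3 remain reserved for arch-internal
users, which now name their slot explicitly via kmap_atomic_primary()
-- illustrative only:)

	void *vaddr = kmap_atomic_primary(page, __KM_CACHE);
	frv_dcache_writeback((unsigned long) vaddr,
			     (unsigned long) vaddr + PAGE_SIZE);
	kunmap_atomic_primary(vaddr, __KM_CACHE);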
18 changes: 6 additions & 12 deletions arch/mips/include/asm/highmem.h
@@ -45,18 +45,12 @@ extern pte_t *pkmap_page_table;
 extern void * kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
 
-extern void *__kmap(struct page *page);
-extern void __kunmap(struct page *page);
-extern void *__kmap_atomic(struct page *page, enum km_type type);
-extern void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-extern struct page *__kmap_atomic_to_page(void *ptr);
-
-#define kmap			__kmap
-#define kunmap			__kunmap
-#define kmap_atomic		__kmap_atomic
-#define kunmap_atomic_notypecheck __kunmap_atomic_notypecheck
-#define kmap_atomic_to_page	__kmap_atomic_to_page
+extern void *kmap(struct page *page);
+extern void kunmap(struct page *page);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
+extern struct page *kmap_atomic_to_page(void *ptr);
 
 #define flush_cache_kmaps()	flush_cache_all()
 
50 changes: 27 additions & 23 deletions arch/mips/mm/highmem.c
@@ -9,7 +9,7 @@ static pte_t *kmap_pte;
 
 unsigned long highstart_pfn, highend_pfn;
 
-void *__kmap(struct page *page)
+void *kmap(struct page *page)
 {
	void *addr;
 
@@ -21,16 +21,16 @@ void *__kmap(struct page *page)
 
	return addr;
 }
-EXPORT_SYMBOL(__kmap);
+EXPORT_SYMBOL(kmap);
 
-void __kunmap(struct page *page)
+void kunmap(struct page *page)
 {
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
 }
-EXPORT_SYMBOL(__kunmap);
+EXPORT_SYMBOL(kunmap);
 
 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
@@ -41,17 +41,17 @@ EXPORT_SYMBOL(__kunmap);
  * kmaps are appropriate for short, tight code paths only.
  */
 
-void *__kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
-	enum fixed_addresses idx;
	unsigned long vaddr;
+	int idx, type;
 
	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -64,43 +64,47 @@ void *__kmap_atomic(struct page *page, enum km_type type)
 }
 EXPORT_SYMBOL(__kmap_atomic);
 
-void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+	int type;
 
	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}
 
-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+	type = kmap_atomic_idx_pop();
+#ifdef CONFIG_DEBUG_HIGHMEM
+	{
+		int idx = type + KM_TYPE_NR * smp_processor_id();
 
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	local_flush_tlb_one(vaddr);
-#endif
+		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 
+		/*
+		 * force other mappings to Oops if they'll try to access
+		 * this pte without first remap it
+		 */
+		pte_clear(&init_mm, vaddr, kmap_pte-idx);
+		local_flush_tlb_one(vaddr);
+	}
+#endif
	pagefault_enable();
 }
-EXPORT_SYMBOL(__kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
 /*
  * This is the same as kmap_atomic() but can map memory that doesn't
  * have a struct page associated with it.
  */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	enum fixed_addresses idx;
	unsigned long vaddr;
+	int idx, type;
 
	pagefault_disable();
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
@@ -109,7 +113,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
	return (void*) vaddr;
 }
 
-struct page *__kmap_atomic_to_page(void *ptr)
+struct page *kmap_atomic_to_page(void *ptr)
 {
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;
