Skip to content

Commit

Permalink
xtensa: mm: Convert to GENERIC_IOREMAP
Browse files Browse the repository at this point in the history
By taking the GENERIC_IOREMAP method, the generic ioremap_prot() and
iounmap() are visible and available to the arch. The arch only needs to
provide an implementation of arch_ioremap() or arch_iounmap() if there's
arch-specific handling needed in its ioremap() or iounmap(). This
change simplifies the implementation by removing code duplicated with the
generic ioremap() and iounmap(), and has equivalent functionality
as before.

For xtensa, add the hooks arch_ioremap() and arch_iounmap() to carry out
xtensa's special handling during ioremap() and iounmap(). Then define and
implement its own ioremap() and ioremap_cache() via ioremap_prot().

Signed-off-by: Baoquan He <bhe@redhat.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: linux-xtensa@linux-xtensa.org
  • Loading branch information
Baoquan He authored and intel-lab-lkp committed Oct 9, 2022
1 parent d6334f0 commit 1330d43
Show file tree
Hide file tree
Showing 3 changed files with 30 additions and 66 deletions.
1 change: 1 addition & 0 deletions arch/xtensa/Kconfig
Expand Up @@ -29,6 +29,7 @@ config XTENSA
select GENERIC_IOREMAP if MMU
select GENERIC_LIB_UCMPDI2
select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
Expand Down
39 changes: 17 additions & 22 deletions arch/xtensa/include/asm/io.h
Expand Up @@ -16,30 +16,37 @@
#include <asm/vectors.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/pgtable.h>

#include <linux/types.h>

#define IOADDR(x) (XCHAL_KIO_BYPASS_VADDR + (x))
#define IO_SPACE_LIMIT ~0
#define PCI_IOBASE ((void __iomem *)XCHAL_KIO_BYPASS_VADDR)

#ifdef CONFIG_MMU

void __iomem *xtensa_ioremap_nocache(unsigned long addr, unsigned long size);
void __iomem *xtensa_ioremap_cache(unsigned long addr, unsigned long size);
void xtensa_iounmap(volatile void __iomem *addr);

/*
* Return the virtual address for the specified bus memory.
* I/O memory mapping functions.
*/
void __iomem *
arch_ioremap(phys_addr_t *paddr, size_t size, unsigned long *prot_val);
#define arch_ioremap arch_ioremap

bool arch_iounmap(void __iomem *addr);
#define arch_iounmap arch_iounmap

void __iomem *ioremap_prot(phys_addr_t paddr, size_t size,
unsigned long prot);

/*
 * Map an uncached (bypass) view of bus memory.
 *
 * Physical addresses inside the fixed KIO window are translated
 * directly to the bypass KIO virtual range (no page tables needed);
 * everything else goes through the generic ioremap_prot() with a
 * non-cached page protection.
 */
static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{
	if (offset >= XCHAL_KIO_PADDR
	    && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
		return (void *)(offset - XCHAL_KIO_PADDR + XCHAL_KIO_BYPASS_VADDR);
	else
		return ioremap_prot(offset, size,
				    pgprot_val(pgprot_noncached(PAGE_KERNEL)));
}
#define ioremap ioremap

/*
 * Map a cacheable view of bus memory.
 *
 * Physical addresses inside the fixed KIO window are translated
 * directly to the cached KIO virtual range; everything else goes
 * through the generic ioremap_prot() with normal kernel (cached)
 * page protection.  Unmapping of KIO addresses is filtered out in
 * arch_iounmap().
 */
static inline void __iomem *ioremap_cache(unsigned long offset,
					  unsigned long size)
{
	if (offset >= XCHAL_KIO_PADDR
	    && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
		return (void *)(offset - XCHAL_KIO_PADDR + XCHAL_KIO_CACHED_VADDR);
	else
		return ioremap_prot(offset, size, pgprot_val(PAGE_KERNEL));
}
#define ioremap_cache ioremap_cache

#endif /* CONFIG_MMU */

#include <asm-generic/io.h>

Expand Down
56 changes: 12 additions & 44 deletions arch/xtensa/mm/ioremap.c
Expand Up @@ -6,60 +6,28 @@
*/

#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

/*
 * Arch hook called by the generic ioremap_prot().
 *
 * Xtensa has no arch-special mapping to set up here; the only
 * arch-specific action is to warn when somebody attempts to remap
 * normal RAM (a valid pfn), which is usually a bug.  Returning NULL
 * tells the generic code to go ahead and create the mapping itself.
 */
void __iomem *
arch_ioremap(phys_addr_t *paddr, size_t size, unsigned long *prot_val)
{
	unsigned long pfn = __phys_to_pfn((*paddr));

	/* Remapping system RAM is almost certainly an error. */
	WARN_ON(pfn_valid(pfn));

	/* NULL: no arch-special mapping, fall through to generic code. */
	return NULL;
}

void __iomem *xtensa_ioremap_cache(unsigned long addr, unsigned long size)
bool arch_iounmap(void __iomem *addr)
{
return xtensa_ioremap(addr, size, PAGE_KERNEL);
}
EXPORT_SYMBOL(xtensa_ioremap_cache);
unsigned long va = (unsigned long) addr;

void xtensa_iounmap(volatile void __iomem *io_addr)
{
void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
if ((va >= XCHAL_KIO_CACHED_VADDR &&
va - XCHAL_KIO_CACHED_VADDR < XCHAL_KIO_SIZE) ||
(va >= XCHAL_KIO_BYPASS_VADDR &&
va - XCHAL_KIO_BYPASS_VADDR < XCHAL_KIO_SIZE))
return false;

vunmap(addr);
return true;
}
EXPORT_SYMBOL(xtensa_iounmap);

0 comments on commit 1330d43

Please sign in to comment.