diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index a98940e6424327..0ed18e673aba8b 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -36,6 +36,7 @@ config PARISC
 	select GENERIC_ATOMIC64 if !64BIT
 	select GENERIC_IRQ_PROBE
 	select GENERIC_PCI_IOMAP
+	select GENERIC_IOREMAP
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_ARCH_TOPOLOGY if SMP
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index c05e781be2f5bd..1c54f83d4f7899 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -2,6 +2,8 @@
 #ifndef _ASM_IO_H
 #define _ASM_IO_H
 
+#define ARCH_HAS_IOREMAP_WC
+
 #include <linux/types.h>
 #include <asm/pgtable.h>
 
@@ -125,12 +127,19 @@ static inline void gsc_writeq(unsigned long long val, unsigned long addr)
 /*
  * The standard PCI ioremap interfaces
  */
-void __iomem *ioremap(unsigned long offset, unsigned long size);
-#define ioremap_wc			ioremap
-#define ioremap_uc			ioremap
-#define pci_iounmap			pci_iounmap
+void __iomem *
+arch_ioremap(phys_addr_t *paddr, size_t size, unsigned long *prot_val);
+#define arch_ioremap arch_ioremap
+
+#define _PAGE_IOREMAP (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
+		       _PAGE_ACCESSED | _PAGE_NO_CACHE)
 
-extern void iounmap(const volatile void __iomem *addr);
+#define ioremap_wc(addr, size)	\
+	ioremap_prot((addr), (size), _PAGE_IOREMAP)
+#define ioremap_uc(addr, size)	\
+	ioremap_prot((addr), (size), _PAGE_IOREMAP)
+
+#define pci_iounmap			pci_iounmap
 
 void memset_io(volatile void __iomem *addr, unsigned char val, int count);
 void memcpy_fromio(void *dst, const volatile void __iomem *src, int count);
diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c
index 345ff0b6649935..28884757fad020 100644
--- a/arch/parisc/mm/ioremap.c
+++ b/arch/parisc/mm/ioremap.c
@@ -13,38 +13,19 @@
 #include <linux/io.h>
 #include <linux/mm.h>
 
-/*
- * Generic mapping function (not visible outside):
- */
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
+void __iomem *
+arch_ioremap(phys_addr_t *paddr, size_t size, unsigned long *prot_val)
 {
-	void __iomem *addr;
-	struct vm_struct *area;
-	unsigned long offset, last_addr;
-	pgprot_t pgprot;
+	phys_addr_t phys_addr = *paddr;
 
 #ifdef CONFIG_EISA
 	unsigned long end = phys_addr + size - 1;
 	/* Support EISA addresses */
 	if ((phys_addr >= 0x00080000 && end < 0x000fffff) ||
 	    (phys_addr >= 0x00500000 && end < 0x03bfffff))
-		phys_addr |= F_EXTEND(0xfc000000);
+		*paddr = phys_addr |= F_EXTEND(0xfc000000);
 #endif
 
-	/* Don't allow wraparound or zero size */
-	last_addr = phys_addr + size - 1;
-	if (!size || last_addr < phys_addr)
-		return NULL;
-
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
 	 */
@@ -58,43 +39,9 @@ void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
 		for (page = virt_to_page(t_addr);
 		     page <= virt_to_page(t_end); page++) {
 			if(!PageReserved(page))
-				return NULL;
+				return IOMEM_ERR_PTR(-EINVAL);
 		}
 	}
 
-	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
-			  _PAGE_ACCESSED | _PAGE_NO_CACHE);
-
-	/*
-	 * Mappings have to be page-aligned
-	 */
-	offset = phys_addr & ~PAGE_MASK;
-	phys_addr &= PAGE_MASK;
-	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
-
-	/*
-	 * Ok, go for it..
-	 */
-	area = get_vm_area(size, VM_IOREMAP);
-	if (!area)
-		return NULL;
-
-	addr = (void __iomem *) area->addr;
-	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
-			       phys_addr, pgprot)) {
-		vunmap(addr);
-		return NULL;
-	}
-
-	return (void __iomem *) (offset + (char __iomem *)addr);
-}
-EXPORT_SYMBOL(ioremap);
-
-void iounmap(const volatile void __iomem *io_addr)
-{
-	unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
-
-	if (is_vmalloc_addr((void *)addr))
-		vunmap((void *)addr);
+	return NULL;
 }
-EXPORT_SYMBOL(iounmap);
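
Note (not part of the patch): the sketch below is a simplified illustration, under the assumptions of this GENERIC_IOREMAP conversion, of how the common ioremap path is expected to consume the arch_ioremap() hook added above. The hook may rewrite *paddr (the EISA fixup), reject the request with an IOMEM_ERR_PTR(), or return NULL to fall through to the shared vmalloc-based mapping. The helper name generic_ioremap_sketch() is invented for illustration; it is not the actual mm/ioremap.c code.

#include <linux/err.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

static void __iomem *generic_ioremap_sketch(phys_addr_t paddr, size_t size,
					    unsigned long prot_val)
{
	unsigned long offset, vaddr;
	struct vm_struct *area;
	void __iomem *ret;

	/* Let the architecture fix up the address, map it itself, or veto it. */
	ret = arch_ioremap(&paddr, size, &prot_val);
	if (IS_ERR((void __force *)ret))
		return NULL;		/* e.g. IOMEM_ERR_PTR(-EINVAL) above */
	if (ret)
		return ret;		/* architecture provided the mapping */

	/* Page-align the request, remembering the sub-page offset. */
	offset = paddr & ~PAGE_MASK;
	paddr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	if (ioremap_page_range(vaddr, vaddr + size, paddr, __pgprot(prot_val))) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}

With ioremap_wc()/ioremap_uc() defined as above, a driver call such as ioremap_wc(bar_start, bar_len) ends up in ioremap_prot() with _PAGE_IOREMAP, so parisc keeps its uncached mapping semantics while the vm_area bookkeeping and iounmap() move to common code.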