Merge tag 'xtensa-20180225' of git://github.com/jcmvbkbc/linux-xtensa
Pull Xtensa fixes from Max Filippov:
 "Two fixes for reserved memory/DMA buffers allocation in high memory on
  xtensa architecture

   - fix memory accounting when reserved memory is in high memory region

   - fix DMA allocation from high memory"

* tag 'xtensa-20180225' of git://github.com/jcmvbkbc/linux-xtensa:
  xtensa: support DMA buffers in high memory
  xtensa: fix high memory/reserved memory collision
torvalds committed Feb 26, 2018
2 parents c23a757 + 6137e41 commit e1171ac
Showing 2 changed files with 93 additions and 17 deletions.
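For context, a minimal driver-side sketch of the path these changes sit behind: a consumer calls the generic DMA API, and on xtensa the request is served by xtensa_dma_alloc()/xtensa_dma_free() in the diff below. The device, buffer size and error handling here are hypothetical and not part of the commit.

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/* Hypothetical consumer: allocate and release a coherent DMA buffer.
 * With CMA placed in high memory, the allocation below can now be
 * satisfied through the remap path added in this merge.
 */
static int example_setup_dma(struct device *dev)
{
	dma_addr_t dma_handle;
	void *buf;

	buf = dma_alloc_coherent(dev, SZ_64K, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... hand dma_handle to the device, access buf from the CPU ... */

	dma_free_coherent(dev, SZ_64K, buf, dma_handle);
	return 0;
}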
40 changes: 30 additions & 10 deletions arch/xtensa/kernel/pci-dma.c
@@ -16,6 +16,7 @@
  */
 
 #include <linux/dma-contiguous.h>
+#include <linux/dma-direct.h>
 #include <linux/gfp.h>
 #include <linux/highmem.h>
 #include <linux/mm.h>
@@ -123,7 +124,7 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
			      unsigned long attrs)
 {
 	unsigned long ret;
-	unsigned long uncached = 0;
+	unsigned long uncached;
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	struct page *page = NULL;
 
@@ -144,15 +145,27 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
 	if (!page)
 		return NULL;
 
-	ret = (unsigned long)page_address(page);
+	*handle = phys_to_dma(dev, page_to_phys(page));
 
-	/* We currently don't support coherent memory outside KSEG */
+#ifdef CONFIG_MMU
+	if (PageHighMem(page)) {
+		void *p;
 
+		p = dma_common_contiguous_remap(page, size, VM_MAP,
+						pgprot_noncached(PAGE_KERNEL),
+						__builtin_return_address(0));
+		if (!p) {
+			if (!dma_release_from_contiguous(dev, page, count))
+				__free_pages(page, get_order(size));
+		}
+		return p;
+	}
+#endif
+	ret = (unsigned long)page_address(page);
 	BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
 	       ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
 
 	uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
-	*handle = virt_to_bus((void *)ret);
 	__invalidate_dcache_range(ret, size);
 
 	return (void *)uncached;
@@ -161,13 +174,20 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
 static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr,
 			    dma_addr_t dma_handle, unsigned long attrs)
 {
-	unsigned long addr = (unsigned long)vaddr +
-		XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
-	struct page *page = virt_to_page(addr);
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-	BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
-	       addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
+	unsigned long addr = (unsigned long)vaddr;
+	struct page *page;
+
+	if (addr >= XCHAL_KSEG_BYPASS_VADDR &&
+	    addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE) {
+		addr += XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
+		page = virt_to_page(addr);
+	} else {
+#ifdef CONFIG_MMU
+		dma_common_free_remap(vaddr, size, VM_MAP);
+#endif
+		page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
+	}
 
 	if (!dma_release_from_contiguous(dev, page, count))
 		__free_pages(page, get_order(size));
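A small standalone illustration (not kernel code) of the cached/bypass KSEG aliasing that the free path above relies on to tell lowmem buffers apart from highmem remaps. The KSEG_* constants are stand-ins for a common default xtensa configuration; the real XCHAL_KSEG_* values come from the core's variant headers.

#include <stdint.h>
#include <stdio.h>

/* Assumed values from a common xtensa core configuration; real kernels
 * get these from the variant's XCHAL_* definitions.
 */
#define KSEG_CACHED_VADDR  0xd0000000UL
#define KSEG_BYPASS_VADDR  0xd8000000UL
#define KSEG_SIZE          0x08000000UL  /* 128 MiB */

int main(void)
{
	unsigned long cached = KSEG_CACHED_VADDR + 0x123000; /* example lowmem vaddr */
	unsigned long uncached = cached + KSEG_BYPASS_VADDR - KSEG_CACHED_VADDR;

	/* This is the range test xtensa_dma_free() uses to recognise a
	 * lowmem (KSEG bypass) buffer and convert it back to a cached
	 * address for virt_to_page(); anything outside it must have come
	 * from the highmem remap path.
	 */
	if (uncached >= KSEG_BYPASS_VADDR &&
	    uncached - KSEG_BYPASS_VADDR < KSEG_SIZE)
		printf("bypass alias 0x%lx -> cached 0x%lx\n",
		       uncached,
		       uncached + KSEG_CACHED_VADDR - KSEG_BYPASS_VADDR);
	return 0;
}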
70 changes: 63 additions & 7 deletions arch/xtensa/mm/init.c
@@ -79,19 +79,75 @@ void __init zones_init(void)
 	free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
 }
 
+#ifdef CONFIG_HIGHMEM
+static void __init free_area_high(unsigned long pfn, unsigned long end)
+{
+	for (; pfn < end; pfn++)
+		free_highmem_page(pfn_to_page(pfn));
+}
+
+static void __init free_highpages(void)
+{
+	unsigned long max_low = max_low_pfn;
+	struct memblock_region *mem, *res;
+
+	reset_all_zones_managed_pages();
+	/* set highmem page free */
+	for_each_memblock(memory, mem) {
+		unsigned long start = memblock_region_memory_base_pfn(mem);
+		unsigned long end = memblock_region_memory_end_pfn(mem);
+
+		/* Ignore complete lowmem entries */
+		if (end <= max_low)
+			continue;
+
+		if (memblock_is_nomap(mem))
+			continue;
+
+		/* Truncate partial highmem entries */
+		if (start < max_low)
+			start = max_low;
+
+		/* Find and exclude any reserved regions */
+		for_each_memblock(reserved, res) {
+			unsigned long res_start, res_end;
+
+			res_start = memblock_region_reserved_base_pfn(res);
+			res_end = memblock_region_reserved_end_pfn(res);
+
+			if (res_end < start)
+				continue;
+			if (res_start < start)
+				res_start = start;
+			if (res_start > end)
+				res_start = end;
+			if (res_end > end)
+				res_end = end;
+			if (res_start != start)
+				free_area_high(start, res_start);
+			start = res_end;
+			if (start == end)
+				break;
+		}
+
+		/* And now free anything which remains */
+		if (start < end)
+			free_area_high(start, end);
+	}
+}
+#else
+static void __init free_highpages(void)
+{
+}
+#endif
+
 /*
  * Initialize memory pages.
  */
 
 void __init mem_init(void)
 {
-#ifdef CONFIG_HIGHMEM
-	unsigned long tmp;
-
-	reset_all_zones_managed_pages();
-	for (tmp = max_low_pfn; tmp < max_pfn; tmp++)
-		free_highmem_page(pfn_to_page(tmp));
-#endif
+	free_highpages();
 
 	max_mapnr = max_pfn - ARCH_PFN_OFFSET;
 	high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
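To make the accounting fix concrete, here is a standalone sketch (plain C, with made-up pfn numbers) of the clamp-and-split walk that free_highpages() performs: highmem pages are freed region by region, and any reserved ranges are skipped so they are never handed to the page allocator or counted as free.

#include <stdio.h>

struct range { unsigned long start, end; };  /* [start, end) in pfns */

/* Stand-in for the kernel's free_area_high(): just report the span. */
static void free_area_high(unsigned long pfn, unsigned long end)
{
	printf("  freeing pfns [%lu, %lu)\n", pfn, end);
}

int main(void)
{
	unsigned long max_low = 1000;             /* first highmem pfn */
	struct range mem = { 900, 1500 };         /* region spans lowmem and highmem */
	struct range reserved[] = { { 950, 1100 }, { 1200, 1250 } };
	unsigned long start = mem.start, end = mem.end;

	if (end <= max_low)
		return 0;               /* entirely lowmem: nothing to free here */
	if (start < max_low)
		start = max_low;        /* truncate the lowmem part */

	for (unsigned i = 0; i < sizeof(reserved) / sizeof(reserved[0]); i++) {
		unsigned long res_start = reserved[i].start;
		unsigned long res_end = reserved[i].end;

		if (res_end < start)
			continue;
		if (res_start < start)
			res_start = start;
		if (res_start > end)
			res_start = end;
		if (res_end > end)
			res_end = end;
		if (res_start != start)
			free_area_high(start, res_start);
		start = res_end;
		if (start == end)
			break;
	}
	if (start < end)
		free_area_high(start, end);
	return 0;
}

With these sample numbers the sketch frees [1100, 1200) and [1250, 1500), i.e. the highmem part of the region minus the reserved ranges, which is the behaviour the kernel loop reproduces over memblock regions.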
