
Commit 122ea29

Merge remote-tracking branch 'xen-tip/linux-next'

2 parents 42ee0b7 + 63401a5

11 files changed, 213 insertions(+), 224 deletions(-)

arch/arm/include/asm/xen/page-coherent.h
Lines changed: 5 additions & 0 deletions

@@ -5,6 +5,11 @@
 #include <linux/dma-attrs.h>
 #include <linux/dma-mapping.h>
 
+static inline bool xen_is_dma_coherent(struct device *dev)
+{
+        return (__generic_dma_ops(dev) == &arm_coherent_dma_ops);
+}
+
 static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
         dma_addr_t *dma_handle, gfp_t flags,
         struct dma_attrs *attrs)

arch/arm/include/asm/xen/page.h
Lines changed: 4 additions & 0 deletions

@@ -107,4 +107,8 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 #define xen_remap(cookie, size) ioremap_cache((cookie), (size))
 #define xen_unmap(cookie) iounmap((cookie))
 
+bool xen_arch_need_swiotlb(struct device *dev,
+                           unsigned long pfn,
+                           unsigned long mfn);
+
 #endif /* _ASM_ARM_XEN_PAGE_H */
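
For context: the new xen_arch_need_swiotlb() hook (implemented in arch/arm/xen/mm.c below) lets common Xen DMA code ask the architecture whether a given pfn/mfn pair still has to be bounced through swiotlb. A minimal, hypothetical sketch of a caller follows; example_map(), map_page_direct() and map_page_bounced() are invented names, and the pfn/mfn derivation assumes the ARM pfn_to_mfn() helper.

/*
 * Hypothetical sketch only -- not part of this commit.  It shows roughly
 * how a mapping path could consult xen_arch_need_swiotlb() to decide
 * whether a buffer must go through a bounce buffer.  map_page_direct()
 * and map_page_bounced() are invented stand-ins for the two outcomes.
 */
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <asm/xen/page.h>

dma_addr_t map_page_direct(struct device *dev, phys_addr_t phys, size_t size);  /* hypothetical */
dma_addr_t map_page_bounced(struct device *dev, phys_addr_t phys, size_t size); /* hypothetical */

static dma_addr_t example_map(struct device *dev, struct page *page,
                              unsigned long offset, size_t size)
{
        phys_addr_t phys = page_to_phys(page) + offset;
        unsigned long pfn = PFN_DOWN(phys);
        unsigned long mfn = pfn_to_mfn(pfn);

        /* coherent device, local page, or a usable cache-flush hypercall:
         * no bounce buffer is required */
        if (!xen_arch_need_swiotlb(dev, pfn, mfn))
                return map_page_direct(dev, phys, size);

        /* otherwise fall back to swiotlb bouncing */
        return map_page_bounced(dev, phys, size);
}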

arch/arm/xen/Makefile
Lines changed: 1 addition & 1 deletion

@@ -1 +1 @@
-obj-y		:= enlighten.o hypercall.o grant-table.o p2m.o mm.o mm32.o
+obj-y		:= enlighten.o hypercall.o grant-table.o p2m.o mm.o

arch/arm/xen/enlighten.c
Lines changed: 0 additions & 5 deletions

@@ -261,11 +261,6 @@ static int __init xen_guest_init(void)
 
         xen_setup_features();
 
-        if (!xen_feature(XENFEAT_grant_map_identity)) {
-                pr_warn("Please upgrade your Xen.\n"
-                        "If your platform has any non-coherent DMA devices, they won't work properly.\n");
-        }
-
         if (xen_feature(XENFEAT_dom0))
                 xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
         else

arch/arm/xen/mm.c
Lines changed: 160 additions & 0 deletions

@@ -1,5 +1,8 @@
+#include <linux/cpu.h>
+#include <linux/dma-mapping.h>
 #include <linux/bootmem.h>
 #include <linux/gfp.h>
+#include <linux/highmem.h>
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/types.h>
@@ -8,14 +11,163 @@
 #include <linux/swiotlb.h>
 
 #include <xen/xen.h>
+#include <xen/interface/grant_table.h>
 #include <xen/interface/memory.h>
 #include <xen/swiotlb-xen.h>
 
 #include <asm/cacheflush.h>
 #include <asm/xen/page.h>
+#include <asm/xen/page-coherent.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
 
+
+#ifdef CONFIG_ARM64
+static inline void dmac_map_area(const void *start, size_t size, int dir)
+{
+        return __dma_map_area(start, size, dir);
+}
+
+static inline void dmac_unmap_area(const void *start, size_t size, int dir)
+{
+        return __dma_unmap_area(start, size, dir);
+}
+
+static inline bool cache_is_vipt_nonaliasing(void)
+{
+        return true;
+}
+
+static inline void *kmap_high_get(struct page *page)
+{
+        return NULL;
+}
+
+static inline void kunmap_high(struct page *page) {}
+#endif
+
+static bool hypercall_cflush = false;
+
+
+/* functions called by SWIOTLB */
+
+static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
+        size_t size, enum dma_data_direction dir,
+        void (*op)(const void *, size_t, int))
+{
+        unsigned long pfn;
+        size_t left = size;
+
+        pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
+        offset %= PAGE_SIZE;
+
+        do {
+                size_t len = left;
+                void *vaddr;
+
+                /* buffers in highmem or foreign pages cannot cross page
+                 * boundaries */
+                if (len + offset > PAGE_SIZE)
+                        len = PAGE_SIZE - offset;
+
+                if (!pfn_valid(pfn))
+                {
+                        struct gnttab_cache_flush cflush;
+
+                        cflush.op = 0;
+                        cflush.a.dev_bus_addr = pfn << PAGE_SHIFT;
+                        cflush.offset = offset;
+                        cflush.length = len;
+
+                        if (op == dmac_unmap_area && dir != DMA_TO_DEVICE)
+                                cflush.op = GNTTAB_CACHE_INVAL;
+                        if (op == dmac_map_area) {
+                                if (dir == DMA_FROM_DEVICE)
+                                        cflush.op = GNTTAB_CACHE_INVAL;
+                                else
+                                        cflush.op = GNTTAB_CACHE_CLEAN;
+                        }
+                        if (cflush.op)
+                                HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
+                } else {
+                        struct page *page = pfn_to_page(pfn);
+
+                        if (PageHighMem(page)) {
+                                if (cache_is_vipt_nonaliasing()) {
+                                        vaddr = kmap_atomic(page);
+                                        op(vaddr + offset, len, dir);
+                                        kunmap_atomic(vaddr);
+                                } else {
+                                        vaddr = kmap_high_get(page);
+                                        if (vaddr) {
+                                                op(vaddr + offset, len, dir);
+                                                kunmap_high(page);
+                                        }
+                                }
+                        } else {
+                                vaddr = page_address(page) + offset;
+                                op(vaddr, len, dir);
+                        }
+                }
+
+                offset = 0;
+                pfn++;
+                left -= len;
+        } while (left);
+}
+
+static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
+                size_t size, enum dma_data_direction dir)
+{
+        /* Cannot use __dma_page_dev_to_cpu because we don't have a
+         * struct page for handle */
+
+        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_unmap_area);
+}
+
+static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
+                size_t size, enum dma_data_direction dir)
+{
+
+        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_map_area);
+}
+
+void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+                size_t size, enum dma_data_direction dir,
+                struct dma_attrs *attrs)
+
+{
+        if (xen_is_dma_coherent(hwdev))
+                return;
+        if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+                return;
+
+        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void xen_dma_sync_single_for_cpu(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+        if (xen_is_dma_coherent(hwdev))
+                return;
+        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void xen_dma_sync_single_for_device(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+        if (xen_is_dma_coherent(hwdev))
+                return;
+        __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
+}
+
+bool xen_arch_need_swiotlb(struct device *dev,
+                           unsigned long pfn,
+                           unsigned long mfn)
+{
+        return (!hypercall_cflush && (pfn != mfn) && !xen_is_dma_coherent(dev));
+}
+
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
                                  unsigned int address_bits,
                                  dma_addr_t *dma_handle)
@@ -56,10 +208,18 @@ static struct dma_map_ops xen_swiotlb_dma_ops = {
 
 int __init xen_mm_init(void)
 {
+        struct gnttab_cache_flush cflush;
         if (!xen_initial_domain())
                 return 0;
         xen_swiotlb_init(1, false);
         xen_dma_ops = &xen_swiotlb_dma_ops;
+
+        cflush.op = 0;
+        cflush.a.dev_bus_addr = 0;
+        cflush.offset = 0;
+        cflush.length = 0;
+        if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
+                hypercall_cflush = true;
         return 0;
 }
 arch_initcall(xen_mm_init);
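
For context: the sync helpers added above do cache maintenance either with local cache ops (pages with a valid local pfn) or via the GNTTABOP_cache_flush hypercall (foreign pages), and xen_mm_init() probes for that hypercall by issuing a no-op flush and checking for -ENOSYS. A hypothetical usage sketch of the helpers around one transfer follows; example_do_transfer() and its parameters are illustrative, and the buffer is assumed to have been mapped for the device already.

/*
 * Hypothetical sketch only -- not part of this commit.  It shows the
 * ordering a caller that only holds a dma_addr_t (no struct page) would
 * follow around a single DMA transfer, using the helpers defined above.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/xen/page-coherent.h>      /* assumed to make the xen_dma_* helpers visible */

static void example_do_transfer(struct device *dev, dma_addr_t dev_addr,
                                size_t size, enum dma_data_direction dir)
{
        /* clean (or, for DMA_FROM_DEVICE, invalidate) CPU caches before
         * the device touches the buffer */
        xen_dma_sync_single_for_device(dev, dev_addr, size, dir);

        /* ... the device performs its DMA here ... */

        /* invalidate stale cache lines before the CPU reads data the
         * device may have written (effectively a no-op for DMA_TO_DEVICE) */
        xen_dma_sync_single_for_cpu(dev, dev_addr, size, dir);
}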
