Skip to content

Commit a254738

Browse files
committed
drivers: dma-contiguous: clean source code and prepare for device tree
This patch cleans the initialization of dma contiguous framework. The all-in-one dma_declare_contiguous() function is now separated into dma_contiguous_reserve_area() which only steals the memory from the memblock allocator and dma_contiguous_add_device() function, which assigns given device to the specified reserved memory area. This improves the flexibility in defining contiguous memory areas and assigning device to them, because now it is possible to assign more than one device to the given contiguous memory area. Such split in initialization procedure is also required for upcoming device tree support. Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com> Acked-by: Kyungmin Park <kyungmin.park@samsung.com> Acked-by: Michal Nazarewicz <mina86@mina86.com> Acked-by: Tomasz Figa <t.figa@samsung.com>
1 parent f7d8f1e commit a254738

File tree

6 files changed

+105
-108
lines changed

6 files changed

+105
-108
lines changed

arch/arm/include/asm/dma-contiguous.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,6 @@
55
#ifdef CONFIG_DMA_CMA
66

77
#include <linux/types.h>
8-
#include <asm-generic/dma-contiguous.h>
98

109
void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
1110

arch/x86/include/asm/dma-contiguous.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,6 @@
44
#ifdef __KERNEL__
55

66
#include <linux/types.h>
7-
#include <asm-generic/dma-contiguous.h>
87

98
static inline void
109
dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }

drivers/base/dma-contiguous.c

Lines changed: 44 additions & 75 deletions
Original file line numberDiff line numberDiff line change
@@ -96,7 +96,7 @@ static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
9696
#endif
9797

9898
/**
99-
* dma_contiguous_reserve() - reserve area for contiguous memory handling
99+
* dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
100100
* @limit: End address of the reserved memory (optional, 0 for any).
101101
*
102102
* This function reserves memory from early allocator. It should be
@@ -124,22 +124,29 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
124124
#endif
125125
}
126126

127-
if (selected_size) {
127+
if (selected_size && !dma_contiguous_default_area) {
128128
pr_debug("%s: reserving %ld MiB for global area\n", __func__,
129129
(unsigned long)selected_size / SZ_1M);
130130

131-
dma_declare_contiguous(NULL, selected_size, 0, limit);
131+
dma_contiguous_reserve_area(selected_size, 0, limit,
132+
&dma_contiguous_default_area);
132133
}
133134
};
134135

135136
static DEFINE_MUTEX(cma_mutex);
136137

137-
static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
138+
static int __init cma_activate_area(struct cma *cma)
138139
{
139-
unsigned long pfn = base_pfn;
140-
unsigned i = count >> pageblock_order;
140+
int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
141+
unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
142+
unsigned i = cma->count >> pageblock_order;
141143
struct zone *zone;
142144

145+
cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
146+
147+
if (!cma->bitmap)
148+
return -ENOMEM;
149+
143150
WARN_ON_ONCE(!pfn_valid(pfn));
144151
zone = page_zone(pfn_to_page(pfn));
145152

@@ -153,92 +160,53 @@ static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
153160
}
154161
init_cma_reserved_pageblock(pfn_to_page(base_pfn));
155162
} while (--i);
156-
return 0;
157-
}
158-
159-
static __init struct cma *cma_create_area(unsigned long base_pfn,
160-
unsigned long count)
161-
{
162-
int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
163-
struct cma *cma;
164-
int ret = -ENOMEM;
165-
166-
pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
167-
168-
cma = kmalloc(sizeof *cma, GFP_KERNEL);
169-
if (!cma)
170-
return ERR_PTR(-ENOMEM);
171-
172-
cma->base_pfn = base_pfn;
173-
cma->count = count;
174-
cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
175163

176-
if (!cma->bitmap)
177-
goto no_mem;
178-
179-
ret = cma_activate_area(base_pfn, count);
180-
if (ret)
181-
goto error;
182-
183-
pr_debug("%s: returned %p\n", __func__, (void *)cma);
184-
return cma;
185-
186-
error:
187-
kfree(cma->bitmap);
188-
no_mem:
189-
kfree(cma);
190-
return ERR_PTR(ret);
164+
return 0;
191165
}
192166

193-
static struct cma_reserved {
194-
phys_addr_t start;
195-
unsigned long size;
196-
struct device *dev;
197-
} cma_reserved[MAX_CMA_AREAS] __initdata;
198-
static unsigned cma_reserved_count __initdata;
167+
static struct cma cma_areas[MAX_CMA_AREAS];
168+
static unsigned cma_area_count;
199169

200170
static int __init cma_init_reserved_areas(void)
201171
{
202-
struct cma_reserved *r = cma_reserved;
203-
unsigned i = cma_reserved_count;
204-
205-
pr_debug("%s()\n", __func__);
172+
int i;
206173

207-
for (; i; --i, ++r) {
208-
struct cma *cma;
209-
cma = cma_create_area(PFN_DOWN(r->start),
210-
r->size >> PAGE_SHIFT);
211-
if (!IS_ERR(cma))
212-
dev_set_cma_area(r->dev, cma);
174+
for (i = 0; i < cma_area_count; i++) {
175+
int ret = cma_activate_area(&cma_areas[i]);
176+
if (ret)
177+
return ret;
213178
}
179+
214180
return 0;
215181
}
216182
core_initcall(cma_init_reserved_areas);
217183

218184
/**
219-
* dma_declare_contiguous() - reserve area for contiguous memory handling
220-
* for particular device
221-
* @dev: Pointer to device structure.
222-
* @size: Size of the reserved memory.
223-
* @base: Start address of the reserved memory (optional, 0 for any).
185+
* dma_contiguous_reserve_area() - reserve custom contiguous area
186+
* @size: Size of the reserved area (in bytes),
187+
* @base: Base address of the reserved area optional, use 0 for any
224188
* @limit: End address of the reserved memory (optional, 0 for any).
189+
* @res_cma: Pointer to store the created cma region.
225190
*
226-
* This function reserves memory for specified device. It should be
227-
* called by board specific code when early allocator (memblock or bootmem)
228-
* is still activate.
191+
* This function reserves memory from early allocator. It should be
192+
* called by arch specific code once the early allocator (memblock or bootmem)
193+
* has been activated and all other subsystems have already allocated/reserved
194+
* memory. This function allows to create custom reserved areas for specific
195+
* devices.
229196
*/
230-
int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
231-
phys_addr_t base, phys_addr_t limit)
197+
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
198+
phys_addr_t limit, struct cma **res_cma)
232199
{
233-
struct cma_reserved *r = &cma_reserved[cma_reserved_count];
200+
struct cma *cma = &cma_areas[cma_area_count];
234201
phys_addr_t alignment;
202+
int ret = 0;
235203

236204
pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
237205
(unsigned long)size, (unsigned long)base,
238206
(unsigned long)limit);
239207

240208
/* Sanity checks */
241-
if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
209+
if (cma_area_count == ARRAY_SIZE(cma_areas)) {
242210
pr_err("Not enough slots for CMA reserved regions!\n");
243211
return -ENOSPC;
244212
}
@@ -256,7 +224,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
256224
if (base) {
257225
if (memblock_is_region_reserved(base, size) ||
258226
memblock_reserve(base, size) < 0) {
259-
base = -EBUSY;
227+
ret = -EBUSY;
260228
goto err;
261229
}
262230
} else {
@@ -266,7 +234,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
266234
*/
267235
phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
268236
if (!addr) {
269-
base = -ENOMEM;
237+
ret = -ENOMEM;
270238
goto err;
271239
} else {
272240
base = addr;
@@ -277,10 +245,11 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
277245
* Each reserved area must be initialised later, when more kernel
278246
* subsystems (like slab allocator) are available.
279247
*/
280-
r->start = base;
281-
r->size = size;
282-
r->dev = dev;
283-
cma_reserved_count++;
248+
cma->base_pfn = PFN_DOWN(base);
249+
cma->count = size >> PAGE_SHIFT;
250+
*res_cma = cma;
251+
cma_area_count++;
252+
284253
pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
285254
(unsigned long)base);
286255

@@ -289,7 +258,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
289258
return 0;
290259
err:
291260
pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
292-
return base;
261+
return ret;
293262
}
294263

295264
/**

include/asm-generic/dma-contiguous.h

Lines changed: 0 additions & 28 deletions
This file was deleted.

include/linux/device.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -711,7 +711,7 @@ struct device {
711711

712712
struct dma_coherent_mem *dma_mem; /* internal for coherent mem
713713
override */
714-
#ifdef CONFIG_CMA
714+
#ifdef CONFIG_DMA_CMA
715715
struct cma *cma_area; /* contiguous memory area for dma
716716
allocations */
717717
#endif

include/linux/dma-contiguous.h

Lines changed: 60 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -67,9 +67,53 @@ struct device;
6767

6868
extern struct cma *dma_contiguous_default_area;
6969

70+
static inline struct cma *dev_get_cma_area(struct device *dev)
71+
{
72+
if (dev && dev->cma_area)
73+
return dev->cma_area;
74+
return dma_contiguous_default_area;
75+
}
76+
77+
static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
78+
{
79+
if (dev)
80+
dev->cma_area = cma;
81+
}
82+
83+
static inline void dma_contiguous_set_default(struct cma *cma)
84+
{
85+
dma_contiguous_default_area = cma;
86+
}
87+
7088
void dma_contiguous_reserve(phys_addr_t addr_limit);
71-
int dma_declare_contiguous(struct device *dev, phys_addr_t size,
72-
phys_addr_t base, phys_addr_t limit);
89+
90+
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
91+
phys_addr_t limit, struct cma **res_cma);
92+
93+
/**
94+
* dma_declare_contiguous() - reserve area for contiguous memory handling
95+
* for particular device
96+
* @dev: Pointer to device structure.
97+
* @size: Size of the reserved memory.
98+
* @base: Start address of the reserved memory (optional, 0 for any).
99+
* @limit: End address of the reserved memory (optional, 0 for any).
100+
*
101+
* This function reserves memory for specified device. It should be
102+
* called by board specific code when early allocator (memblock or bootmem)
103+
* is still activate.
104+
*/
105+
106+
static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
107+
phys_addr_t base, phys_addr_t limit)
108+
{
109+
struct cma *cma;
110+
int ret;
111+
ret = dma_contiguous_reserve_area(size, base, limit, &cma);
112+
if (ret == 0)
113+
dev_set_cma_area(dev, cma);
114+
115+
return ret;
116+
}
73117

74118
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
75119
unsigned int order);
@@ -80,8 +124,22 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
80124

81125
#define MAX_CMA_AREAS (0)
82126

127+
static inline struct cma *dev_get_cma_area(struct device *dev)
128+
{
129+
return NULL;
130+
}
131+
132+
static inline void dev_set_cma_area(struct device *dev, struct cma *cma) { }
133+
134+
static inline void dma_contiguous_set_default(struct cma *cma) { }
135+
83136
static inline void dma_contiguous_reserve(phys_addr_t limit) { }
84137

138+
static inline int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
139+
phys_addr_t limit, struct cma **res_cma) {
140+
return -ENOSYS;
141+
}
142+
85143
static inline
86144
int dma_declare_contiguous(struct device *dev, phys_addr_t size,
87145
phys_addr_t base, phys_addr_t limit)

0 commit comments

Comments
 (0)