@@ -96,7 +96,7 @@ static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
 #endif
 
 /**
- * dma_contiguous_reserve() - reserve area for contiguous memory handling
+ * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
  * @limit: End address of the reserved memory (optional, 0 for any).
  *
  * This function reserves memory from early allocator. It should be
@@ -124,22 +124,29 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
 #endif
 	}
 
-	if (selected_size) {
+	if (selected_size && !dma_contiguous_default_area) {
 		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
 			 (unsigned long)selected_size / SZ_1M);
 
-		dma_declare_contiguous(NULL, selected_size, 0, limit);
+		dma_contiguous_reserve_area(selected_size, 0, limit,
+					    &dma_contiguous_default_area);
 	}
 };
 
 static DEFINE_MUTEX(cma_mutex);
 
-static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
+static int __init cma_activate_area(struct cma *cma)
 {
-	unsigned long pfn = base_pfn;
-	unsigned i = count >> pageblock_order;
+	int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
+	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
+	unsigned i = cma->count >> pageblock_order;
 	struct zone *zone;
 
+	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+
+	if (!cma->bitmap)
+		return -ENOMEM;
+
 	WARN_ON_ONCE(!pfn_valid(pfn));
 	zone = page_zone(pfn_to_page(pfn));
 
@@ -153,92 +160,53 @@ static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
 		}
 		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
 	} while (--i);
-	return 0;
-}
-
-static __init struct cma *cma_create_area(unsigned long base_pfn,
-				     unsigned long count)
-{
-	int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
-	struct cma *cma;
-	int ret = -ENOMEM;
-
-	pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
-
-	cma = kmalloc(sizeof *cma, GFP_KERNEL);
-	if (!cma)
-		return ERR_PTR(-ENOMEM);
-
-	cma->base_pfn = base_pfn;
-	cma->count = count;
-	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
 
-	if (!cma->bitmap)
-		goto no_mem;
-
-	ret = cma_activate_area(base_pfn, count);
-	if (ret)
-		goto error;
-
-	pr_debug("%s: returned %p\n", __func__, (void *)cma);
-	return cma;
-
-error:
-	kfree(cma->bitmap);
-no_mem:
-	kfree(cma);
-	return ERR_PTR(ret);
+	return 0;
 }
 
-static struct cma_reserved {
-	phys_addr_t start;
-	unsigned long size;
-	struct device *dev;
-} cma_reserved[MAX_CMA_AREAS] __initdata;
-static unsigned cma_reserved_count __initdata;
+static struct cma cma_areas[MAX_CMA_AREAS];
+static unsigned cma_area_count;
 
 static int __init cma_init_reserved_areas(void)
 {
-	struct cma_reserved *r = cma_reserved;
-	unsigned i = cma_reserved_count;
-
-	pr_debug("%s()\n", __func__);
+	int i;
 
-	for (; i; --i, ++r) {
-		struct cma *cma;
-		cma = cma_create_area(PFN_DOWN(r->start),
-				      r->size >> PAGE_SHIFT);
-		if (!IS_ERR(cma))
-			dev_set_cma_area(r->dev, cma);
+	for (i = 0; i < cma_area_count; i++) {
+		int ret = cma_activate_area(&cma_areas[i]);
+		if (ret)
+			return ret;
 	}
+
 	return 0;
 }
 core_initcall(cma_init_reserved_areas);
 
 /**
- * dma_declare_contiguous() - reserve area for contiguous memory handling
- *			      for particular device
- * @dev:   Pointer to device structure.
- * @size:  Size of the reserved memory.
- * @base:  Start address of the reserved memory (optional, 0 for any).
+ * dma_contiguous_reserve_area() - reserve custom contiguous area
+ * @size: Size of the reserved area (in bytes),
+ * @base: Base address of the reserved area optional, use 0 for any
  * @limit: End address of the reserved memory (optional, 0 for any).
+ * @res_cma: Pointer to store the created cma region.
  *
- * This function reserves memory for specified device. It should be
- * called by board specific code when early allocator (memblock or bootmem)
- * is still activate.
+ * This function reserves memory from early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. This function allows to create custom reserved areas for specific
+ * devices.
  */
-int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
-				  phys_addr_t base, phys_addr_t limit)
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+				       phys_addr_t limit, struct cma **res_cma)
 {
-	struct cma_reserved *r = &cma_reserved[cma_reserved_count];
+	struct cma *cma = &cma_areas[cma_area_count];
 	phys_addr_t alignment;
+	int ret = 0;
 
 	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
 		 (unsigned long)size, (unsigned long)base,
 		 (unsigned long)limit);
 
 	/* Sanity checks */
-	if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
+	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
 		pr_err("Not enough slots for CMA reserved regions!\n");
 		return -ENOSPC;
 	}
@@ -256,7 +224,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
 	if (base) {
 		if (memblock_is_region_reserved(base, size) ||
 		    memblock_reserve(base, size) < 0) {
-			base = -EBUSY;
+			ret = -EBUSY;
 			goto err;
 		}
 	} else {
@@ -266,7 +234,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
 		 */
 		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
 		if (!addr) {
-			base = -ENOMEM;
+			ret = -ENOMEM;
 			goto err;
 		} else {
 			base = addr;
@@ -277,10 +245,11 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
 	 * Each reserved area must be initialised later, when more kernel
 	 * subsystems (like slab allocator) are available.
 	 */
-	r->start = base;
-	r->size = size;
-	r->dev = dev;
-	cma_reserved_count++;
+	cma->base_pfn = PFN_DOWN(base);
+	cma->count = size >> PAGE_SHIFT;
+	*res_cma = cma;
+	cma_area_count++;
+
 	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
 		(unsigned long)base);
 
@@ -289,7 +258,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
 	return 0;
 err:
 	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
-	return base;
+	return ret;
 }
 
 /**
0 commit comments