/**
 * \file drm_memory.c
 * Memory management wrappers for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/highmem.h>
#include "drmP.h"

/*
 * Global accounting for memory allocated through these wrappers:
 * cur_used bytes are currently accounted, emer_used of which were
 * granted from the emergency pool above high_threshold.
 */
static struct {
	spinlock_t lock;
	uint64_t cur_used;
	uint64_t emer_used;
	uint64_t low_threshold;
	uint64_t high_threshold;
	uint64_t emer_threshold;
} drm_memctl = {
	.lock = SPIN_LOCK_UNLOCKED
};

/*
 * Account @size bytes against the global memctl thresholds.
 * Returns 0 on success or -ENOMEM when the request would exceed the
 * limits.  The superuser may dip into a small emergency pool above
 * the high threshold.
 */
int drm_alloc_memctl(size_t size)
{
	int ret = 0;
	unsigned long a_size = drm_size_align(size);
	unsigned long new_used;

	spin_lock(&drm_memctl.lock);
	new_used = drm_memctl.cur_used + a_size;
	if (likely(new_used < drm_memctl.high_threshold)) {
		drm_memctl.cur_used = new_used;
		goto out;
	}

	/*
	 * Allow small allocations from the emergency pool, but only
	 * for the superuser and only up to the emergency threshold.
	 */
	new_used += drm_memctl.emer_used;
	if (unlikely(!DRM_SUSER(DRM_CURPROC) ||
		     (a_size > 16*PAGE_SIZE) ||
		     (new_used > drm_memctl.emer_threshold))) {
		ret = -ENOMEM;
		goto out;
	}

	drm_memctl.cur_used = drm_memctl.high_threshold;
	drm_memctl.emer_used = new_used - drm_memctl.high_threshold;
out:
	spin_unlock(&drm_memctl.lock);
	return ret;
}

/*
 * Release @size bytes of memctl accounting.  The emergency pool is
 * drained first; the remainder comes off cur_used.
 */
void drm_free_memctl(size_t size)
{
	unsigned long a_size = drm_size_align(size);

	spin_lock(&drm_memctl.lock);
	if (likely(a_size >= drm_memctl.emer_used)) {
		a_size -= drm_memctl.emer_used;
		drm_memctl.emer_used = 0;
	} else {
		drm_memctl.emer_used -= a_size;
		a_size = 0;
	}
	drm_memctl.cur_used -= a_size;
	spin_unlock(&drm_memctl.lock);
}
EXPORT_SYMBOL(drm_free_memctl);
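
/*
 * Illustrative pairing (hypothetical caller, not part of this file):
 * account a buffer against the memctl limits before allocating it,
 * and release the accounting if the allocation fails or when the
 * buffer is eventually freed.
 *
 *	if (drm_alloc_memctl(size))
 *		return -ENOMEM;
 *	buf = kmalloc(size, GFP_KERNEL);
 *	if (!buf) {
 *		drm_free_memctl(size);
 *		return -ENOMEM;
 *	}
 *	...
 *	kfree(buf);
 *	drm_free_memctl(size);
 */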

/* Snapshot all memctl counters consistently, under the lock. */
void drm_query_memctl(uint64_t *cur_used,
		      uint64_t *emer_used,
		      uint64_t *low_threshold,
		      uint64_t *high_threshold,
		      uint64_t *emer_threshold)
{
	spin_lock(&drm_memctl.lock);
	*cur_used = drm_memctl.cur_used;
	*emer_used = drm_memctl.emer_used;
	*low_threshold = drm_memctl.low_threshold;
	*high_threshold = drm_memctl.high_threshold;
	*emer_threshold = drm_memctl.emer_threshold;
	spin_unlock(&drm_memctl.lock);
}
EXPORT_SYMBOL(drm_query_memctl);
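
/*
 * Illustrative query (hypothetical caller): take a consistent
 * snapshot of the accounting state, e.g. from a /proc or debug
 * handler.
 *
 *	uint64_t used, emer, low, high, emer_thresh;
 *
 *	drm_query_memctl(&used, &emer, &low, &high, &emer_thresh);
 *	printk(KERN_DEBUG "drm: %llu bytes used (%llu emergency)\n",
 *	       (unsigned long long)used, (unsigned long long)emer);
 */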

/*
 * Set the memctl thresholds, clearing current usage.  Thresholds are
 * given in units of @unit_size bytes; the emergency threshold is set
 * 1/16th above the high threshold.
 */
void drm_init_memctl(size_t p_low_threshold,
		     size_t p_high_threshold,
		     size_t unit_size)
{
	spin_lock(&drm_memctl.lock);
	drm_memctl.emer_used = 0;
	drm_memctl.cur_used = 0;
	drm_memctl.low_threshold = p_low_threshold * unit_size;
	drm_memctl.high_threshold = p_high_threshold * unit_size;
	drm_memctl.emer_threshold = (drm_memctl.high_threshold >> 4) +
		drm_memctl.high_threshold;
	spin_unlock(&drm_memctl.lock);
}
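
/*
 * Worked numbers (illustrative only, assuming 4 KiB pages): with
 * unit_size = PAGE_SIZE and p_high_threshold = 4096, high_threshold
 * is 16 MiB and emer_threshold is 16 MiB + 1 MiB (high + high/16).
 */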

#ifndef DEBUG_MEMORY

/** No-op. */
void drm_mem_init(void)
{
}

/**
 * Called when "/proc/dri/%dev%/mem" is read.
 *
 * \param buf output buffer.
 * \param start start of output data.
 * \param offset requested start offset.
 * \param len requested number of bytes.
 * \param eof whether there is no more data to return.
 * \param data private data.
 * \return number of written bytes.
 *
 * No-op.
 */
int drm_mem_info(char *buf, char **start, off_t offset,
		 int len, int *eof, void *data)
{
	return 0;
}

/** Wrapper around kcalloc() */
void *drm_calloc(size_t nmemb, size_t size, int area)
{
	return kcalloc(nmemb, size, GFP_KERNEL);
}
EXPORT_SYMBOL(drm_calloc);

/** Wrapper around kmalloc() and kfree() */
void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
{
	void *pt;

	pt = kmalloc(size, GFP_KERNEL);
	if (!pt)
		return NULL;
	if (oldpt && oldsize) {
		/* Copy no more than the new buffer can hold. */
		memcpy(pt, oldpt, min(oldsize, size));
		kfree(oldpt);
	}
	return pt;
}
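
/*
 * Illustrative use (names hypothetical): growing a table.  On failure
 * the old buffer is left intact, since drm_realloc() only frees it
 * after a successful allocation.
 *
 *	new_tab = drm_realloc(tab, count * sizeof(*tab),
 *			      new_count * sizeof(*tab), DRM_MEM_DRIVER);
 *	if (!new_tab)
 *		return -ENOMEM;
 *	tab = new_tab;
 */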

/**
 * Allocate pages.
 *
 * \param order size order.
 * \param area memory area. (Not used.)
 * \return page address on success, or zero on failure.
 *
 * Allocate and reserve free pages.
 */
unsigned long drm_alloc_pages(int order, int area)
{
	unsigned long address;
	unsigned long bytes = PAGE_SIZE << order;
	unsigned long addr;
	unsigned int sz;

	address = __get_free_pages(GFP_KERNEL, order);
	if (!address)
		return 0;

	/* Zero */
	memset((void *)address, 0, bytes);

	/* Reserve */
	for (addr = address, sz = bytes;
	     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
		SetPageReserved(virt_to_page(addr));
	}

	return address;
}

/**
 * Free pages.
 *
 * \param address address of the pages to free.
 * \param order size order.
 * \param area memory area. (Not used.)
 *
 * Unreserve and free pages allocated by drm_alloc_pages().
 */
void drm_free_pages(unsigned long address, int order, int area)
{
	unsigned long bytes = PAGE_SIZE << order;
	unsigned long addr;
	unsigned int sz;

	if (!address)
		return;

	/* Unreserve */
	for (addr = address, sz = bytes;
	     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
	}

	free_pages(address, order);
}
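
/*
 * Illustrative pairing (hypothetical driver code): allocate two
 * zeroed, reserved pages (order 1), then release them with the same
 * order.
 *
 *	unsigned long addr = drm_alloc_pages(1, DRM_MEM_DRIVER);
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	drm_free_pages(addr, 1, DRM_MEM_DRIVER);
 */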

#if __OS_HAS_AGP
static void *agp_remap(unsigned long offset, unsigned long size,
		       struct drm_device *dev)
{
	unsigned long *phys_addr_map, i, num_pages =
	    PAGE_ALIGN(size) / PAGE_SIZE;
	struct drm_agp_mem *agpmem;
	struct page **page_map;
	void *addr;

	size = PAGE_ALIGN(size);

#ifdef __alpha__
	offset -= dev->hose->mem_space->start;
#endif

	list_for_each_entry(agpmem, &dev->agp->memory, head)
		if (agpmem->bound <= offset
		    && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
		    (offset + size))
			break;
	/*
	 * list_for_each_entry() never leaves the cursor NULL; detect
	 * the "no match" case by checking whether we ran off the end
	 * of the list instead.
	 */
	if (&agpmem->head == &dev->agp->memory)
		return NULL;

	/*
	 * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
	 * the CPU do not get remapped by the GART.  We fix this by using the kernel's
	 * page-table instead (that's probably faster anyhow...).
	 */
	/* note: use vmalloc() because num_pages could be large... */
	page_map = vmalloc(num_pages * sizeof(struct page *));
	if (!page_map)
		return NULL;

	phys_addr_map =
	    agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
	for (i = 0; i < num_pages; ++i)
		page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
	addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
	vfree(page_map);

	return addr;
}

/** Wrapper around agp_allocate_memory() */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type)
{
	return drm_agp_allocate_memory(pages, type);
}
#else
DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type)
{
	return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
}
#endif

/** Wrapper around agp_free_memory() */
int drm_free_agp(DRM_AGP_MEM * handle, int pages)
{
	return drm_agp_free_memory(handle) ? 0 : -EINVAL;
}

/** Wrapper around agp_bind_memory() */
int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
{
	return drm_agp_bind_memory(handle, start);
}

/** Wrapper around agp_unbind_memory() */
int drm_unbind_agp(DRM_AGP_MEM * handle)
{
	return drm_agp_unbind_memory(handle);
}
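
/*
 * Illustrative AGP flow (hypothetical caller, AGP builds only; the
 * error codes are the caller's choice): allocate, bind at a page
 * offset, and tear down in reverse order.
 *
 *	DRM_AGP_MEM *mem = drm_alloc_agp(dev, pages, type);
 *	if (!mem)
 *		return -ENOMEM;
 *	if (drm_bind_agp(mem, start)) {
 *		drm_free_agp(mem, pages);
 *		return -EAGAIN;
 *	}
 *	...
 *	drm_unbind_agp(mem);
 *	drm_free_agp(mem, pages);
 */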

#else				/* __OS_HAS_AGP */
static void *agp_remap(unsigned long offset, unsigned long size,
		       struct drm_device *dev)
{
	return NULL;
}
#endif				/* __OS_HAS_AGP */
#else				/* DEBUG_MEMORY */
static void *agp_remap(unsigned long offset, unsigned long size,
		       struct drm_device *dev)
{
	return NULL;
}
#endif				/* DEBUG_MEMORY */

void drm_core_ioremap(struct drm_map *map, struct drm_device *dev)
{
	if (drm_core_has_AGP(dev) &&
	    dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
		map->handle = agp_remap(map->offset, map->size, dev);
	else
		map->handle = ioremap(map->offset, map->size);
}
EXPORT_SYMBOL_GPL(drm_core_ioremap);

void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
{
	if (!map->handle || !map->size)
		return;

	if (drm_core_has_AGP(dev) &&
	    dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
		vunmap(map->handle);
	else
		iounmap(map->handle);
}
EXPORT_SYMBOL_GPL(drm_core_ioremapfree);
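
/*
 * Illustrative use (hypothetical driver init code): map a register
 * BAR through a struct drm_map, and tear the mapping down afterwards.
 * The pci_resource_*() calls and _DRM_REGISTERS type describe an
 * assumed caller, not anything in this file.
 *
 *	map->offset = pci_resource_start(dev->pdev, 0);
 *	map->size = pci_resource_len(dev->pdev, 0);
 *	map->type = _DRM_REGISTERS;
 *	drm_core_ioremap(map, dev);
 *	if (!map->handle)
 *		return -ENOMEM;
 *	...
 *	drm_core_ioremapfree(map, dev);
 */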