/**************************************************************************
 * Copyright (c) 2007, Intel Corporation.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 * develop this driver.
 *
 **************************************************************************/
/*
 * Make calls into closed source X server code.
 */

#include "drmP.h"
#include "psb_drv.h"

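/*
 * Take an unanswered request buffer back off the xhw queue and mark it
 * done. Called on the timeout paths of the blocking requests below; also
 * clears xhw_cur_buf in case the X server had already picked the buffer
 * up.
 */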
void
psb_xhw_clean_buf(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
	list_del_init(&buf->head);
	if (dev_priv->xhw_cur_buf == buf)
		dev_priv->xhw_cur_buf = NULL;
	atomic_set(&buf->done, 1);
	spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
}

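/*
 * Producer side of the xhw channel: queue a request on the xhw_in list
 * and wake the X server thread sleeping in psb_xhw_ioctl(). Submission
 * fails with -EINVAL once xhw_submit_ok has been cleared, that is, when
 * no Xpsb extension is attached or a takedown is in progress.
 */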
static inline int psb_xhw_add(struct drm_psb_private *dev_priv,
			      struct psb_xhw_buf *buf)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
	atomic_set(&buf->done, 0);
	if (unlikely(!dev_priv->xhw_submit_ok)) {
		spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
		DRM_ERROR("No Xpsb 3D extension available.\n");
		return -EINVAL;
	}
	if (!list_empty(&buf->head)) {
		DRM_ERROR("Recursive list adding.\n");
		goto out;
	}
	list_add_tail(&buf->head, &dev_priv->xhw_in);
	wake_up_interruptible(&dev_priv->xhw_queue);
out:
	spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
	return 0;
}

int psb_xhw_hotplug(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
{
	struct drm_psb_xhw_arg *xa = &buf->arg;
	int ret;

	buf->copy_back = 1;
	xa->op = PSB_XHW_HOTPLUG;
	xa->issue_irq = 0;
	xa->irq_op = 0;

	ret = psb_xhw_add(dev_priv, buf);
	return ret;
}

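/*
 * Blocking request pattern used by this and several calls below: queue
 * the buffer, sleep until psb_xhw_handler() marks it done, and clean the
 * buffer off the queue with -EBUSY if the X server does not answer
 * within the timeout.
 */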
int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
		       struct psb_xhw_buf *buf,
		       uint32_t w,
		       uint32_t h,
		       uint32_t *hw_cookie,
		       uint32_t *bo_size,
		       uint32_t *clear_p_start, uint32_t *clear_num_pages)
{
	struct drm_psb_xhw_arg *xa = &buf->arg;
	int ret;

	buf->copy_back = 1;
	xa->op = PSB_XHW_SCENE_INFO;
	xa->irq_op = 0;
	xa->issue_irq = 0;
	xa->arg.si.w = w;
	xa->arg.si.h = h;

	ret = psb_xhw_add(dev_priv, buf);
	if (ret)
		return ret;

	(void)wait_event_timeout(dev_priv->xhw_caller_queue,
				 atomic_read(&buf->done), DRM_HZ);

	if (!atomic_read(&buf->done)) {
		psb_xhw_clean_buf(dev_priv, buf);
		return -EBUSY;
	}

	if (!xa->ret) {
		memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
		*bo_size = xa->arg.si.size;
		*clear_p_start = xa->arg.si.clear_p_start;
		*clear_num_pages = xa->arg.si.clear_num_pages;
	}
	return xa->ret;
}

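/*
 * Fire-and-forget: no reply is copied back and no one waits. Note that
 * the fire_flags argument is currently unused; arg.sb.fire_flags is
 * always sent as zero.
 */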
int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
			struct psb_xhw_buf *buf, uint32_t fire_flags)
{
	struct drm_psb_xhw_arg *xa = &buf->arg;

	buf->copy_back = 0;
	xa->op = PSB_XHW_FIRE_RASTER;
	xa->issue_irq = 0;
	xa->arg.sb.fire_flags = 0;

	return psb_xhw_add(dev_priv, buf);
}

int psb_xhw_vistest(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
{
	struct drm_psb_xhw_arg *xa = &buf->arg;

	buf->copy_back = 1;
	xa->op = PSB_XHW_VISTEST;
	/*
	 * Could perhaps decrease latency somewhat by
	 * issuing an irq in this case.
	 */
	xa->issue_irq = 0;
	xa->irq_op = PSB_UIRQ_VISTEST;
	return psb_xhw_add(dev_priv, buf);
}

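/*
 * Bind and fire a scene on the TA or raster engine. A reply (and the
 * corresponding user IRQ) is requested only when the scheduler needs to
 * handle an out-of-memory condition (PSB_FIRE_FLAG_XHW_OOM).
 */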
int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
			    struct psb_xhw_buf *buf,
			    uint32_t fire_flags,
			    uint32_t hw_context,
			    uint32_t *cookie,
			    uint32_t *oom_cmds,
			    uint32_t num_oom_cmds,
			    uint32_t offset, uint32_t engine, uint32_t flags)
{
	struct drm_psb_xhw_arg *xa = &buf->arg;

	buf->copy_back = (fire_flags & PSB_FIRE_FLAG_XHW_OOM);
	xa->op = PSB_XHW_SCENE_BIND_FIRE;
	xa->issue_irq = (buf->copy_back) ? 1 : 0;
	if (unlikely(buf->copy_back))
		xa->irq_op = (engine == PSB_SCENE_ENGINE_TA) ?
		    PSB_UIRQ_FIRE_TA_REPLY : PSB_UIRQ_FIRE_RASTER_REPLY;
	else
		xa->irq_op = 0;
	xa->arg.sb.fire_flags = fire_flags;
	xa->arg.sb.hw_context = hw_context;
	xa->arg.sb.offset = offset;
	xa->arg.sb.engine = engine;
	xa->arg.sb.flags = flags;
	xa->arg.sb.num_oom_cmds = num_oom_cmds;
	memcpy(xa->cookie, cookie, sizeof(xa->cookie));
	if (num_oom_cmds)
		memcpy(xa->arg.sb.oom_cmds, oom_cmds,
		       sizeof(uint32_t) * num_oom_cmds);
	return psb_xhw_add(dev_priv, buf);
}

int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
{
	struct drm_psb_xhw_arg *xa = &buf->arg;
	int ret;

	buf->copy_back = 1;
	xa->op = PSB_XHW_RESET_DPM;
	xa->issue_irq = 0;
	xa->irq_op = 0;

	ret = psb_xhw_add(dev_priv, buf);
	if (ret)
		return ret;

	(void)wait_event_timeout(dev_priv->xhw_caller_queue,
				 atomic_read(&buf->done), 3 * DRM_HZ);

	if (!atomic_read(&buf->done)) {
		psb_xhw_clean_buf(dev_priv, buf);
		return -EBUSY;
	}

	return xa->ret;
}

int psb_xhw_check_lockup(struct drm_psb_private *dev_priv,
			 struct psb_xhw_buf *buf, uint32_t *value)
{
	struct drm_psb_xhw_arg *xa = &buf->arg;
	int ret;

	*value = 0;

	buf->copy_back = 1;
	xa->op = PSB_XHW_CHECK_LOCKUP;
	xa->issue_irq = 0;
	xa->irq_op = 0;

	ret = psb_xhw_add(dev_priv, buf);
	if (ret)
		return ret;

	(void)wait_event_timeout(dev_priv->xhw_caller_queue,
				 atomic_read(&buf->done), DRM_HZ * 3);

	if (!atomic_read(&buf->done)) {
		psb_xhw_clean_buf(dev_priv, buf);
		return -EBUSY;
	}

	if (!xa->ret)
		*value = xa->arg.cl.value;

	return xa->ret;
}

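/*
 * Ask the X server to shut the channel down. xhw_submit_ok is cleared
 * under the lock before the buffer is queued, so nothing can slip in
 * behind the terminate request; this is presumably why the list add is
 * done by hand here rather than through psb_xhw_add().
 */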
static int psb_xhw_terminate(struct drm_psb_private *dev_priv,
			     struct psb_xhw_buf *buf)
{
	struct drm_psb_xhw_arg *xa = &buf->arg;
	unsigned long irq_flags;

	buf->copy_back = 0;
	xa->op = PSB_XHW_TERMINATE;
	xa->issue_irq = 0;

	spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
	dev_priv->xhw_submit_ok = 0;
	atomic_set(&buf->done, 0);
	if (!list_empty(&buf->head)) {
		DRM_ERROR("Recursive list adding.\n");
		goto out;
	}
	list_add_tail(&buf->head, &dev_priv->xhw_in);
out:
	spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
	wake_up_interruptible(&dev_priv->xhw_queue);

	(void)wait_event_timeout(dev_priv->xhw_caller_queue,
				 atomic_read(&buf->done), DRM_HZ / 10);

	if (!atomic_read(&buf->done)) {
		DRM_ERROR("Xpsb terminate timeout.\n");
		psb_xhw_clean_buf(dev_priv, buf);
		return -EBUSY;
	}

	return 0;
}

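/*
 * Blocking query for TA memory information: sends the page count and
 * copies back the hw cookie and the required buffer-object size.
 */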
int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
			struct psb_xhw_buf *buf,
			uint32_t pages, uint32_t *hw_cookie, uint32_t *size)
{
	struct drm_psb_xhw_arg *xa = &buf->arg;
	int ret;

	buf->copy_back = 1;
	xa->op = PSB_XHW_TA_MEM_INFO;
	xa->issue_irq = 0;
	xa->irq_op = 0;
	xa->arg.bi.pages = pages;

	ret = psb_xhw_add(dev_priv, buf);
	if (ret)
		return ret;

	(void)wait_event_timeout(dev_priv->xhw_caller_queue,
				 atomic_read(&buf->done), DRM_HZ);

	if (!atomic_read(&buf->done)) {
		psb_xhw_clean_buf(dev_priv, buf);
		return -EBUSY;
	}

	if (!xa->ret)
		memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));

	*size = xa->arg.bi.size;
	return xa->ret;
}

int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
			struct psb_xhw_buf *buf,
			uint32_t flags,
			uint32_t param_offset,
			uint32_t pt_offset, uint32_t *hw_cookie)
{
	struct drm_psb_xhw_arg *xa = &buf->arg;
	int ret;

	buf->copy_back = 1;
	xa->op = PSB_XHW_TA_MEM_LOAD;
	xa->issue_irq = 0;
	xa->irq_op = 0;
	xa->arg.bl.flags = flags;
	xa->arg.bl.param_offset = param_offset;
	xa->arg.bl.pt_offset = pt_offset;
	memcpy(xa->cookie, hw_cookie, sizeof(xa->cookie));

	ret = psb_xhw_add(dev_priv, buf);
	if (ret)
		return ret;

	(void)wait_event_timeout(dev_priv->xhw_caller_queue,
				 atomic_read(&buf->done), 3 * DRM_HZ);

	if (!atomic_read(&buf->done)) {
		psb_xhw_clean_buf(dev_priv, buf);
		return -EBUSY;
	}

	if (!xa->ret)
		memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));

	return xa->ret;
}

int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
		   struct psb_xhw_buf *buf, uint32_t *cookie)
{
	struct drm_psb_xhw_arg *xa = &buf->arg;

	/*
	 * This calls the extensive closed source
	 * OOM handler, which resolves the condition and
	 * sends a reply telling the scheduler what to do
	 * with the task.
	 */

	buf->copy_back = 1;
	xa->op = PSB_XHW_OOM;
	xa->issue_irq = 1;
	xa->irq_op = PSB_UIRQ_OOM_REPLY;
	memcpy(xa->cookie, cookie, sizeof(xa->cookie));

	return psb_xhw_add(dev_priv, buf);
}

void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
			  struct psb_xhw_buf *buf,
			  uint32_t *cookie,
			  uint32_t *bca, uint32_t *rca, uint32_t *flags)
{
	struct drm_psb_xhw_arg *xa = &buf->arg;

	/*
	 * Get info about how to schedule an OOM task.
	 */

	memcpy(cookie, xa->cookie, sizeof(xa->cookie));
	*bca = xa->arg.oom.bca;
	*rca = xa->arg.oom.rca;
	*flags = xa->arg.oom.flags;
}

void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
			struct psb_xhw_buf *buf, uint32_t *cookie)
{
	struct drm_psb_xhw_arg *xa = &buf->arg;

	memcpy(cookie, xa->cookie, sizeof(xa->cookie));
}

int psb_xhw_resume(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
{
	struct drm_psb_xhw_arg *xa = &buf->arg;

	buf->copy_back = 0;
	xa->op = PSB_XHW_RESUME;
	xa->issue_irq = 0;
	xa->irq_op = 0;
	return psb_xhw_add(dev_priv, buf);
}

void psb_xhw_takedown(struct drm_psb_private *dev_priv)
{
}

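/*
 * One-time setup of the xhw state: request list, lock, wait queues and
 * mutex. The channel itself stays off until the X server attaches via
 * the PSB_XHW_INIT ioctl operation below.
 */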
int psb_xhw_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv =
	    (struct drm_psb_private *)dev->dev_private;
	unsigned long irq_flags;

	INIT_LIST_HEAD(&dev_priv->xhw_in);
	spin_lock_init(&dev_priv->xhw_lock);
	atomic_set(&dev_priv->xhw_client, 0);
	init_waitqueue_head(&dev_priv->xhw_queue);
	init_waitqueue_head(&dev_priv->xhw_caller_queue);
	mutex_init(&dev_priv->xhw_mutex);
	spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
	dev_priv->xhw_on = 0;
	spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);

	return 0;
}

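/*
 * Attach the X server: look up and kmap the shared communications buffer
 * object, then switch the channel on. atomic_add_unless() on xhw_client
 * ensures only one client can be attached at a time.
 */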
static int psb_xhw_init_init(struct drm_device *dev,
			     struct drm_file *file_priv,
			     struct drm_psb_xhw_init_arg *arg)
{
	struct drm_psb_private *dev_priv =
	    (struct drm_psb_private *)dev->dev_private;
	int ret;
	int is_iomem;

	if (atomic_add_unless(&dev_priv->xhw_client, 1, 1)) {
		unsigned long irq_flags;

		mutex_lock(&dev->struct_mutex);
		dev_priv->xhw_bo =
		    drm_lookup_buffer_object(file_priv, arg->buffer_handle, 1);
		mutex_unlock(&dev->struct_mutex);
		if (!dev_priv->xhw_bo) {
			ret = -EINVAL;
			goto out_err;
		}
		ret = drm_bo_kmap(dev_priv->xhw_bo, 0,
				  dev_priv->xhw_bo->num_pages,
				  &dev_priv->xhw_kmap);
		if (ret) {
			DRM_ERROR("Failed mapping X server "
				  "communications buffer.\n");
			goto out_err0;
		}
		dev_priv->xhw = drm_bmo_virtual(&dev_priv->xhw_kmap, &is_iomem);
		if (is_iomem) {
			DRM_ERROR("X server communications buffer "
				  "is in device memory.\n");
			ret = -EINVAL;
			goto out_err1;
		}
		dev_priv->xhw_file = file_priv;

		spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
		dev_priv->xhw_on = 1;
		dev_priv->xhw_submit_ok = 1;
		spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);

		return 0;
	} else {
		DRM_ERROR("Xhw is already initialized.\n");
		return -EBUSY;
	}
out_err1:
	dev_priv->xhw = NULL;
	drm_bo_kunmap(&dev_priv->xhw_kmap);
out_err0:
	drm_bo_usage_deref_unlocked(&dev_priv->xhw_bo);
out_err:
	atomic_dec(&dev_priv->xhw_client);
	return ret;
}

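/*
 * Flush the request queue: block further submission, fail every pending
 * request that expected a reply with -EINVAL, and wake all sleeping
 * callers.
 */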
static void psb_xhw_queue_empty(struct drm_psb_private *dev_priv)
{
	struct psb_xhw_buf *cur_buf, *next;
	unsigned long irq_flags;

	spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
	dev_priv->xhw_submit_ok = 0;

	list_for_each_entry_safe(cur_buf, next, &dev_priv->xhw_in, head) {
		list_del_init(&cur_buf->head);
		if (cur_buf->copy_back)
			cur_buf->arg.ret = -EINVAL;
		atomic_set(&cur_buf->done, 1);
	}
	spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
	wake_up(&dev_priv->xhw_caller_queue);
}

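/*
 * Detach the X server. On an explicit PSB_XHW_TAKEDOWN operation a
 * terminate request is sent to the server first; when the file is
 * simply being closed the queue is flushed right away. Finally the
 * shared buffer is unmapped and dereferenced.
 */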
void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
			   struct drm_file *file_priv, int closing)
{
	if (dev_priv->xhw_file == file_priv &&
	    atomic_add_unless(&dev_priv->xhw_client, -1, 0)) {

		if (closing)
			psb_xhw_queue_empty(dev_priv);
		else {
			struct psb_xhw_buf buf;
			INIT_LIST_HEAD(&buf.head);

			psb_xhw_terminate(dev_priv, &buf);
			psb_xhw_queue_empty(dev_priv);
		}

		dev_priv->xhw = NULL;
		drm_bo_kunmap(&dev_priv->xhw_kmap);
		drm_bo_usage_deref_unlocked(&dev_priv->xhw_bo);
		dev_priv->xhw_file = NULL;
	}
}

int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_psb_xhw_init_arg *arg = (struct drm_psb_xhw_init_arg *)data;
	struct drm_psb_private *dev_priv =
	    (struct drm_psb_private *)dev->dev_private;

	switch (arg->operation) {
	case PSB_XHW_INIT:
		return psb_xhw_init_init(dev, file_priv, arg);
	case PSB_XHW_TAKEDOWN:
		psb_xhw_init_takedown(dev_priv, file_priv, 0);
	}
	return 0;
}

static int psb_xhw_in_empty(struct drm_psb_private *dev_priv)
{
	int empty;
	unsigned long irq_flags;

	spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
	empty = list_empty(&dev_priv->xhw_in);
	spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
	return empty;
}

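/*
 * Completion side of the channel: if the current request expected a
 * reply, copy the argument block back from the shared buffer, mark the
 * buffer done and wake the caller sleeping in one of the blocking
 * requests above.
 */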
int psb_xhw_handler(struct drm_psb_private *dev_priv)
{
	unsigned long irq_flags;
	struct drm_psb_xhw_arg *xa;
	struct psb_xhw_buf *buf;

	spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);

	if (!dev_priv->xhw_on) {
		spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
		return -EINVAL;
	}

	buf = dev_priv->xhw_cur_buf;
	if (buf && buf->copy_back) {
		xa = &buf->arg;
		memcpy(xa, dev_priv->xhw, sizeof(*xa));
		dev_priv->comm[PSB_COMM_USER_IRQ] = xa->irq_op;
		atomic_set(&buf->done, 1);
		wake_up(&dev_priv->xhw_caller_queue);
	} else
		dev_priv->comm[PSB_COMM_USER_IRQ] = 0;

	dev_priv->xhw_cur_buf = NULL;
	spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
	return 0;
}

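/*
 * Consumer side, called by the X server: sleep until a request is
 * queued, pull the first buffer off xhw_in and copy its argument block
 * into the shared buffer for the server to act on. xhw_mutex keeps this
 * to a single server thread at a time.
 */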
int psb_xhw_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	struct drm_psb_private *dev_priv =
	    (struct drm_psb_private *)dev->dev_private;
	unsigned long irq_flags;
	struct drm_psb_xhw_arg *xa;
	int ret;
	struct list_head *list;
	struct psb_xhw_buf *buf;

	if (!dev_priv)
		return -EINVAL;

	if (mutex_lock_interruptible(&dev_priv->xhw_mutex))
		return -EAGAIN;

	if (psb_forced_user_interrupt(dev_priv)) {
		mutex_unlock(&dev_priv->xhw_mutex);
		return -EINVAL;
	}

	spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
	while (list_empty(&dev_priv->xhw_in)) {
		spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
		ret = wait_event_interruptible_timeout(dev_priv->xhw_queue,
						       !psb_xhw_in_empty
						       (dev_priv), DRM_HZ);
		if (ret == -ERESTARTSYS || ret == 0) {
			mutex_unlock(&dev_priv->xhw_mutex);
			return -EAGAIN;
		}
		spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
	}

	list = dev_priv->xhw_in.next;
	list_del_init(list);

	buf = list_entry(list, struct psb_xhw_buf, head);
	xa = &buf->arg;
	memcpy(dev_priv->xhw, xa, sizeof(*xa));

	if (unlikely(buf->copy_back))
		dev_priv->xhw_cur_buf = buf;
	else {
		atomic_set(&buf->done, 1);
		dev_priv->xhw_cur_buf = NULL;
	}

	if (xa->op == PSB_XHW_TERMINATE) {
		dev_priv->xhw_on = 0;
		wake_up(&dev_priv->xhw_caller_queue);
	}
	spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);

	mutex_unlock(&dev_priv->xhw_mutex);

	return 0;
}