/**************************************************************************
 * Copyright (c) 2007, Intel Corporation.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 * develop this driver.
 *
 **************************************************************************/
/*
 * Authors:
 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
#include "psb_drv.h"
#include "psb_reg.h"
#include "psb_scene.h"
#include "psb_msvdx.h"

#define PSB_2D_TIMEOUT_MSEC 100

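/*
 * Soft-reset the SGX core blocks (BIF, DPM, TA, USE, ISP and TSP),
 * optionally including the 2D block, and clear any pending bus-interface
 * fault.
 */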
void psb_reset(struct drm_psb_private *dev_priv, int reset_2d)
{
	uint32_t val;

	val = _PSB_CS_RESET_BIF_RESET |
	    _PSB_CS_RESET_DPM_RESET |
	    _PSB_CS_RESET_TA_RESET |
	    _PSB_CS_RESET_USE_RESET |
	    _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET;

	if (reset_2d)
		val |= _PSB_CS_RESET_TWOD_RESET;

	PSB_WSGX32(val, PSB_CR_SOFT_RESET);
	(void)PSB_RSGX32(PSB_CR_SOFT_RESET);

	msleep(1);

	PSB_WSGX32(0, PSB_CR_SOFT_RESET);
	wmb();
	PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);
	wmb();
	(void)PSB_RSGX32(PSB_CR_BIF_CTRL);

	msleep(1);
	PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);
	(void)PSB_RSGX32(PSB_CR_BIF_CTRL);
}

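/*
 * Decode the bus-interface fault status register and log which requestor
 * caused the MMU fault, together with the failing address.
 */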
void psb_print_pagefault(struct drm_psb_private *dev_priv)
{
	uint32_t val;
	uint32_t addr;

	val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
	addr = PSB_RSGX32(PSB_CR_BIF_FAULT);

	if (val) {
		if (val & _PSB_CBI_STAT_PF_N_RW)
			DRM_ERROR("Poulsbo MMU page fault:\n");
		else
			DRM_ERROR("Poulsbo MMU read / write "
				  "protection fault:\n");

		if (val & _PSB_CBI_STAT_FAULT_CACHE)
			DRM_ERROR("\tCache requestor.\n");
		if (val & _PSB_CBI_STAT_FAULT_TA)
			DRM_ERROR("\tTA requestor.\n");
		if (val & _PSB_CBI_STAT_FAULT_VDM)
			DRM_ERROR("\tVDM requestor.\n");
		if (val & _PSB_CBI_STAT_FAULT_2D)
			DRM_ERROR("\t2D requestor.\n");
		if (val & _PSB_CBI_STAT_FAULT_PBE)
			DRM_ERROR("\tPBE requestor.\n");
		if (val & _PSB_CBI_STAT_FAULT_TSP)
			DRM_ERROR("\tTSP requestor.\n");
		if (val & _PSB_CBI_STAT_FAULT_ISP)
			DRM_ERROR("\tISP requestor.\n");
		if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
			DRM_ERROR("\tUSSEPDS requestor.\n");
		if (val & _PSB_CBI_STAT_FAULT_HOST)
			DRM_ERROR("\tHost requestor.\n");

		DRM_ERROR("\tMMU failing address is 0x%08x.\n", (unsigned)addr);
	}
}

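/*
 * Arm the watchdog timer, unless it has been disabled or is already pending.
 */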
void psb_schedule_watchdog(struct drm_psb_private *dev_priv)
{
	struct timer_list *wt = &dev_priv->watchdog_timer;
	unsigned long irq_flags;

	spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
	if (dev_priv->timer_available && !timer_pending(wt)) {
		wt->expires = jiffies + PSB_WATCHDOG_DELAY;
		add_timer(wt);
	}
	spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
}

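/*
 * 2D engine lockup / idle detection. Currently compiled out; the watchdog
 * below treats the 2D engine as idle and never locked up.
 */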
#if 0
static void psb_seq_lockup_idle(struct drm_psb_private *dev_priv,
				unsigned int engine, int *lockup, int *idle)
{
	uint32_t received_seq;

	received_seq = dev_priv->comm[engine << 4];
	spin_lock(&dev_priv->sequence_lock);
	*idle = (received_seq == dev_priv->sequence[engine]);
	spin_unlock(&dev_priv->sequence_lock);

	if (*idle) {
		dev_priv->idle[engine] = 1;
		*lockup = 0;
		return;
	}

	if (dev_priv->idle[engine]) {
		dev_priv->idle[engine] = 0;
		dev_priv->last_sequence[engine] = received_seq;
		*lockup = 0;
		return;
	}

	*lockup = (dev_priv->last_sequence[engine] == received_seq);
}

#endif
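/*
 * Watchdog timer callback: check the scheduler and the MSVDX engine for
 * lockups. On a lockup, disable the timer and hand off to the matching
 * reset work item; re-arm the watchdog while any engine is still busy.
 */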
static void psb_watchdog_func(unsigned long data)
{
	struct drm_psb_private *dev_priv = (struct drm_psb_private *)data;
	int lockup;
	int msvdx_lockup;
	int msvdx_idle;
	int lockup_2d;
	int idle_2d;
	int idle;
	unsigned long irq_flags;

	psb_scheduler_lockup(dev_priv, &lockup, &idle);
	psb_msvdx_lockup(dev_priv, &msvdx_lockup, &msvdx_idle);
#if 0
	psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
#else
	lockup_2d = FALSE;
	idle_2d = TRUE;
#endif
	if (lockup || msvdx_lockup || lockup_2d) {
		spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
		dev_priv->timer_available = 0;
		spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
		if (lockup) {
			psb_print_pagefault(dev_priv);
			schedule_work(&dev_priv->watchdog_wq);
		}
		if (msvdx_lockup)
			schedule_work(&dev_priv->msvdx_watchdog_wq);
	}
	if (!idle || !msvdx_idle || !idle_2d)
		psb_schedule_watchdog(dev_priv);
}

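/*
 * Drop all commands still queued for the MSVDX engine and signal their
 * fences with a DRM_CMD_HANG error so that waiters are released.
 */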
void psb_msvdx_flush_cmd_queue(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_msvdx_cmd_queue *msvdx_cmd;
	struct list_head *list, *next;

	/* Flush the msvdx cmd queue and signal all fences in the queue */
	list_for_each_safe(list, next, &dev_priv->msvdx_queue) {
		msvdx_cmd = list_entry(list, struct psb_msvdx_cmd_queue, head);
		PSB_DEBUG_GENERAL("MSVDXQUE: flushing sequence:%d\n",
				  msvdx_cmd->sequence);
		dev_priv->msvdx_current_sequence = msvdx_cmd->sequence;
		psb_fence_error(dev, PSB_ENGINE_VIDEO,
				dev_priv->msvdx_current_sequence,
				DRM_FENCE_TYPE_EXE, DRM_CMD_HANG);
		list_del(list);
		kfree(msvdx_cmd->cmd);
		drm_free(msvdx_cmd, sizeof(struct psb_msvdx_cmd_queue),
			 DRM_MEM_DRIVER);
	}
}

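/*
 * Work item run on an MSVDX lockup: mark the engine as needing a reset,
 * fail the current fence, flush the command queue and re-arm the watchdog.
 */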
static void psb_msvdx_reset_wq(struct work_struct *work)
{
	struct drm_psb_private *dev_priv =
	    container_of(work, struct drm_psb_private, msvdx_watchdog_wq);

	struct psb_scheduler *scheduler = &dev_priv->scheduler;
	unsigned long irq_flags;

	mutex_lock(&dev_priv->msvdx_mutex);
	dev_priv->msvdx_needs_reset = 1;
	dev_priv->msvdx_current_sequence++;
	PSB_DEBUG_GENERAL
	    ("MSVDXFENCE: incremented msvdx_current_sequence to :%d\n",
	     dev_priv->msvdx_current_sequence);

	psb_fence_error(scheduler->dev, PSB_ENGINE_VIDEO,
			dev_priv->msvdx_current_sequence, DRM_FENCE_TYPE_EXE,
			DRM_CMD_HANG);

	spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
	dev_priv->timer_available = 1;
	spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);

	spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags);
	psb_msvdx_flush_cmd_queue(scheduler->dev);
	spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);

	psb_schedule_watchdog(dev_priv);
	mutex_unlock(&dev_priv->msvdx_mutex);
}

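/*
 * Re-establish the default MMU page-directory context, clear any fault and
 * invalidate the bus-interface caches, then request a DPM reset through the
 * xhw interface.
 */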
static int psb_xhw_mmu_reset(struct drm_psb_private *dev_priv)
{
	struct psb_xhw_buf buf;
	uint32_t bif_ctrl;

	INIT_LIST_HEAD(&buf.head);
	psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
	bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
	PSB_WSGX32(bif_ctrl |
		   _PSB_CB_CTRL_CLEAR_FAULT |
		   _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
	(void)PSB_RSGX32(PSB_CR_BIF_CTRL);
	msleep(1);
	PSB_WSGX32(bif_ctrl, PSB_CR_BIF_CTRL);
	(void)PSB_RSGX32(PSB_CR_BIF_CTRL);
	return psb_xhw_reset_dpm(dev_priv, &buf);
}

/*
 * Block command submission and reset hardware and schedulers.
 */

static void psb_reset_wq(struct work_struct *work)
{
	struct drm_psb_private *dev_priv =
	    container_of(work, struct drm_psb_private, watchdog_wq);
	int lockup_2d;
	int idle_2d;
	unsigned long irq_flags;
	int ret;
	int reset_count = 0;
	struct psb_xhw_buf buf;
	uint32_t xhw_lockup;

	/*
	 * Block command submission.
	 */

	mutex_lock(&dev_priv->reset_mutex);

	INIT_LIST_HEAD(&buf.head);
	if (psb_xhw_check_lockup(dev_priv, &buf, &xhw_lockup) == 0) {
		if (xhw_lockup == 0 &&
		    psb_extend_raster_timeout(dev_priv) == 0) {
			/*
			 * No lockup, just re-schedule.
			 */
			spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
			dev_priv->timer_available = 1;
			spin_unlock_irqrestore(&dev_priv->watchdog_lock,
					       irq_flags);
			psb_schedule_watchdog(dev_priv);
			mutex_unlock(&dev_priv->reset_mutex);
			return;
		}
	}
#if 0
	msleep(PSB_2D_TIMEOUT_MSEC);

	psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);

	if (lockup_2d) {
		uint32_t seq_2d;
		spin_lock(&dev_priv->sequence_lock);
		seq_2d = dev_priv->sequence[PSB_ENGINE_2D];
		spin_unlock(&dev_priv->sequence_lock);
		psb_fence_error(dev_priv->scheduler.dev,
				PSB_ENGINE_2D,
				seq_2d, DRM_FENCE_TYPE_EXE, -EBUSY);
		DRM_INFO("Resetting 2D engine.\n");
	}

	psb_reset(dev_priv, lockup_2d);
#else
	(void)lockup_2d;
	(void)idle_2d;
	psb_reset(dev_priv, 0);
#endif
	(void)psb_xhw_mmu_reset(dev_priv);
	DRM_INFO("Resetting scheduler.\n");
	psb_scheduler_pause(dev_priv);
	psb_scheduler_reset(dev_priv, -EBUSY);
	psb_scheduler_ta_mem_check(dev_priv);

	while (dev_priv->ta_mem &&
	       !dev_priv->force_ta_mem_load && ++reset_count < 10) {

		/*
		 * TA memory is currently fenced so offsets
		 * are valid. Reload offsets into the dpm now.
		 */

		struct psb_xhw_buf buf;
		INIT_LIST_HEAD(&buf.head);

		msleep(100);
		DRM_INFO("Trying to reload TA memory.\n");
		ret = psb_xhw_ta_mem_load(dev_priv, &buf,
					  PSB_TA_MEM_FLAG_TA |
					  PSB_TA_MEM_FLAG_RASTER |
					  PSB_TA_MEM_FLAG_HOSTA |
					  PSB_TA_MEM_FLAG_HOSTD |
					  PSB_TA_MEM_FLAG_INIT,
					  dev_priv->ta_mem->ta_memory->offset,
					  dev_priv->ta_mem->hw_data->offset,
					  dev_priv->ta_mem->hw_cookie);
		if (!ret)
			break;

		psb_reset(dev_priv, 0);
		(void)psb_xhw_mmu_reset(dev_priv);
	}

	psb_scheduler_restart(dev_priv);
	spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
	dev_priv->timer_available = 1;
	spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
	mutex_unlock(&dev_priv->reset_mutex);
}

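/*
 * Set up the watchdog timer, the reset work items and the lock that
 * protects them.
 */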
void psb_watchdog_init(struct drm_psb_private *dev_priv)
{
	struct timer_list *wt = &dev_priv->watchdog_timer;
	unsigned long irq_flags;

	spin_lock_init(&dev_priv->watchdog_lock);
	spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
	init_timer(wt);
	INIT_WORK(&dev_priv->watchdog_wq, &psb_reset_wq);
	INIT_WORK(&dev_priv->msvdx_watchdog_wq, &psb_msvdx_reset_wq);
	wt->data = (unsigned long)dev_priv;
	wt->function = &psb_watchdog_func;
	dev_priv->timer_available = 1;
	spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
}

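/*
 * Disable the watchdog and make sure no timer callback is still running.
 */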
void psb_watchdog_takedown(struct drm_psb_private *dev_priv)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
	dev_priv->timer_available = 0;
	spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
	(void)del_timer_sync(&dev_priv->watchdog_timer);
}