drm/nouveau: ratelimit IRQ messages
drivers/gpu/drm/nouveau/nouveau_irq.c
/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */
#include "drmP.h"
#include "drm.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_ramht.h"
#include <linux/ratelimit.h>

/* needed for hotplug irq */
#include "nouveau_connector.h"
#include "nv50_display.h"
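
/*
 * Error reporting from the IRQ paths below is noisy when something goes
 * wrong, so it is ratelimited: at most 20 messages per 3 second window.
 * __ratelimit() returns nonzero while printing is still allowed inside
 * the current window.
 */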
static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);

static int nouveau_ratelimit(void)
{
	return __ratelimit(&nouveau_ratelimit_state);
}

void
nouveau_irq_preinstall(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* Master disable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);

	if (dev_priv->card_type >= NV_50) {
		INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
		INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
		INIT_LIST_HEAD(&dev_priv->vbl_waiting);
	}
}

int
nouveau_irq_postinstall(struct drm_device *dev)
{
	/* Master enable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
	return 0;
}

void
nouveau_irq_uninstall(struct drm_device *dev)
{
	/* Master disable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
}

static int
nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_pgraph_object_method *grm;
	struct nouveau_pgraph_object_class *grc;

	grc = dev_priv->engine.graph.grclass;
	while (grc->id) {
		if (grc->id == class)
			break;
		grc++;
	}

	if (grc->id != class || !grc->methods)
		return -ENOENT;

	grm = grc->methods;
	while (grm->id) {
		if (grm->id == mthd)
			return grm->exec(chan, class, mthd, data);
		grm++;
	}

	return -ENOENT;
}
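
/*
 * The trapped PFIFO method word encodes the subchannel in bits 15:13 and
 * the method offset in bits 12:2.  Method 0x0000 binds the object named by
 * 'data' (looked up in RAMHT) to the subchannel; if that object belongs to
 * the software engine, later methods on the subchannel are dispatched
 * through nouveau_call_method() above instead of being treated as errors.
 */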
static bool
nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
{
	struct drm_device *dev = chan->dev;
	const int subc = (addr >> 13) & 0x7;
	const int mthd = addr & 0x1ffc;

	if (mthd == 0x0000) {
		struct nouveau_gpuobj *gpuobj;

		gpuobj = nouveau_ramht_find(chan, data);
		if (!gpuobj)
			return false;

		if (gpuobj->engine != NVOBJ_ENGINE_SW)
			return false;

		chan->sw_subchannel[subc] = gpuobj->class;
		nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
			NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
		return true;
	}

	/* hw object */
	if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4)))
		return false;

	if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data))
		return false;

	return true;
}
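
/*
 * PFIFO interrupt handler.  Caching is disabled (NV03_PFIFO_CACHES) while
 * each pending condition is serviced and re-enabled afterwards.  The loop
 * gives up after 100 spins; if PFIFO is still asserting by then, the PFIFO
 * and master interrupt enables (0x2140, 0x140) are cleared outright rather
 * than letting the machine livelock.
 */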
static void
nouveau_fifo_irq_handler(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	uint32_t status, reassign;
	int cnt = 0;

	reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
	while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
		struct nouveau_channel *chan = NULL;
		uint32_t chid, get;

		nv_wr32(dev, NV03_PFIFO_CACHES, 0);

		chid = engine->fifo.channel_id(dev);
		if (chid >= 0 && chid < engine->fifo.channels)
			chan = dev_priv->fifos[chid];
		get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);

		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
			uint32_t mthd, data;
			int ptr;

			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
			 * wrapping on my G80 chips, but CACHE1 isn't big
			 * enough for this much data.. Tests show that it
			 * wraps around to the start at GET=0x800.. No clue
			 * as to why..
			 */
			ptr = (get & 0x7ff) >> 2;

			if (dev_priv->card_type < NV_40) {
				mthd = nv_rd32(dev,
					NV04_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(dev,
					NV04_PFIFO_CACHE1_DATA(ptr));
			} else {
				mthd = nv_rd32(dev,
					NV40_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(dev,
					NV40_PFIFO_CACHE1_DATA(ptr));
			}

			if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) {
				NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
					     "Mthd 0x%04x Data 0x%08x\n",
					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
					data);
			}

			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
			nv_wr32(dev, NV03_PFIFO_INTR_0,
						NV_PFIFO_INTR_CACHE_ERROR);

			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
			nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);

			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
				nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);

			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
		}

		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
			u32 get = nv_rd32(dev, 0x003244);
			u32 put = nv_rd32(dev, 0x003240);
			u32 push = nv_rd32(dev, 0x003220);
			u32 state = nv_rd32(dev, 0x003228);

			if (dev_priv->card_type == NV_50) {
				u32 ho_get = nv_rd32(dev, 0x003328);
				u32 ho_put = nv_rd32(dev, 0x003320);
				u32 ib_get = nv_rd32(dev, 0x003334);
				u32 ib_put = nv_rd32(dev, 0x003330);

				if (nouveau_ratelimit())
					NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
					     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
					     "State 0x%08x Push 0x%08x\n",
						chid, ho_get, get, ho_put, put,
						ib_get, ib_put, state, push);

				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
				nv_wr32(dev, 0x003364, 0x00000000);
				if (get != put || ho_get != ho_put) {
					nv_wr32(dev, 0x003244, put);
					nv_wr32(dev, 0x003328, ho_put);
				} else
				if (ib_get != ib_put) {
					nv_wr32(dev, 0x003334, ib_put);
				}
			} else {
				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
					     "Put 0x%08x State 0x%08x Push 0x%08x\n",
					chid, get, put, state, push);

				if (get != put)
					nv_wr32(dev, 0x003244, put);
			}

			nv_wr32(dev, 0x003228, 0x00000000);
			nv_wr32(dev, 0x003220, 0x00000001);
			nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
		}

		if (status & NV_PFIFO_INTR_SEMAPHORE) {
			uint32_t sem;

			status &= ~NV_PFIFO_INTR_SEMAPHORE;
			nv_wr32(dev, NV03_PFIFO_INTR_0,
				NV_PFIFO_INTR_SEMAPHORE);

			sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
			nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
		}

		if (dev_priv->card_type == NV_50) {
			if (status & 0x00000010) {
				nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
				status &= ~0x00000010;
				nv_wr32(dev, 0x002100, 0x00000010);
			}
		}

		if (status) {
			if (nouveau_ratelimit())
				NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
					status, chid);
			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
			status = 0;
		}

		nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
	}

	if (status) {
		NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
		nv_wr32(dev, 0x2140, 0);
		nv_wr32(dev, 0x140, 0);
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
}
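
/*
 * Tables mapping PGRAPH NSOURCE/NSTATUS bits to human-readable names,
 * printed piecewise by the helpers that follow.
 */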
struct nouveau_bitfield_names {
	uint32_t mask;
	const char *name;
};

static struct nouveau_bitfield_names nstatus_names[] =
{
	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
	{ NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
};

static struct nouveau_bitfield_names nstatus_names_nv10[] =
{
	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
	{ NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
};

static struct nouveau_bitfield_names nsource_names[] =
{
	{ NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
	{ NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
};

static void
nouveau_print_bitfield_names_(uint32_t value,
			      const struct nouveau_bitfield_names *namelist,
			      const int namelist_len)
{
	/*
	 * Caller must have already printed the KERN_* log level for us.
	 * Also the caller is responsible for adding the newline.
	 */
	int i;
	for (i = 0; i < namelist_len; ++i) {
		uint32_t mask = namelist[i].mask;
		if (value & mask) {
			printk(" %s", namelist[i].name);
			value &= ~mask;
		}
	}
	if (value)
		printk(" (unknown bits 0x%08x)", value);
}
#define nouveau_print_bitfield_names(val, namelist) \
	nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))

struct nouveau_enum_names {
	uint32_t value;
	const char *name;
};

static void
nouveau_print_enum_names_(uint32_t value,
			  const struct nouveau_enum_names *namelist,
			  const int namelist_len)
{
	/*
	 * Caller must have already printed the KERN_* log level for us.
	 * Also the caller is responsible for adding the newline.
	 */
	int i;
	for (i = 0; i < namelist_len; ++i) {
		if (value == namelist[i].value) {
			printk("%s", namelist[i].name);
			return;
		}
	}
	printk("unknown value 0x%08x", value);
}
#define nouveau_print_enum_names(val, namelist) \
	nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))
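
/*
 * Work out which channel owns the context PGRAPH is currently running by
 * matching the instance address in 0x40032c against each channel's grctx
 * (NV40) or RAMIN base (NV50).  Pre-NV40 parts have no usable pointer, so
 * the (invalid) channel count is returned instead.
 */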
static int
nouveau_graph_chid_from_grctx(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t inst;
	int i;

	if (dev_priv->card_type < NV_40)
		return dev_priv->engine.fifo.channels;
	else
	if (dev_priv->card_type < NV_50) {
		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;

		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
			struct nouveau_channel *chan = dev_priv->fifos[i];

			if (!chan || !chan->ramin_grctx)
				continue;

			if (inst == chan->ramin_grctx->pinst)
				break;
		}
	} else {
		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;

		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
			struct nouveau_channel *chan = dev_priv->fifos[i];

			if (!chan || !chan->ramin)
				continue;

			if (inst == chan->ramin->vinst)
				break;
		}
	}

	return i;
}

static int
nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	int channel;

	if (dev_priv->card_type < NV_10)
		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
	else
	if (dev_priv->card_type < NV_40)
		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
	else
		channel = nouveau_graph_chid_from_grctx(dev);

	if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
		NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
		return -EINVAL;
	}

	*channel_ret = channel;
	return 0;
}
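
/*
 * Snapshot of PGRAPH trap state, filled in by nouveau_graph_trap_info()
 * and printed by nouveau_graph_dump_trap_info().
 */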
struct nouveau_pgraph_trap {
	int channel;
	int class;
	int subc, mthd, size;
	uint32_t data, data2;
	uint32_t nsource, nstatus;
};

static void
nouveau_graph_trap_info(struct drm_device *dev,
			struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t address;

	trap->nsource = trap->nstatus = 0;
	if (dev_priv->card_type < NV_50) {
		trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
		trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
	}

	if (nouveau_graph_trapped_channel(dev, &trap->channel))
		trap->channel = -1;
	address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);

	trap->mthd = address & 0x1FFC;
	trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
	if (dev_priv->card_type < NV_10) {
		trap->subc = (address >> 13) & 0x7;
	} else {
		trap->subc = (address >> 16) & 0x7;
		trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
	}

	if (dev_priv->card_type < NV_10)
		trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF;
	else if (dev_priv->card_type < NV_40)
		trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF;
	else if (dev_priv->card_type < NV_50)
		trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF;
	else
		trap->class = nv_rd32(dev, 0x400814);
}

static void
nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
			     struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t nsource = trap->nsource, nstatus = trap->nstatus;

	if (dev_priv->card_type < NV_50) {
		NV_INFO(dev, "%s - nSource:", id);
		nouveau_print_bitfield_names(nsource, nsource_names);
		printk(", nStatus:");
		if (dev_priv->card_type < NV_10)
			nouveau_print_bitfield_names(nstatus, nstatus_names);
		else
			nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
		printk("\n");
	}

	NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
		     "Data 0x%08x:0x%08x\n",
		id, trap->channel, trap->subc,
		trap->class, trap->mthd,
		trap->data2, trap->data);
}

static int
nouveau_pgraph_intr_swmthd(struct drm_device *dev,
			   struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (trap->channel < 0 ||
	    trap->channel >= dev_priv->engine.fifo.channels ||
	    !dev_priv->fifos[trap->channel])
		return -ENODEV;

	return nouveau_call_method(dev_priv->fifos[trap->channel],
				   trap->class, trap->mthd, trap->data);
}

static inline void
nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;

	nouveau_graph_trap_info(dev, &trap);

	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
		if (nouveau_pgraph_intr_swmthd(dev, &trap))
			unhandled = 1;
	} else {
		unhandled = 1;
	}

	if (unhandled)
		nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
}

static inline void
nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;

	nouveau_graph_trap_info(dev, &trap);
	trap.nsource = nsource;

	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
		if (nouveau_pgraph_intr_swmthd(dev, &trap))
			unhandled = 1;
	} else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
		uint32_t v = nv_rd32(dev, 0x402000);
		nv_wr32(dev, 0x402000, v);

		/* dump the error anyway for now: it's useful for
		   Gallium development */
		unhandled = 1;
	} else {
		unhandled = 1;
	}

	if (unhandled && nouveau_ratelimit())
		nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
}

static inline void
nouveau_pgraph_intr_context_switch(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	uint32_t chid;

	chid = engine->fifo.channel_id(dev);
	NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);

	switch (dev_priv->card_type) {
	case NV_04:
		nv04_graph_context_switch(dev);
		break;
	case NV_10:
		nv10_graph_context_switch(dev);
		break;
	default:
		NV_ERROR(dev, "Context switch not implemented\n");
		break;
	}
}

static void
nouveau_pgraph_irq_handler(struct drm_device *dev)
{
	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);

		if (status & NV_PGRAPH_INTR_NOTIFY) {
			nouveau_pgraph_intr_notify(dev, nsource);

			status &= ~NV_PGRAPH_INTR_NOTIFY;
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
		}

		if (status & NV_PGRAPH_INTR_ERROR) {
			nouveau_pgraph_intr_error(dev, nsource);

			status &= ~NV_PGRAPH_INTR_ERROR;
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
		}

		if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
			nv_wr32(dev, NV03_PGRAPH_INTR,
				NV_PGRAPH_INTR_CONTEXT_SWITCH);

			nouveau_pgraph_intr_context_switch(dev);
		}

		if (status) {
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}

		if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
			nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
}

static struct nouveau_enum_names nv50_mp_exec_error_names[] =
{
	{ 3, "STACK_UNDERFLOW" },
	{ 4, "QUADON_ACTIVE" },
	{ 8, "TIMEOUT" },
	{ 0x10, "INVALID_OPCODE" },
	{ 0x40, "BREAKPOINT" },
};
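
/*
 * Walk the MPs inside one TP (up to four, enable bits 24-27 of 0x1540),
 * decode any execution error they report using the table above, then
 * acknowledge it.  'display' carries the caller's ratelimit decision.
 */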
static void
nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t units = nv_rd32(dev, 0x1540);
	uint32_t addr, mp10, status, pc, oplow, ophigh;
	int i;
	int mps = 0;
	for (i = 0; i < 4; i++) {
		if (!(units & 1 << (i+24)))
			continue;
		if (dev_priv->chipset < 0xa0)
			addr = 0x408200 + (tpid << 12) + (i << 7);
		else
			addr = 0x408100 + (tpid << 11) + (i << 7);
		mp10 = nv_rd32(dev, addr + 0x10);
		status = nv_rd32(dev, addr + 0x14);
		if (!status)
			continue;
		if (display) {
			nv_rd32(dev, addr + 0x20);
			pc = nv_rd32(dev, addr + 0x24);
			oplow = nv_rd32(dev, addr + 0x70);
			ophigh = nv_rd32(dev, addr + 0x74);
			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
					"TP %d MP %d: ", tpid, i);
			nouveau_print_enum_names(status,
					nv50_mp_exec_error_names);
			printk(" at %06x warp %d, opcode %08x %08x\n",
					pc&0xffffff, pc >> 24,
					oplow, ophigh);
		}
		nv_wr32(dev, addr + 0x10, mp10);
		nv_wr32(dev, addr + 0x14, 0);
		mps++;
	}
	if (!mps && display)
		NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
				"No MPs claiming errors?\n", tpid);
}
static void
nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
		uint32_t ustatus_new, int display, const char *name)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int tps = 0;
	uint32_t units = nv_rd32(dev, 0x1540);
	int i, r;
	uint32_t ustatus_addr, ustatus;
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;
		if (dev_priv->chipset < 0xa0)
			ustatus_addr = ustatus_old + (i << 12);
		else
			ustatus_addr = ustatus_new + (i << 11);
		ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
		if (!ustatus)
			continue;
		tps++;
		switch (type) {
		case 6: /* texture error... unknown for now */
			nv50_fb_vm_trap(dev, display, name);
			if (display) {
				NV_ERROR(dev, "magic set %d:\n", i);
				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
					NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
						nv_rd32(dev, r));
			}
			break;
		case 7: /* MP error */
			if (ustatus & 0x00010000) {
				nv50_pgraph_mp_trap(dev, i, display);
				ustatus &= ~0x00010000;
			}
			break;
		case 8: /* TPDMA error */
			{
			uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
			uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
			uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
			uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
			nv50_fb_vm_trap(dev, display, name);
			/* 2d engine destination */
			if (ustatus & 0x00000010) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000010;
			}
			/* Render target */
			if (ustatus & 0x00000040) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000040;
			}
			/* CUDA memory: l[], g[] or stack. */
			if (ustatus & 0x00000080) {
				if (display) {
					if (e18 & 0x80000000) {
						/* g[] read fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 24) & 0x1f));
						e18 &= ~0x1f000000;
					} else if (e18 & 0xc) {
						/* g[] write fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 7) & 0x1f));
						e18 &= ~0x00000f80;
					} else {
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
								i, e14, e10);
					}
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000080;
			}
			}
			break;
		}
		if (ustatus) {
			if (display)
				NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
		}
		nv_wr32(dev, ustatus_addr, 0xc0000000);
	}

	if (!tps && display)
		NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
}
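
/*
 * Each bit in 0x400108 corresponds to one PGRAPH sub-unit.  For every set
 * bit, decode that unit's own ustatus register, attempt a recovery where
 * one is known, and acknowledge the bit.  'display' holds a single
 * ratelimit decision for the whole pass.
 */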
static void
nv50_pgraph_trap_handler(struct drm_device *dev)
{
	struct nouveau_pgraph_trap trap;
	uint32_t status = nv_rd32(dev, 0x400108);
	uint32_t ustatus;
	int display = nouveau_ratelimit();

	if (!status && display) {
		nouveau_graph_trap_info(dev, &trap);
		nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
		NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
	}

	/* DISPATCH: Relays commands to other units and handles NOTIFY,
	 * COND, QUERY. If you get a trap from it, the command is still stuck
	 * in DISPATCH and you need to do something about it. */
	if (status & 0x001) {
		ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
		}

		/* Known to be triggered by screwed up NOTIFY and COND... */
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
			nv_wr32(dev, 0x400500, 0);
			if (nv_rd32(dev, 0x400808) & 0x80000000) {
				if (display) {
					if (nouveau_graph_trapped_channel(dev, &trap.channel))
						trap.channel = -1;
					trap.class = nv_rd32(dev, 0x400814);
					trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
					trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
					trap.data = nv_rd32(dev, 0x40080c);
					trap.data2 = nv_rd32(dev, 0x400810);
					nouveau_graph_dump_trap_info(dev,
							"PGRAPH_TRAP_DISPATCH_FAULT", &trap);
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
				}
				nv_wr32(dev, 0x400808, 0);
			} else if (display) {
				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
			}
			nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
			nv_wr32(dev, 0x400848, 0);
			ustatus &= ~0x00000001;
		}
		if (ustatus & 0x00000002) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
			nv_wr32(dev, 0x400500, 0);
			if (nv_rd32(dev, 0x40084c) & 0x80000000) {
				if (display) {
					if (nouveau_graph_trapped_channel(dev, &trap.channel))
						trap.channel = -1;
					trap.class = nv_rd32(dev, 0x400814);
					trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
					trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
					trap.data = nv_rd32(dev, 0x40085c);
					trap.data2 = 0;
					nouveau_graph_dump_trap_info(dev,
							"PGRAPH_TRAP_DISPATCH_QUERY", &trap);
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
				}
				nv_wr32(dev, 0x40084c, 0);
			} else if (display) {
				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
			}
			ustatus &= ~0x00000002;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x400804, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x001);
		status &= ~0x001;
	}

	/* TRAPs other than dispatch use the "normal" trap regs. */
	if (status && display) {
		nouveau_graph_trap_info(dev, &trap);
		nouveau_graph_dump_trap_info(dev,
				"PGRAPH_TRAP", &trap);
	}

	/* M2MF: Memory to memory copy engine. */
	if (status & 0x002) {
		ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
			ustatus &= ~0x00000001;
		}
		if (ustatus & 0x00000002) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
			ustatus &= ~0x00000002;
		}
		if (ustatus & 0x00000004) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
			ustatus &= ~0x00000004;
		}
		NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
			nv_rd32(dev, 0x406804),
			nv_rd32(dev, 0x406808),
			nv_rd32(dev, 0x40680c),
			nv_rd32(dev, 0x406810));
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(dev, 0x400040, 2);
		nv_wr32(dev, 0x400040, 0);
		nv_wr32(dev, 0x406800, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x002);
		status &= ~0x002;
	}

	/* VFETCH: Fetches data from vertex buffers. */
	if (status & 0x004) {
		ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
			NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
				nv_rd32(dev, 0x400c00),
				nv_rd32(dev, 0x400c08),
				nv_rd32(dev, 0x400c0c),
				nv_rd32(dev, 0x400c10));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x400c04, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x004);
		status &= ~0x004;
	}

	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
	if (status & 0x008) {
		ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
			NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
				nv_rd32(dev, 0x401804),
				nv_rd32(dev, 0x401808),
				nv_rd32(dev, 0x40180c),
				nv_rd32(dev, 0x401810));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(dev, 0x400040, 0x80);
		nv_wr32(dev, 0x400040, 0);
		nv_wr32(dev, 0x401800, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x008);
		status &= ~0x008;
	}

	/* CCACHE: Handles code and c[] caches and fills them. */
	if (status & 0x010) {
		ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
			NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
				nv_rd32(dev, 0x405800),
				nv_rd32(dev, 0x405804),
				nv_rd32(dev, 0x405808),
				nv_rd32(dev, 0x40580c),
				nv_rd32(dev, 0x405810),
				nv_rd32(dev, 0x405814),
				nv_rd32(dev, 0x40581c));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x405018, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x010);
		status &= ~0x010;
	}

	/* Unknown, not seen yet... 0x402000 is the only trap status reg
	 * remaining, so try to handle it anyway. Perhaps related to that
	 * unknown DMA slot on tesla? */
	if (status & 0x20) {
		nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
		ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
		if (display)
			NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x402000, 0xc0000000);
		/* no status modification on purpose */
	}

	/* TEXTURE: CUDA texturing units */
	if (status & 0x040) {
		nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display,
				"PGRAPH_TRAP_TEXTURE");
		nv_wr32(dev, 0x400108, 0x040);
		status &= ~0x040;
	}

	/* MP: CUDA execution engines. */
	if (status & 0x080) {
		nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display,
				"PGRAPH_TRAP_MP");
		nv_wr32(dev, 0x400108, 0x080);
		status &= ~0x080;
	}

	/* TPDMA: Handles TP-initiated uncached memory accesses:
	 * l[], g[], stack, 2d surfaces, render targets. */
	if (status & 0x100) {
		nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display,
				"PGRAPH_TRAP_TPDMA");
		nv_wr32(dev, 0x400108, 0x100);
		status &= ~0x100;
	}

	if (status) {
		if (display)
			NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
				status);
		nv_wr32(dev, 0x400108, status);
	}
}

/* There must be a *lot* of these. Will take some time to gather them up. */
static struct nouveau_enum_names nv50_data_error_names[] =
{
	{ 4, "INVALID_VALUE" },
	{ 5, "INVALID_ENUM" },
	{ 8, "INVALID_OBJECT" },
	{ 0xc, "INVALID_BITFIELD" },
	{ 0x28, "MP_NO_REG_SPACE" },
	{ 0x2b, "MP_BLOCK_SIZE_MISMATCH" },
};

static void
nv50_pgraph_irq_handler(struct drm_device *dev)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;
	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		/* NOTIFY: You've set a NOTIFY on a command and it's done. */
		if (status & 0x00000001) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_NOTIFY", &trap);
			status &= ~0x00000001;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
		}

		/* COMPUTE_QUERY: Purpose and exact cause unknown, happens
		 * when you write 0x200 to 0x50c0 method 0x31c. */
		if (status & 0x00000002) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_COMPUTE_QUERY", &trap);
			status &= ~0x00000002;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
		}

		/* Unknown, never seen: 0x4 */

		/* ILLEGAL_MTHD: You used a wrong method for this class. */
		if (status & 0x00000010) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_pgraph_intr_swmthd(dev, &trap))
				unhandled = 1;
			if (unhandled && nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_ILLEGAL_MTHD", &trap);
			status &= ~0x00000010;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
		}

		/* ILLEGAL_CLASS: You used a wrong class. */
		if (status & 0x00000020) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_ILLEGAL_CLASS", &trap);
			status &= ~0x00000020;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
		}

		/* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
		if (status & 0x00000040) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_DOUBLE_NOTIFY", &trap);
			status &= ~0x00000040;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
		}

		/* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
		if (status & 0x00001000) {
			nv_wr32(dev, 0x400500, 0x00000000);
			nv_wr32(dev, NV03_PGRAPH_INTR,
				NV_PGRAPH_INTR_CONTEXT_SWITCH);
			nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
				NV40_PGRAPH_INTR_EN) &
				~NV_PGRAPH_INTR_CONTEXT_SWITCH);
			nv_wr32(dev, 0x400500, 0x00010001);

			nv50_graph_context_switch(dev);

			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		}

		/* BUFFER_NOTIFY: Your m2mf transfer finished */
		if (status & 0x00010000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_BUFFER_NOTIFY", &trap);
			status &= ~0x00010000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
		}

		/* DATA_ERROR: Invalid value for this method, or invalid
		 * state in current PGRAPH context for this operation */
		if (status & 0x00100000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit()) {
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_DATA_ERROR", &trap);
				NV_INFO (dev, "PGRAPH_DATA_ERROR - ");
				nouveau_print_enum_names(nv_rd32(dev, 0x400110),
						nv50_data_error_names);
				printk("\n");
			}
			status &= ~0x00100000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
		}

		/* TRAP: Something bad happened in the middle of command
		 * execution.  Has a billion types, subtypes, and even
		 * subsubtypes. */
		if (status & 0x00200000) {
			nv50_pgraph_trap_handler(dev);
			status &= ~0x00200000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
		}

		/* Unknown, never seen: 0x00400000 */

		/* SINGLE_STEP: Happens on every method if you turned on
		 * single stepping in 40008c */
		if (status & 0x01000000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_SINGLE_STEP", &trap);
			status &= ~0x01000000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
		}

		/* 0x02000000 happens when you pause a ctxprog...
		 * but the only way this can happen that I know is by
		 * poking the relevant MMIO register, and we don't
		 * do that. */

		if (status) {
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
				status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}

		{
			const int isb = (1 << 16) | (1 << 0);

			if ((nv_rd32(dev, 0x400500) & isb) != isb)
				nv_wr32(dev, 0x400500,
					nv_rd32(dev, 0x400500) | isb);
		}
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
	if (nv_rd32(dev, 0x400824) & (1 << 31))
		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}

static void
nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
{
	if (crtc & 1)
		nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);

	if (crtc & 2)
		nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
}
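
/*
 * Top-level PMC interrupt handler: read NV03_PMC_INTR_0 once and, under the
 * context switch lock, hand each pending source (PFIFO, PGRAPH, CRTC
 * vblank, NV50 display/I2C) to its handler.
 */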
irqreturn_t
nouveau_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t status;
	unsigned long flags;

	status = nv_rd32(dev, NV03_PMC_INTR_0);
	if (!status)
		return IRQ_NONE;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
		nouveau_fifo_irq_handler(dev);
		status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
	}

	if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
		if (dev_priv->card_type >= NV_50)
			nv50_pgraph_irq_handler(dev);
		else
			nouveau_pgraph_irq_handler(dev);

		status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
	}

	if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
		nouveau_crtc_irq_handler(dev, (status>>24)&3);
		status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
	}

	if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
		      NV_PMC_INTR_0_NV50_I2C_PENDING)) {
		nv50_display_irq_handler(dev);
		status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
			    NV_PMC_INTR_0_NV50_I2C_PENDING);
	}

	if (status)
		NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);

	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	return IRQ_HANDLED;
}