MFC r1.2. Fix CVE-2008-3831. Affects the Intel G33 series and newer only.
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * $DragonFly: src/sys/dev/drm/i915_dma.c,v 1.1.2.1 2008/10/27 03:46:17 hasso Exp $
 */
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	int i;

	for (i = 0; i < 10000; i++) {
		ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->space >= n)
			return 0;

		if (ring->head != last_head)
			i = 0;

		last_head = ring->head;
		DRM_UDELAY(1);
	}

	return -EBUSY;
}
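/*
 * A worked example of the space computation above, assuming a 64KB ring
 * (Size = 0x10000): with head = 0x0100 and tail = 0x0200,
 *
 *	space = 0x0100 - (0x0200 + 8) = -0x0108	(head has wrapped)
 *	space += 0x10000  ->  0xfef8 bytes free
 *
 * The extra 8 bytes keep tail from ever advancing right up to head, which
 * the hardware would read back as an empty ring.
 */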
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;
}
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq)
		drm_irq_uninstall(dev);

	if (dev_priv->ring.virtual_start) {
		drm_core_ioremapfree(&dev_priv->ring.map, dev);
		dev_priv->ring.virtual_start = 0;
		dev_priv->ring.map.handle = 0;
		dev_priv->ring.map.size = 0;
	}

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
		/* Need to rewrite hardware status page */
		I915_WRITE(0x02080, 0x1ffff000);
	}

	if (dev_priv->status_gfx_addr) {
		dev_priv->status_gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
		I915_WRITE(0x02080, 0x1ffff000);
	}

	return 0;
}
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		i915_dma_cleanup(dev);
		return -EINVAL;
	}

	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		i915_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return -EINVAL;
	}

#ifdef I915_HAVE_BUFFER
	dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
#endif

	dev_priv->sarea_priv = (drm_i915_sarea_t *)
	    ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->ring.map.offset = init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		i915_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->cpp = init->cpp;
	dev_priv->sarea_priv->pf_current_page = 0;

	/* We are using separate values as placeholders for mechanisms for
	 * private backbuffer/depthbuffer usage.
	 */
	dev_priv->use_mi_batchbuffer_start = 0;
	if (IS_I965G(dev)) /* 965 doesn't support older method */
		dev_priv->use_mi_batchbuffer_start = 1;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	/* Enable vblank on pipe A for older X servers
	 */
	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;

	/* Program Hardware Status Page */
	if (!IS_G33(dev)) {
		dev_priv->status_page_dmah =
			drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);

		if (!dev_priv->status_page_dmah) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Can not allocate hardware status page\n");
			return -ENOMEM;
		}
		dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
		dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

		memset(dev_priv->hw_status_page, 0, PAGE_SIZE);

		I915_WRITE(0x02080, dev_priv->dma_status_page);
	}
	DRM_DEBUG("Enabled hardware status page\n");
#ifdef I915_HAVE_BUFFER
	mutex_init(&dev_priv->cmdbuf_mutex);
#endif
	return 0;
}
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	DRM_DEBUG("\n");

	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		return -EINVAL;
	}

	if (!dev_priv->mmio_map) {
		DRM_ERROR("can not find mmio map!\n");
		return -EINVAL;
	}

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	if (dev_priv->status_gfx_addr != 0)
		I915_WRITE(0x02080, dev_priv->status_gfx_addr);
	else
		I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}
static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int do_validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}
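/*
 * For orientation: bits 31:29 of a command dword select the parsing client
 * (0x0 MI, 0x2 2D/blitter, 0x3 3D/render), and the low bits usually encode
 * "total dwords - 2".  As a hypothetical example, the 2D word 0x54000003
 * has client 0x2 and a length field of 3, so do_validate_cmd() returns
 * 3 + 2 = 5 and the scanner skips five dwords to the next instruction.
 */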
static int validate_cmd(int cmd)
{
	int ret = do_validate_cmd(cmd);

/*	printk("validate_cmd( %x ): %d\n", cmd, ret); */

	return ret;
}
static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
			  int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
		return -EINVAL;

	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return -EINVAL;

		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return -EINVAL;

		OUT_RING(cmd);

		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
							 sizeof(cmd))) {
				return -EINVAL;
			}
			OUT_RING(cmd);
		}
	}

	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
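/*
 * Note the (dwords + 1) & ~1 reservation and the trailing OUT_RING(0):
 * emission is rounded up to an even number of dwords because the ring tail
 * pointer must stay qword (8-byte) aligned on this hardware, and a zero
 * dword conveniently decodes as MI_NOOP.
 */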
static int i915_emit_box(struct drm_device * dev,
			 struct drm_clip_rect __user * boxes,
			 int i, int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect box;
	RING_LOCALS;

	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
		return -EFAULT;
	}

	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return -EINVAL;
	}

	if (IS_I965G(dev)) {
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	return 0;
}
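/*
 * drm_clip_rect treats (x2, y2) as exclusive, while DRAWRECT_INFO takes an
 * inclusive lower-right corner -- hence the x2 - 1 / y2 - 1 above.  A
 * 100x100 box at the origin arrives as (0,0)..(100,100) and is emitted as
 * (0,0)..(99,99).
 */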
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit.  For now, do it in both places:
 */
void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	if (++dev_priv->counter > BREADCRUMB_MASK) {
		dev_priv->counter = 1;
		DRM_DEBUG("Breadcrumb counter wrapped around\n");
	}

	dev_priv->sarea_priv->last_enqueue = dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
}
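/*
 * The store above drops the counter into the hardware status page: the
 * literal 20 appears to be the byte offset of the breadcrumb slot (dword 5,
 * the slot READ_BREADCRUMB() reads back), so "has the GPU reached this
 * point?" can be answered with a plain memory read instead of polling the
 * ring registers.
 */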
int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t flush_cmd = CMD_MI_FLUSH;
	RING_LOCALS;

	flush_cmd |= flush;

	i915_kernel_lost_context(dev);

	BEGIN_LP_RING(4);
	OUT_RING(flush_cmd);
	OUT_RING(0);
	OUT_RING(0);
	OUT_RING(0);
	ADVANCE_LP_RING();

	return 0;
}
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t * cmd)
{
#ifdef I915_HAVE_FENCE
	drm_i915_private_t *dev_priv = dev->dev_private;
#endif
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cmd->cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
	drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
	return 0;
}
static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect __user *boxes = batch->cliprects;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (dev_priv->use_mi_batchbuffer_start) {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
	drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
	return 0;
}
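/*
 * Both dispatch paths mark the batch as non-secure, so the hardware applies
 * roughly the same command screening that do_validate_cmd() mirrors in
 * software for the cmdbuffer path.  On 965 the non-secure flag moved from
 * bit 0 of the start address into the MI_BATCH_BUFFER_START header itself,
 * which is why the two OUT_RING() sequences differ.
 */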
static void i915_do_dispatch_flip(struct drm_device * dev, int plane, int sync)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 num_pages, current_page, next_page, dspbase;
	int shift = 2 * plane, x, y;
	RING_LOCALS;

	/* Calculate display base offset */
	num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
	current_page = (dev_priv->sarea_priv->pf_current_page >> shift) & 0x3;
	next_page = (current_page + 1) % num_pages;

	switch (next_page) {
	default:
	case 0:
		dspbase = dev_priv->sarea_priv->front_offset;
		break;
	case 1:
		dspbase = dev_priv->sarea_priv->back_offset;
		break;
	case 2:
		dspbase = dev_priv->sarea_priv->third_offset;
		break;
	}

	if (plane == 0) {
		x = dev_priv->sarea_priv->planeA_x;
		y = dev_priv->sarea_priv->planeA_y;
	} else {
		x = dev_priv->sarea_priv->planeB_x;
		y = dev_priv->sarea_priv->planeB_y;
	}

	dspbase += (y * dev_priv->sarea_priv->pitch + x) * dev_priv->cpp;

	DRM_DEBUG("plane=%d current_page=%d dspbase=0x%x\n", plane, current_page,
		  dspbase);

	BEGIN_LP_RING(4);
	OUT_RING(sync ? 0 :
		 (MI_WAIT_FOR_EVENT | (plane ? MI_WAIT_FOR_PLANE_B_FLIP :
				       MI_WAIT_FOR_PLANE_A_FLIP)));
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | (sync ? 0 : ASYNC_FLIP) |
		 (plane ? DISPLAY_PLANE_B : DISPLAY_PLANE_A));
	OUT_RING(dev_priv->sarea_priv->pitch * dev_priv->cpp);
	OUT_RING(dspbase);
	ADVANCE_LP_RING();

	dev_priv->sarea_priv->pf_current_page &= ~(0x3 << shift);
	dev_priv->sarea_priv->pf_current_page |= next_page << shift;
}
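/*
 * pf_current_page packs one 2-bit page index per plane (shift = 2 * plane):
 * bits 1:0 track plane A and bits 3:2 plane B, each cycling through front
 * (0), back (1) and, when a third buffer exists, third (2).  A value of
 * 0x6 (0b0110), for example, means plane A is showing the third buffer and
 * plane B the back buffer.
 */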
void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	DRM_DEBUG("planes=0x%x pfCurrentPage=%d\n",
		  planes, dev_priv->sarea_priv->pf_current_page);

	i915_emit_mi_flush(dev, MI_READ_FLUSH | MI_EXE_FLUSH);

	for (i = 0; i < 2; i++)
		if (planes & (1 << i))
			i915_do_dispatch_flip(dev, i, sync);

	i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
	if (!sync)
		drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
}
static int i915_quiescent(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_kernel_lost_context(dev);
	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return i915_quiescent(dev);
}
static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
		  batch->start, batch->used, batch->num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
							batch->num_cliprects *
							sizeof(struct drm_clip_rect)))
		return -EFAULT;

	ret = i915_dispatch_batchbuffer(dev, batch);

	sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
	return ret;
}
static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	int ret;

	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects &&
	    DRM_VERIFYAREA_READ(cmdbuf->cliprects,
				cmdbuf->num_cliprects *
				sizeof(struct drm_clip_rect))) {
		DRM_ERROR("Fault accessing cliprects\n");
		return -EFAULT;
	}

	ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		return ret;
	}

	sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
	return 0;
}
#if DRM_DEBUG_CODE
#define DRM_DEBUG_RELOCATION	(drm_debug != 0)
#else
#define DRM_DEBUG_RELOCATION	0
#endif
#ifdef I915_HAVE_BUFFER

struct i915_relocatee_info {
	struct drm_buffer_object *buf;
	unsigned long offset;
	u32 *data_page;
	unsigned page_offset;
	struct drm_bo_kmap_obj kmap;
	int is_iomem;
};

struct drm_i915_validate_buffer {
	struct drm_buffer_object *buffer;
	int presumed_offset_correct;
};

static void i915_dereference_buffers_locked(struct drm_i915_validate_buffer *buffers,
					    unsigned num_buffers)
{
	while (num_buffers--)
		drm_bo_usage_deref_locked(&buffers[num_buffers].buffer);
}
int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
		     struct drm_i915_validate_buffer *buffers,
		     struct i915_relocatee_info *relocatee,
		     uint32_t *reloc)
{
	unsigned index;
	unsigned long new_cmd_offset;
	u32 val;
	int ret;

	if (reloc[2] >= num_buffers) {
		DRM_ERROR("Illegal relocation buffer %08X\n", reloc[2]);
		return -EINVAL;
	}

	/*
	 * Short-circuit relocations that were correctly
	 * guessed by the client
	 */
	if (buffers[reloc[2]].presumed_offset_correct && !DRM_DEBUG_RELOCATION)
		return 0;

	new_cmd_offset = reloc[0];
	if (!relocatee->data_page ||
	    !drm_bo_same_page(relocatee->offset, new_cmd_offset)) {
		drm_bo_kunmap(&relocatee->kmap);
		relocatee->offset = new_cmd_offset;
		mutex_lock(&relocatee->buf->mutex);
		ret = drm_bo_wait(relocatee->buf, 0, 0, FALSE);
		mutex_unlock(&relocatee->buf->mutex);
		if (ret) {
			DRM_ERROR("Could not wait for buffer to apply relocs\n %08lx", new_cmd_offset);
			return ret;
		}
		ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT,
				  1, &relocatee->kmap);
		if (ret) {
			DRM_ERROR("Could not map command buffer to apply relocs\n %08lx", new_cmd_offset);
			return ret;
		}

		relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
						       &relocatee->is_iomem);
		relocatee->page_offset = (relocatee->offset & PAGE_MASK);
	}

	val = buffers[reloc[2]].buffer->offset;
	index = (reloc[0] - relocatee->page_offset) >> 2;

	/* add in validate */
	val = val + reloc[1];

	if (DRM_DEBUG_RELOCATION) {
		if (buffers[reloc[2]].presumed_offset_correct &&
		    relocatee->data_page[index] != val) {
			DRM_DEBUG("Relocation mismatch source %d target %d buffer %d user %08x kernel %08x\n",
				  reloc[0], reloc[1], reloc[2], relocatee->data_page[index], val);
		}
	}
	relocatee->data_page[index] = val;
	return 0;
}
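/*
 * Each relocation is a triple of uint32_t:
 *	reloc[0]  byte offset within the command buffer of the dword to patch
 *	reloc[1]  delta added to the target buffer's final offset
 *	reloc[2]  index into the validated buffer list naming the target
 * so the value written is buffers[reloc[2]].buffer->offset + reloc[1], at
 * dword (reloc[0] - page_offset) >> 2 of the currently mapped page.
 */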
int i915_process_relocs(struct drm_file *file_priv,
			uint32_t buf_handle,
			uint32_t *reloc_buf_handle,
			struct i915_relocatee_info *relocatee,
			struct drm_i915_validate_buffer *buffers,
			uint32_t num_buffers)
{
	struct drm_device *dev = file_priv->head->dev;
	struct drm_buffer_object *reloc_list_object;
	uint32_t cur_handle = *reloc_buf_handle;
	uint32_t *reloc_page;
	int ret, reloc_is_iomem, reloc_stride;
	uint32_t num_relocs, reloc_offset, reloc_end, reloc_page_offset, next_offset, cur_offset;
	struct drm_bo_kmap_obj reloc_kmap;

	memset(&reloc_kmap, 0, sizeof(reloc_kmap));

	mutex_lock(&dev->struct_mutex);
	reloc_list_object = drm_lookup_buffer_object(file_priv, cur_handle, 1);
	mutex_unlock(&dev->struct_mutex);
	if (!reloc_list_object)
		return -EINVAL;

	ret = drm_bo_kmap(reloc_list_object, 0, 1, &reloc_kmap);
	if (ret) {
		DRM_ERROR("Could not map relocation buffer.\n");
		goto out;
	}

	reloc_page = drm_bmo_virtual(&reloc_kmap, &reloc_is_iomem);
	num_relocs = reloc_page[0] & 0xffff;

	if ((reloc_page[0] >> 16) & 0xffff) {
		DRM_ERROR("Unsupported relocation type requested\n");
		goto out;
	}

	/* get next relocate buffer handle */
	*reloc_buf_handle = reloc_page[1];
	reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t); /* may be different for other types of relocs */

	DRM_DEBUG("num relocs is %d, next is %08X\n", num_relocs, reloc_page[1]);

	reloc_page_offset = 0;
	reloc_offset = I915_RELOC_HEADER * sizeof(uint32_t);
	reloc_end = reloc_offset + (num_relocs * reloc_stride);

	do {
		next_offset = drm_bo_offset_end(reloc_offset, reloc_end);

		do {
			cur_offset = ((reloc_offset + reloc_page_offset) & ~PAGE_MASK) / sizeof(uint32_t);
			ret = i915_apply_reloc(file_priv, num_buffers,
					       buffers, relocatee, &reloc_page[cur_offset]);
			if (ret)
				goto out;

			reloc_offset += reloc_stride;
		} while (reloc_offset < next_offset);

		drm_bo_kunmap(&reloc_kmap);

		reloc_offset = next_offset;
		if (reloc_offset != reloc_end) {
			ret = drm_bo_kmap(reloc_list_object, reloc_offset >> PAGE_SHIFT, 1, &reloc_kmap);
			if (ret) {
				DRM_ERROR("Could not map relocation buffer.\n");
				goto out;
			}

			reloc_page = drm_bmo_virtual(&reloc_kmap, &reloc_is_iomem);
			reloc_page_offset = reloc_offset & ~PAGE_MASK;
		}

	} while (reloc_offset != reloc_end);
out:
	drm_bo_kunmap(&relocatee->kmap);
	relocatee->data_page = NULL;

	drm_bo_kunmap(&reloc_kmap);

	mutex_lock(&dev->struct_mutex);
	drm_bo_usage_deref_locked(&reloc_list_object);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
			   drm_handle_t buf_reloc_handle,
			   struct drm_i915_validate_buffer *buffers,
			   uint32_t buf_count)
{
	struct drm_device *dev = file_priv->head->dev;
	struct i915_relocatee_info relocatee;
	int ret = 0;
	int b;

	/*
	 * Short circuit relocations when all previous
	 * buffers' offsets were correctly guessed by
	 * the client
	 */
	if (!DRM_DEBUG_RELOCATION) {
		for (b = 0; b < buf_count; b++)
			if (!buffers[b].presumed_offset_correct)
				break;

		if (b == buf_count)
			return 0;
	}

	memset(&relocatee, 0, sizeof(relocatee));

	mutex_lock(&dev->struct_mutex);
	relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1);
	mutex_unlock(&dev->struct_mutex);
	if (!relocatee.buf) {
		DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle);
		ret = -EINVAL;
		goto out_err;
	}

	while (buf_reloc_handle) {
		ret = i915_process_relocs(file_priv, buf_handle, &buf_reloc_handle, &relocatee, buffers, buf_count);
		if (ret) {
			DRM_ERROR("process relocs failed\n");
			break;
		}
	}

	mutex_lock(&dev->struct_mutex);
	drm_bo_usage_deref_locked(&relocatee.buf);
	mutex_unlock(&dev->struct_mutex);

out_err:
	return ret;
}
/*
 * Validate, add fence and relocate a block of bos from a userspace list
 */
int i915_validate_buffer_list(struct drm_file *file_priv,
			      unsigned int fence_class, uint64_t data,
			      struct drm_i915_validate_buffer *buffers,
			      uint32_t *num_buffers)
{
	struct drm_i915_op_arg arg;
	struct drm_bo_op_req *req = &arg.d.req;
	struct drm_bo_arg_rep rep;
	unsigned long next = 0;
	int ret = 0;
	unsigned buf_count = 0;
	struct drm_device *dev = file_priv->head->dev;
	uint32_t buf_reloc_handle, buf_handle;

	do {
		if (buf_count >= *num_buffers) {
			DRM_ERROR("Buffer count exceeded %d.\n", *num_buffers);
			ret = -EINVAL;
			goto out_err;
		}

		buffers[buf_count].buffer = NULL;
		buffers[buf_count].presumed_offset_correct = 0;

		if (copy_from_user(&arg, (void __user *)(unsigned long)data, sizeof(arg))) {
			ret = -EFAULT;
			goto out_err;
		}

		if (arg.handled) {
			data = arg.next;
			mutex_lock(&dev->struct_mutex);
			buffers[buf_count].buffer = drm_lookup_buffer_object(file_priv, req->arg_handle, 1);
			mutex_unlock(&dev->struct_mutex);
			buf_count++;
			continue;
		}

		rep.ret = 0;
		if (req->op != drm_bo_validate) {
			DRM_ERROR
			    ("Buffer object operation wasn't \"validate\".\n");
			rep.ret = -EINVAL;
			goto out_err;
		}

		buf_handle = req->bo_req.handle;
		buf_reloc_handle = arg.reloc_handle;

		if (buf_reloc_handle) {
			ret = i915_exec_reloc(file_priv, buf_handle, buf_reloc_handle, buffers, buf_count);
			if (ret)
				goto out_err;
			DRM_MEMORYBARRIER();
		}

		rep.ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
						 req->bo_req.flags, req->bo_req.mask,
						 req->bo_req.hint,
						 req->bo_req.fence_class, 0,
						 &rep.bo_info,
						 &buffers[buf_count].buffer);

		if (rep.ret) {
			DRM_ERROR("error on handle validate %d\n", rep.ret);
			goto out_err;
		}

		/*
		 * If the user provided a presumed offset hint, check whether
		 * the buffer is in the same place; if so, relocations relative
		 * to this buffer need not be performed
		 */
		if ((req->bo_req.hint & DRM_BO_HINT_PRESUMED_OFFSET) &&
		    buffers[buf_count].buffer->offset == req->bo_req.presumed_offset) {
			buffers[buf_count].presumed_offset_correct = 1;
		}

		next = arg.next;
		arg.handled = 1;
		arg.d.rep = rep;

		if (copy_to_user((void __user *)(unsigned long)data, &arg, sizeof(arg)))
			return -EFAULT;

		data = next;
		buf_count++;

	} while (next != 0);
	*num_buffers = buf_count;
	return 0;
out_err:
	mutex_lock(&dev->struct_mutex);
	i915_dereference_buffers_locked(buffers, buf_count);
	mutex_unlock(&dev->struct_mutex);
	*num_buffers = 0;
	return (ret) ? ret : rep.ret;
}
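/*
 * The list above arrives as a chain in user memory: each drm_i915_op_arg
 * is copied in from `data', its result (`rep') is written back in place,
 * and its next field yields the following entry until next == 0.  Entries
 * already flagged `handled' are only re-looked-up, not validated again.
 */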
static int i915_execbuffer(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	struct drm_i915_execbuffer *exec_buf = data;
	struct _drm_i915_batchbuffer *batch = &exec_buf->batch;
	struct drm_fence_arg *fence_arg = &exec_buf->fence_arg;
	int num_buffers;
	int ret;
	struct drm_i915_validate_buffer *buffers;
	struct drm_fence_object *fence;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
							batch->num_cliprects *
							sizeof(struct drm_clip_rect)))
		return -EFAULT;

	if (exec_buf->num_buffers > dev_priv->max_validate_buffers)
		return -EINVAL;

	ret = drm_bo_read_lock(&dev->bm.bm_lock);
	if (ret)
		return ret;

	/*
	 * The cmdbuf_mutex makes sure the validate-submit-fence
	 * operation is atomic.
	 */

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		drm_bo_read_unlock(&dev->bm.bm_lock);
		return -EAGAIN;
	}

	num_buffers = exec_buf->num_buffers;

	buffers = drm_calloc(num_buffers, sizeof(struct drm_i915_validate_buffer), DRM_MEM_DRIVER);
	if (!buffers) {
		drm_bo_read_unlock(&dev->bm.bm_lock);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		return -ENOMEM;
	}

	/* validate buffer list + fixup relocations */
	ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list,
					buffers, &num_buffers);
	if (ret)
		goto out_free;

	/* make sure all previous memory operations have passed */
	DRM_MEMORYBARRIER();
	drm_agp_chipset_flush(dev);

	/* submit buffer */
	batch->start = buffers[num_buffers-1].buffer->offset;

	DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n",
		  batch->start, batch->used, batch->num_cliprects);

	ret = i915_dispatch_batchbuffer(dev, batch);
	if (ret)
		goto out_err0;

	sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

	/* fence */
	ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
	if (ret)
		goto out_err0;

	if (!(fence_arg->flags & DRM_FENCE_FLAG_NO_USER)) {
		ret = drm_fence_add_user_object(file_priv, fence, fence_arg->flags & DRM_FENCE_FLAG_SHAREABLE);
		if (!ret) {
			fence_arg->handle = fence->base.hash.key;
			fence_arg->fence_class = fence->fence_class;
			fence_arg->type = fence->type;
			fence_arg->signaled = fence->signaled;
		}
	}
	drm_fence_usage_deref_unlocked(&fence);
out_err0:

	/* handle errors */
	mutex_lock(&dev->struct_mutex);
	i915_dereference_buffers_locked(buffers, num_buffers);
	mutex_unlock(&dev->struct_mutex);

out_free:
	drm_free(buffers, (exec_buf->num_buffers * sizeof(struct drm_buffer_object *)), DRM_MEM_DRIVER);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	drm_bo_read_unlock(&dev->bm.bm_lock);
	return ret;
}
#endif
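/*
 * That ends the TTM execbuffer path (I915_HAVE_BUFFER): validate every
 * buffer in the list (placing it and patching its relocations), flush,
 * dispatch the last buffer of the list as the batch, then fence the whole
 * set so userspace can wait on completion through the returned fence_arg.
 */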
static int i915_do_cleanup_pageflip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, planes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;

	DRM_DEBUG("\n");

	for (i = 0, planes = 0; i < 2; i++)
		if (dev_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) {
			dev_priv->sarea_priv->pf_current_page =
				(dev_priv->sarea_priv->pf_current_page &
				 ~(0x3 << (2 * i))) | ((num_pages - 1) << (2 * i));

			planes |= 1 << i;
		}

	if (planes)
		i915_dispatch_flip(dev, planes, 0);

	return 0;
}
static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_i915_flip_t *param = data;

	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* This is really planes */
	if (param->pipes & ~0x3) {
		DRM_ERROR("Invalid planes 0x%x, only <= 0x3 is valid\n",
			  param->pipes);
		return -EINVAL;
	}

	i915_dispatch_flip(dev, param->pipes, 0);

	return 0;
}
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	default:
		DRM_ERROR("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}
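/*
 * A minimal userspace sketch of this ioctl, assuming an fd opened on the
 * DRM device node and the DRM_IOCTL_I915_GETPARAM wrapper from i915_drm.h
 * (plain ioctl(2) shown; libdrm offers equivalent helpers):
 *
 *	int value = 0;
 *	drm_i915_getparam_t gp;
 *
 *	gp.param = I915_PARAM_LAST_DISPATCH;
 *	gp.value = &value;
 *	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *		printf("last dispatched breadcrumb: %d\n", value);
 */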
static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		if (!IS_I965G(dev))
			dev_priv->use_mi_batchbuffer_start = param->value;
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param->value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param->value;
		break;
	default:
		DRM_ERROR("unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	return 0;
}
drm_i915_mmio_entry_t mmio_table[] = {
	[MMIO_REGS_PS_DEPTH_COUNT] = {
		I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE,
		0x2350,
		8
	}
};

static int mmio_table_size = sizeof(mmio_table)/sizeof(drm_i915_mmio_entry_t);
static int i915_mmio(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	uint32_t buf[8];
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mmio_entry_t *e;
	drm_i915_mmio_t *mmio = data;
	void __iomem *base;
	int i;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (mmio->reg >= mmio_table_size)
		return -EINVAL;

	e = &mmio_table[mmio->reg];
	base = (u8 *) dev_priv->mmio_map->handle + e->offset;

	switch (mmio->read_write) {
	case I915_MMIO_READ:
		if (!(e->flag & I915_MMIO_MAY_READ))
			return -EINVAL;
		for (i = 0; i < e->size / 4; i++)
			buf[i] = I915_READ(e->offset + i * 4);
		if (DRM_COPY_TO_USER(mmio->data, buf, e->size)) {
			DRM_ERROR("DRM_COPY_TO_USER failed\n");
			return -EFAULT;
		}
		break;

	case I915_MMIO_WRITE:
		if (!(e->flag & I915_MMIO_MAY_WRITE))
			return -EINVAL;
		if (DRM_COPY_FROM_USER(buf, mmio->data, e->size)) {
			DRM_ERROR("DRM_COPY_FROM_USER failed\n");
			return -EFAULT;
		}
		for (i = 0; i < e->size / 4; i++)
			I915_WRITE(e->offset + i * 4, buf[i]);
		break;
	}
	return 0;
}
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}
	DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);

	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		i915_dma_cleanup(dev);
		dev_priv->status_gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
			  " G33 hw status page\n");
		return -ENOMEM;
	}
	dev_priv->hw_status_page = dev_priv->hws_map.handle;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(0x02080, dev_priv->status_gfx_addr);

	DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
		  dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
	return 0;
}
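/*
 * G33-class chips fetch their status page from graphics memory -- the
 * address written to HWS_PGA (0x2080) above -- rather than from the
 * PCI-consistent page i915_initialize() allocates for older parts, which
 * is why that function skips the allocation when IS_G33(dev).  This ioctl
 * is also the target of the CVE-2008-3831 fix carried by this commit: it
 * is restricted to DRM_MASTER|DRM_ROOT_ONLY in the table below, since an
 * unprivileged caller could otherwise repoint the status page and have the
 * GPU scribble over memory of its choosing.
 */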
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long base, size;
	int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
	if (dev_priv == NULL)
		return -ENOMEM;

	memset(dev_priv, 0, sizeof(drm_i915_private_t));

	dev->dev_private = (void *)dev_priv;

	/* Add register map (needed for suspend/resume) */
	base = drm_get_resource_start(dev, mmio_bar);
	size = drm_get_resource_len(dev, mmio_bar);

	ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
			 _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);

#ifdef __linux__
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
	intel_init_chipset_flush_compat(dev);
#endif
#endif

	return ret;
}
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->mmio_map)
		drm_rmmap(dev, dev_priv->mmio_map);

	drm_free(dev->dev_private, sizeof(drm_i915_private_t),
		 DRM_MEM_DRIVER);
#ifdef __linux__
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
	intel_fini_chipset_flush_compat(dev);
#endif
#endif
	return 0;
}
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (drm_getsarea(dev) && dev_priv->sarea_priv)
		i915_do_cleanup_pageflip(dev);
	if (dev_priv->agp_heap)
		i915_mem_takedown(&(dev_priv->agp_heap));

	i915_dma_cleanup(dev);
}

void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_MMIO, i915_mmio, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#ifdef I915_HAVE_BUFFER
	DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH),
#endif
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev   The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}
int i915_driver_firstopen(struct drm_device *dev)
{
#ifdef I915_HAVE_BUFFER
	drm_bo_driver_init(dev);
#endif
	return 0;
}