1 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 * $DragonFly: src/sys/dev/drm/i915_dma.c,v 1.1.2.1 2008/10/27 03:46:17 hasso Exp $
35 /* Really want an OS-independent resettable timer. Would like to have
36 * this loop run for (eg) 3 sec, but have the timer reset every time
37 * the head pointer changes, so that EBUSY only happens if the ring
38 * actually stalls for (eg) 3 seconds.
/*
 * i915_wait_ring - poll the LP ring's hardware HEAD register until the
 * ring has at least @n bytes free, or the poll budget runs out.
 *
 * NOTE(review): this extract is lossy -- the embedded original line
 * numbers jump (44 -> 47, 49 -> 51, 55 -> 58), so the declaration of
 * 'i', the negative-space guard, the early-success return and the
 * final timeout return are not visible here.  Comments describe only
 * what the surviving lines show.
 */
40 int i915_wait_ring(struct drm_device
* dev
, int n
, const char *caller
)
42 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
43 drm_i915_ring_buffer_t
*ring
= &(dev_priv
->ring
);
/* Snapshot of the masked hardware head pointer, used to detect progress. */
44 u32 last_head
= I915_READ(LP_RING
+ RING_HEAD
) & HEAD_ADDR
;
/* Bounded polling loop in place of a real resettable timer -- see the
 * comment immediately above this function in the original file. */
47 for (i
= 0; i
< 10000; i
++) {
/* Re-read the hardware head and recompute free space; the "+ 8" keeps
 * head from ever catching up to tail exactly (full vs. empty ambiguity). */
48 ring
->head
= I915_READ(LP_RING
+ RING_HEAD
) & HEAD_ADDR
;
49 ring
->space
= ring
->head
- (ring
->tail
+ 8);
/* Wrap-around correction: presumably guarded by an "if (space < 0)"
 * on the missing original line 50 -- TODO confirm against full source. */
51 ring
->space
+= ring
->Size
;
/* Any head movement counts as progress; remember the new position. */
55 if (ring
->head
!= last_head
)
58 last_head
= ring
->head
;
/*
 * i915_kernel_lost_context - re-synchronize the driver's software copy
 * of the LP ring state (head, tail, free space) from the hardware
 * registers after the kernel has lost track of what userspace emitted.
 *
 * NOTE(review): original line 73 (presumably the "if (ring->space < 0)"
 * guard before the wrap correction) is missing from this extract.
 */
65 void i915_kernel_lost_context(struct drm_device
* dev
)
67 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
68 drm_i915_ring_buffer_t
*ring
= &(dev_priv
->ring
);
/* Read back the masked head/tail pointers from the hardware ring regs. */
70 ring
->head
= I915_READ(LP_RING
+ RING_HEAD
) & HEAD_ADDR
;
71 ring
->tail
= I915_READ(LP_RING
+ RING_TAIL
) & TAIL_ADDR
;
/* Free space; "+ 8" avoids the full-vs-empty head==tail ambiguity. */
72 ring
->space
= ring
->head
- (ring
->tail
+ 8);
/* Wrap-around correction (its guard condition is not visible here). */
74 ring
->space
+= ring
->Size
;
/*
 * i915_dma_cleanup - tear down DMA state: the ring buffer mapping, the
 * PCI-allocated hardware status page, and any GFX-addressed status
 * page mapping.  Safe to call on a partially initialized device (each
 * resource is released only if present).
 *
 * NOTE(review): extract is lossy -- closing braces and the function's
 * return value are not visible here.
 */
77 static int i915_dma_cleanup(struct drm_device
* dev
)
79 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
80 /* Make sure interrupts are disabled here because the uninstall ioctl
81 * may not have been called from userspace and after dev_private
82 * is freed, it's too late.
85 drm_irq_uninstall(dev
);
/* Unmap the ring buffer and clear the stale mapping bookkeeping. */
87 if (dev_priv
->ring
.virtual_start
) {
88 drm_core_ioremapfree(&dev_priv
->ring
.map
, dev
);
89 dev_priv
->ring
.virtual_start
= 0;
90 dev_priv
->ring
.map
.handle
= 0;
91 dev_priv
->ring
.map
.size
= 0;
/* Free the PCI-consistent hardware status page, then point the HWS
 * register (0x02080) at a harmless address. */
94 if (dev_priv
->status_page_dmah
) {
95 drm_pci_free(dev
, dev_priv
->status_page_dmah
);
96 dev_priv
->status_page_dmah
= NULL
;
97 /* Need to rewrite hardware status page */
98 I915_WRITE(0x02080, 0x1ffff000);
/* Same teardown for the G33-style status page mapped at a GFX address. */
101 if (dev_priv
->status_gfx_addr
) {
102 dev_priv
->status_gfx_addr
= 0;
103 drm_core_ioremapfree(&dev_priv
->hws_map
, dev
);
104 I915_WRITE(0x02080, 0x1ffff000);
110 static int i915_initialize(struct drm_device
* dev
, drm_i915_init_t
* init
)
112 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
114 dev_priv
->sarea
= drm_getsarea(dev
);
115 if (!dev_priv
->sarea
) {
116 DRM_ERROR("can not find sarea!\n");
117 i915_dma_cleanup(dev
);
121 dev_priv
->mmio_map
= drm_core_findmap(dev
, init
->mmio_offset
);
122 if (!dev_priv
->mmio_map
) {
123 i915_dma_cleanup(dev
);
124 DRM_ERROR("can not find mmio map!\n");
128 #ifdef I915_HAVE_BUFFER
129 dev_priv
->max_validate_buffers
= I915_MAX_VALIDATE_BUFFERS
;
132 dev_priv
->sarea_priv
= (drm_i915_sarea_t
*)
133 ((u8
*) dev_priv
->sarea
->handle
+ init
->sarea_priv_offset
);
135 dev_priv
->ring
.Start
= init
->ring_start
;
136 dev_priv
->ring
.End
= init
->ring_end
;
137 dev_priv
->ring
.Size
= init
->ring_size
;
138 dev_priv
->ring
.tail_mask
= dev_priv
->ring
.Size
- 1;
140 dev_priv
->ring
.map
.offset
= init
->ring_start
;
141 dev_priv
->ring
.map
.size
= init
->ring_size
;
142 dev_priv
->ring
.map
.type
= 0;
143 dev_priv
->ring
.map
.flags
= 0;
144 dev_priv
->ring
.map
.mtrr
= 0;
146 drm_core_ioremap(&dev_priv
->ring
.map
, dev
);
148 if (dev_priv
->ring
.map
.handle
== NULL
) {
149 i915_dma_cleanup(dev
);
150 DRM_ERROR("can not ioremap virtual address for"
155 dev_priv
->ring
.virtual_start
= dev_priv
->ring
.map
.handle
;
157 dev_priv
->cpp
= init
->cpp
;
158 dev_priv
->sarea_priv
->pf_current_page
= 0;
160 /* We are using separate values as placeholders for mechanisms for
161 * private backbuffer/depthbuffer usage.
163 dev_priv
->use_mi_batchbuffer_start
= 0;
164 if (IS_I965G(dev
)) /* 965 doesn't support older method */
165 dev_priv
->use_mi_batchbuffer_start
= 1;
167 /* Allow hardware batchbuffers unless told otherwise.
169 dev_priv
->allow_batchbuffer
= 1;
171 /* Enable vblank on pipe A for older X servers
173 dev_priv
->vblank_pipe
= DRM_I915_VBLANK_PIPE_A
;
175 /* Program Hardware Status Page */
177 dev_priv
->status_page_dmah
=
178 drm_pci_alloc(dev
, PAGE_SIZE
, PAGE_SIZE
, 0xffffffff);
180 if (!dev_priv
->status_page_dmah
) {
181 i915_dma_cleanup(dev
);
182 DRM_ERROR("Can not allocate hardware status page\n");
185 dev_priv
->hw_status_page
= dev_priv
->status_page_dmah
->vaddr
;
186 dev_priv
->dma_status_page
= dev_priv
->status_page_dmah
->busaddr
;
188 memset(dev_priv
->hw_status_page
, 0, PAGE_SIZE
);
190 I915_WRITE(0x02080, dev_priv
->dma_status_page
);
192 DRM_DEBUG("Enabled hardware status page\n");
193 #ifdef I915_HAVE_BUFFER
194 mutex_init(&dev_priv
->cmdbuf_mutex
);
/*
 * i915_dma_resume - re-validate previously initialized DMA state after
 * a suspend/lost-context event and re-program the hardware status page
 * register (0x02080).  Unlike i915_initialize() this allocates nothing;
 * it only checks that the sarea, mmio map, ring mapping and status page
 * still exist.
 *
 * NOTE(review): extract is lossy -- error-return statements after each
 * DRM_ERROR, the else branch structure around lines 228-231, and the
 * final return are not visible here.
 */
199 static int i915_dma_resume(struct drm_device
* dev
)
201 drm_i915_private_t
*dev_priv
= (drm_i915_private_t
*) dev
->dev_private
;
205 if (!dev_priv
->sarea
) {
206 DRM_ERROR("can not find sarea!\n");
210 if (!dev_priv
->mmio_map
) {
211 DRM_ERROR("can not find mmio map!\n");
215 if (dev_priv
->ring
.map
.handle
== NULL
) {
216 DRM_ERROR("can not ioremap virtual address for"
221 /* Program Hardware Status Page */
222 if (!dev_priv
->hw_status_page
) {
223 DRM_ERROR("Can not find hardware status page\n");
226 DRM_DEBUG("hw status page @ %p\n", dev_priv
->hw_status_page
);
/* Prefer the GFX-addressed (G33-style) status page if one was set up;
 * otherwise fall back to the PCI DMA address. */
228 if (dev_priv
->status_gfx_addr
!= 0)
229 I915_WRITE(0x02080, dev_priv
->status_gfx_addr
);
231 I915_WRITE(0x02080, dev_priv
->dma_status_page
);
232 DRM_DEBUG("Enabled hardware status page\n");
/*
 * i915_dma_init - DRM_I915_INIT ioctl entry point.  Dispatches on
 * init->func to initialize, clean up, or resume DMA.
 *
 * NOTE(review): extract is lossy -- the I915_INIT_DMA case label
 * (original line 244), the break statements, the default case and the
 * "return retcode" are not visible here.
 */
237 static int i915_dma_init(struct drm_device
*dev
, void *data
,
238 struct drm_file
*file_priv
)
240 drm_i915_init_t
*init
= data
;
243 switch (init
->func
) {
245 retcode
= i915_initialize(dev
, init
);
247 case I915_CLEANUP_DMA
:
248 retcode
= i915_dma_cleanup(dev
);
250 case I915_RESUME_DMA
:
251 retcode
= i915_dma_resume(dev
);
261 /* Implement basically the same security restrictions as hardware does
262 * for MI_BATCH_NON_SECURE. These can be made stricter at any time.
264 * Most of the calculations below involve calculating the size of a
265 * particular instruction. It's important to get the size right as
266 * that tells us where the next instruction to check is. Any illegal
267 * instruction detected will be given a size of zero, which is a
268 * signal to abort the rest of the buffer.
*/
/*
 * do_validate_cmd - decode one command dword: the top switch keys on
 * the 3-bit opcode class (bits 31:29), inner switches on sub-opcodes.
 * Returns the instruction length in dwords, or 0 for a disallowed or
 * unknown instruction.
 *
 * NOTE(review): extract is lossy -- the case labels for the outer and
 * inner switches, closing braces, and the final default return are not
 * visible here; only the return expressions survive.
 */
270 static int do_validate_cmd(int cmd
)
272 switch (((cmd
>> 29) & 0x7)) {
274 switch ((cmd
>> 23) & 0x3f) {
276 return 1; /* MI_NOOP */
278 return 1; /* MI_FLUSH */
280 return 0; /* disallow everything else */
284 return 0; /* reserved */
286 return (cmd
& 0xff) + 2; /* 2d commands */
288 if (((cmd
>> 24) & 0x1f) <= 0x18)
291 switch ((cmd
>> 24) & 0x1f) {
295 switch ((cmd
>> 16) & 0xff) {
/* 3D sub-opcode length fields of varying width (5, 4, 16 bits). */
297 return (cmd
& 0x1f) + 2;
299 return (cmd
& 0xf) + 2;
301 return (cmd
& 0xffff) + 2;
305 return (cmd
& 0xffff) + 1;
309 if ((cmd
& (1 << 23)) == 0) /* inline vertices */
310 return (cmd
& 0x1ffff) + 2;
311 else if (cmd
& (1 << 17)) /* indirect random */
312 if ((cmd
& 0xffff) == 0)
313 return 0; /* unknown length, too hard */
315 return (((cmd
& 0xffff) + 1) / 2) + 1;
317 return 2; /* indirect sequential */
/*
 * validate_cmd - thin wrapper around do_validate_cmd() that existed to
 * host the (commented-out) debug printk of each validation result.
 * NOTE(review): the "return ret;" line is missing from this extract.
 */
328 static int validate_cmd(int cmd
)
330 int ret
= do_validate_cmd(cmd
);
332 /* printk("validate_cmd( %x ): %d\n", cmd, ret); */
337 static int i915_emit_cmds(struct drm_device
*dev
, int __user
*buffer
,
340 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
344 if ((dwords
+1) * sizeof(int) >= dev_priv
->ring
.Size
- 8)
347 BEGIN_LP_RING((dwords
+1)&~1);
349 for (i
= 0; i
< dwords
;) {
352 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd
, &buffer
[i
], sizeof(cmd
)))
355 if ((sz
= validate_cmd(cmd
)) == 0 || i
+ sz
> dwords
)
361 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd
, &buffer
[i
],
377 static int i915_emit_box(struct drm_device
* dev
,
378 struct drm_clip_rect __user
* boxes
,
379 int i
, int DR1
, int DR4
)
381 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
382 struct drm_clip_rect box
;
385 if (DRM_COPY_FROM_USER_UNCHECKED(&box
, &boxes
[i
], sizeof(box
))) {
389 if (box
.y2
<= box
.y1
|| box
.x2
<= box
.x1
|| box
.y2
<= 0 || box
.x2
<= 0) {
390 DRM_ERROR("Bad box %d,%d..%d,%d\n",
391 box
.x1
, box
.y1
, box
.x2
, box
.y2
);
397 OUT_RING(GFX_OP_DRAWRECT_INFO_I965
);
398 OUT_RING((box
.x1
& 0xffff) | (box
.y1
<< 16));
399 OUT_RING(((box
.x2
- 1) & 0xffff) | ((box
.y2
- 1) << 16));
404 OUT_RING(GFX_OP_DRAWRECT_INFO
);
406 OUT_RING((box
.x1
& 0xffff) | (box
.y1
<< 16));
407 OUT_RING(((box
.x2
- 1) & 0xffff) | ((box
.y2
- 1) << 16));
416 /* XXX: Emitting the counter should really be moved to part of the IRQ
417 * emit. For now, do it in both places:
*/
/*
 * i915_emit_breadcrumb - bump the per-device breadcrumb counter
 * (wrapping back to 1 once it exceeds BREADCRUMB_MASK), mirror it into
 * the shared sarea as last_enqueue, and emit a CMD_STORE_DWORD_IDX so
 * the GPU writes the counter value when it reaches this point.
 *
 * NOTE(review): extract is lossy -- the BEGIN_LP_RING/ADVANCE_LP_RING
 * bracketing and the OUT_RING of the status-page index (between
 * original lines 433 and 435) are not visible here.
 */
420 void i915_emit_breadcrumb(struct drm_device
*dev
)
422 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
/* Wrap to 1 (not 0) so a zero breadcrumb never appears on the ring. */
425 if (++dev_priv
->counter
> BREADCRUMB_MASK
) {
426 dev_priv
->counter
= 1;
427 DRM_DEBUG("Breadcrumb counter wrapped around\n");
/* Publish the enqueued counter to userspace via the sarea. */
430 dev_priv
->sarea_priv
->last_enqueue
= dev_priv
->counter
;
433 OUT_RING(CMD_STORE_DWORD_IDX
);
435 OUT_RING(dev_priv
->counter
);
/*
 * i915_emit_mi_flush - emit an MI_FLUSH command, starting from the
 * base CMD_MI_FLUSH encoding; the @flush argument presumably selects
 * additional flush bits -- the lines that fold it into flush_cmd and
 * the ring emission (original lines 445+ / 450+) are missing from this
 * extract, so only the setup is visible.
 */
441 int i915_emit_mi_flush(struct drm_device
*dev
, uint32_t flush
)
443 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
444 uint32_t flush_cmd
= CMD_MI_FLUSH
;
/* Resync software ring state before touching the ring. */
449 i915_kernel_lost_context(dev
);
462 static int i915_dispatch_cmdbuffer(struct drm_device
* dev
,
463 drm_i915_cmdbuffer_t
* cmd
)
465 #ifdef I915_HAVE_FENCE
466 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
468 int nbox
= cmd
->num_cliprects
;
469 int i
= 0, count
, ret
;
472 DRM_ERROR("alignment\n");
476 i915_kernel_lost_context(dev
);
478 count
= nbox
? nbox
: 1;
480 for (i
= 0; i
< count
; i
++) {
482 ret
= i915_emit_box(dev
, cmd
->cliprects
, i
,
488 ret
= i915_emit_cmds(dev
, (int __user
*)cmd
->buf
, cmd
->sz
/ 4);
493 i915_emit_breadcrumb(dev
);
494 #ifdef I915_HAVE_FENCE
495 drm_fence_flush_old(dev
, 0, dev_priv
->counter
);
500 static int i915_dispatch_batchbuffer(struct drm_device
* dev
,
501 drm_i915_batchbuffer_t
* batch
)
503 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
504 struct drm_clip_rect __user
*boxes
= batch
->cliprects
;
505 int nbox
= batch
->num_cliprects
;
509 if ((batch
->start
| batch
->used
) & 0x7) {
510 DRM_ERROR("alignment\n");
514 i915_kernel_lost_context(dev
);
516 count
= nbox
? nbox
: 1;
518 for (i
= 0; i
< count
; i
++) {
520 int ret
= i915_emit_box(dev
, boxes
, i
,
521 batch
->DR1
, batch
->DR4
);
526 if (dev_priv
->use_mi_batchbuffer_start
) {
529 OUT_RING(MI_BATCH_BUFFER_START
| (2 << 6) | MI_BATCH_NON_SECURE_I965
);
530 OUT_RING(batch
->start
);
532 OUT_RING(MI_BATCH_BUFFER_START
| (2 << 6));
533 OUT_RING(batch
->start
| MI_BATCH_NON_SECURE
);
539 OUT_RING(MI_BATCH_BUFFER
);
540 OUT_RING(batch
->start
| MI_BATCH_NON_SECURE
);
541 OUT_RING(batch
->start
+ batch
->used
- 4);
547 i915_emit_breadcrumb(dev
);
548 #ifdef I915_HAVE_FENCE
549 drm_fence_flush_old(dev
, 0, dev_priv
->counter
);
554 static void i915_do_dispatch_flip(struct drm_device
* dev
, int plane
, int sync
)
556 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
557 u32 num_pages
, current_page
, next_page
, dspbase
;
558 int shift
= 2 * plane
, x
, y
;
561 /* Calculate display base offset */
562 num_pages
= dev_priv
->sarea_priv
->third_handle
? 3 : 2;
563 current_page
= (dev_priv
->sarea_priv
->pf_current_page
>> shift
) & 0x3;
564 next_page
= (current_page
+ 1) % num_pages
;
569 dspbase
= dev_priv
->sarea_priv
->front_offset
;
572 dspbase
= dev_priv
->sarea_priv
->back_offset
;
575 dspbase
= dev_priv
->sarea_priv
->third_offset
;
580 x
= dev_priv
->sarea_priv
->planeA_x
;
581 y
= dev_priv
->sarea_priv
->planeA_y
;
583 x
= dev_priv
->sarea_priv
->planeB_x
;
584 y
= dev_priv
->sarea_priv
->planeB_y
;
587 dspbase
+= (y
* dev_priv
->sarea_priv
->pitch
+ x
) * dev_priv
->cpp
;
589 DRM_DEBUG("plane=%d current_page=%d dspbase=0x%x\n", plane
, current_page
,
594 (MI_WAIT_FOR_EVENT
| (plane
? MI_WAIT_FOR_PLANE_B_FLIP
:
595 MI_WAIT_FOR_PLANE_A_FLIP
)));
596 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO
| (sync
? 0 : ASYNC_FLIP
) |
597 (plane
? DISPLAY_PLANE_B
: DISPLAY_PLANE_A
));
598 OUT_RING(dev_priv
->sarea_priv
->pitch
* dev_priv
->cpp
);
602 dev_priv
->sarea_priv
->pf_current_page
&= ~(0x3 << shift
);
603 dev_priv
->sarea_priv
->pf_current_page
|= next_page
<< shift
;
/*
 * i915_dispatch_flip - flush render/instruction caches, then dispatch
 * a page flip for every display plane selected in the low two bits of
 * @planes, and finally emit a breadcrumb (plus an old-fence flush when
 * fencing is compiled in).
 *
 * NOTE(review): extract is lossy -- the declaration of 'i', the #endif
 * matching the I915_HAVE_FENCE #ifdef, and the closing brace are not
 * visible here.
 */
606 void i915_dispatch_flip(struct drm_device
* dev
, int planes
, int sync
)
608 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
611 DRM_DEBUG("planes=0x%x pfCurrentPage=%d\n",
612 planes
, dev_priv
->sarea_priv
->pf_current_page
);
/* Make sure outstanding rendering is flushed before flipping. */
614 i915_emit_mi_flush(dev
, MI_READ_FLUSH
| MI_EXE_FLUSH
);
/* One flip per selected plane (bit 0 = plane A, bit 1 = plane B). */
616 for (i
= 0; i
< 2; i
++)
617 if (planes
& (1 << i
))
618 i915_do_dispatch_flip(dev
, i
, sync
);
620 i915_emit_breadcrumb(dev
);
621 #ifdef I915_HAVE_FENCE
623 drm_fence_flush_old(dev
, 0, dev_priv
->counter
);
/*
 * i915_quiescent - wait for the GPU to go idle: resync the software
 * ring state, then wait until the ring is as empty as it can get
 * (Size - 8 bytes free, matching the head/tail "+ 8" slack used by the
 * space accounting).  Returns i915_wait_ring()'s status.
 */
627 static int i915_quiescent(struct drm_device
*dev
)
629 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
631 i915_kernel_lost_context(dev
);
632 return i915_wait_ring(dev
, dev_priv
->ring
.Size
- 8, __FUNCTION__
);
/*
 * i915_flush_ioctl - DRM_I915_FLUSH ioctl: requires the caller to hold
 * the hardware lock (LOCK_TEST_WITH_RETURN returns early otherwise),
 * then waits for the GPU to quiesce.
 */
635 static int i915_flush_ioctl(struct drm_device
*dev
, void *data
,
636 struct drm_file
*file_priv
)
639 LOCK_TEST_WITH_RETURN(dev
, file_priv
);
641 return i915_quiescent(dev
);
644 static int i915_batchbuffer(struct drm_device
*dev
, void *data
,
645 struct drm_file
*file_priv
)
647 drm_i915_private_t
*dev_priv
= (drm_i915_private_t
*) dev
->dev_private
;
648 drm_i915_sarea_t
*sarea_priv
= (drm_i915_sarea_t
*)
649 dev_priv
->sarea_priv
;
650 drm_i915_batchbuffer_t
*batch
= data
;
653 if (!dev_priv
->allow_batchbuffer
) {
654 DRM_ERROR("Batchbuffer ioctl disabled\n");
658 DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
659 batch
->start
, batch
->used
, batch
->num_cliprects
);
661 LOCK_TEST_WITH_RETURN(dev
, file_priv
);
663 if (batch
->num_cliprects
&& DRM_VERIFYAREA_READ(batch
->cliprects
,
664 batch
->num_cliprects
*
665 sizeof(struct drm_clip_rect
)))
668 ret
= i915_dispatch_batchbuffer(dev
, batch
);
670 sarea_priv
->last_dispatch
= READ_BREADCRUMB(dev_priv
);
674 static int i915_cmdbuffer(struct drm_device
*dev
, void *data
,
675 struct drm_file
*file_priv
)
677 drm_i915_private_t
*dev_priv
= (drm_i915_private_t
*) dev
->dev_private
;
678 drm_i915_sarea_t
*sarea_priv
= (drm_i915_sarea_t
*)
679 dev_priv
->sarea_priv
;
680 drm_i915_cmdbuffer_t
*cmdbuf
= data
;
683 DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
684 cmdbuf
->buf
, cmdbuf
->sz
, cmdbuf
->num_cliprects
);
686 LOCK_TEST_WITH_RETURN(dev
, file_priv
);
688 if (cmdbuf
->num_cliprects
&&
689 DRM_VERIFYAREA_READ(cmdbuf
->cliprects
,
690 cmdbuf
->num_cliprects
*
691 sizeof(struct drm_clip_rect
))) {
692 DRM_ERROR("Fault accessing cliprects\n");
696 ret
= i915_dispatch_cmdbuffer(dev
, cmdbuf
);
698 DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
702 sarea_priv
->last_dispatch
= READ_BREADCRUMB(dev_priv
);
707 #define DRM_DEBUG_RELOCATION (drm_debug != 0)
709 #define DRM_DEBUG_RELOCATION 0
712 #ifdef I915_HAVE_BUFFER
714 struct i915_relocatee_info
{
715 struct drm_buffer_object
*buf
;
716 unsigned long offset
;
718 unsigned page_offset
;
719 struct drm_bo_kmap_obj kmap
;
723 struct drm_i915_validate_buffer
{
724 struct drm_buffer_object
*buffer
;
725 int presumed_offset_correct
;
728 static void i915_dereference_buffers_locked(struct drm_i915_validate_buffer
*buffers
,
729 unsigned num_buffers
)
731 while (num_buffers
--)
732 drm_bo_usage_deref_locked(&buffers
[num_buffers
].buffer
);
735 int i915_apply_reloc(struct drm_file
*file_priv
, int num_buffers
,
736 struct drm_i915_validate_buffer
*buffers
,
737 struct i915_relocatee_info
*relocatee
,
741 unsigned long new_cmd_offset
;
745 if (reloc
[2] >= num_buffers
) {
746 DRM_ERROR("Illegal relocation buffer %08X\n", reloc
[2]);
751 * Short-circuit relocations that were correctly
752 * guessed by the client
754 if (buffers
[reloc
[2]].presumed_offset_correct
&& !DRM_DEBUG_RELOCATION
)
757 new_cmd_offset
= reloc
[0];
758 if (!relocatee
->data_page
||
759 !drm_bo_same_page(relocatee
->offset
, new_cmd_offset
)) {
760 drm_bo_kunmap(&relocatee
->kmap
);
761 relocatee
->offset
= new_cmd_offset
;
762 mutex_lock (&relocatee
->buf
->mutex
);
763 ret
= drm_bo_wait (relocatee
->buf
, 0, 0, FALSE
);
764 mutex_unlock (&relocatee
->buf
->mutex
);
766 DRM_ERROR("Could not wait for buffer to apply relocs\n %08lx", new_cmd_offset
);
769 ret
= drm_bo_kmap(relocatee
->buf
, new_cmd_offset
>> PAGE_SHIFT
,
770 1, &relocatee
->kmap
);
772 DRM_ERROR("Could not map command buffer to apply relocs\n %08lx", new_cmd_offset
);
776 relocatee
->data_page
= drm_bmo_virtual(&relocatee
->kmap
,
777 &relocatee
->is_iomem
);
778 relocatee
->page_offset
= (relocatee
->offset
& PAGE_MASK
);
781 val
= buffers
[reloc
[2]].buffer
->offset
;
782 index
= (reloc
[0] - relocatee
->page_offset
) >> 2;
784 /* add in validate */
785 val
= val
+ reloc
[1];
787 if (DRM_DEBUG_RELOCATION
) {
788 if (buffers
[reloc
[2]].presumed_offset_correct
&&
789 relocatee
->data_page
[index
] != val
) {
790 DRM_DEBUG ("Relocation mismatch source %d target %d buffer %d user %08x kernel %08x\n",
791 reloc
[0], reloc
[1], reloc
[2], relocatee
->data_page
[index
], val
);
794 relocatee
->data_page
[index
] = val
;
798 int i915_process_relocs(struct drm_file
*file_priv
,
800 uint32_t *reloc_buf_handle
,
801 struct i915_relocatee_info
*relocatee
,
802 struct drm_i915_validate_buffer
*buffers
,
803 uint32_t num_buffers
)
805 struct drm_device
*dev
= file_priv
->head
->dev
;
806 struct drm_buffer_object
*reloc_list_object
;
807 uint32_t cur_handle
= *reloc_buf_handle
;
808 uint32_t *reloc_page
;
809 int ret
, reloc_is_iomem
, reloc_stride
;
810 uint32_t num_relocs
, reloc_offset
, reloc_end
, reloc_page_offset
, next_offset
, cur_offset
;
811 struct drm_bo_kmap_obj reloc_kmap
;
813 memset(&reloc_kmap
, 0, sizeof(reloc_kmap
));
815 mutex_lock(&dev
->struct_mutex
);
816 reloc_list_object
= drm_lookup_buffer_object(file_priv
, cur_handle
, 1);
817 mutex_unlock(&dev
->struct_mutex
);
818 if (!reloc_list_object
)
821 ret
= drm_bo_kmap(reloc_list_object
, 0, 1, &reloc_kmap
);
823 DRM_ERROR("Could not map relocation buffer.\n");
827 reloc_page
= drm_bmo_virtual(&reloc_kmap
, &reloc_is_iomem
);
828 num_relocs
= reloc_page
[0] & 0xffff;
830 if ((reloc_page
[0] >> 16) & 0xffff) {
831 DRM_ERROR("Unsupported relocation type requested\n");
835 /* get next relocate buffer handle */
836 *reloc_buf_handle
= reloc_page
[1];
837 reloc_stride
= I915_RELOC0_STRIDE
* sizeof(uint32_t); /* may be different for other types of relocs */
839 DRM_DEBUG("num relocs is %d, next is %08X\n", num_relocs
, reloc_page
[1]);
841 reloc_page_offset
= 0;
842 reloc_offset
= I915_RELOC_HEADER
* sizeof(uint32_t);
843 reloc_end
= reloc_offset
+ (num_relocs
* reloc_stride
);
846 next_offset
= drm_bo_offset_end(reloc_offset
, reloc_end
);
849 cur_offset
= ((reloc_offset
+ reloc_page_offset
) & ~PAGE_MASK
) / sizeof(uint32_t);
850 ret
= i915_apply_reloc(file_priv
, num_buffers
,
851 buffers
, relocatee
, &reloc_page
[cur_offset
]);
855 reloc_offset
+= reloc_stride
;
856 } while (reloc_offset
< next_offset
);
858 drm_bo_kunmap(&reloc_kmap
);
860 reloc_offset
= next_offset
;
861 if (reloc_offset
!= reloc_end
) {
862 ret
= drm_bo_kmap(reloc_list_object
, reloc_offset
>> PAGE_SHIFT
, 1, &reloc_kmap
);
864 DRM_ERROR("Could not map relocation buffer.\n");
868 reloc_page
= drm_bmo_virtual(&reloc_kmap
, &reloc_is_iomem
);
869 reloc_page_offset
= reloc_offset
& ~PAGE_MASK
;
872 } while (reloc_offset
!= reloc_end
);
874 drm_bo_kunmap(&relocatee
->kmap
);
875 relocatee
->data_page
= NULL
;
877 drm_bo_kunmap(&reloc_kmap
);
879 mutex_lock(&dev
->struct_mutex
);
880 drm_bo_usage_deref_locked(&reloc_list_object
);
881 mutex_unlock(&dev
->struct_mutex
);
886 static int i915_exec_reloc(struct drm_file
*file_priv
, drm_handle_t buf_handle
,
887 drm_handle_t buf_reloc_handle
,
888 struct drm_i915_validate_buffer
*buffers
,
891 struct drm_device
*dev
= file_priv
->head
->dev
;
892 struct i915_relocatee_info relocatee
;
897 * Short circuit relocations when all previous
898 * buffers offsets were correctly guessed by
901 if (!DRM_DEBUG_RELOCATION
) {
902 for (b
= 0; b
< buf_count
; b
++)
903 if (!buffers
[b
].presumed_offset_correct
)
910 memset(&relocatee
, 0, sizeof(relocatee
));
912 mutex_lock(&dev
->struct_mutex
);
913 relocatee
.buf
= drm_lookup_buffer_object(file_priv
, buf_handle
, 1);
914 mutex_unlock(&dev
->struct_mutex
);
915 if (!relocatee
.buf
) {
916 DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle
);
921 while (buf_reloc_handle
) {
922 ret
= i915_process_relocs(file_priv
, buf_handle
, &buf_reloc_handle
, &relocatee
, buffers
, buf_count
);
924 DRM_ERROR("process relocs failed\n");
929 mutex_lock(&dev
->struct_mutex
);
930 drm_bo_usage_deref_locked(&relocatee
.buf
);
931 mutex_unlock(&dev
->struct_mutex
);
938 * Validate, add fence and relocate a block of bos from a userspace list
940 int i915_validate_buffer_list(struct drm_file
*file_priv
,
941 unsigned int fence_class
, uint64_t data
,
942 struct drm_i915_validate_buffer
*buffers
,
943 uint32_t *num_buffers
)
945 struct drm_i915_op_arg arg
;
946 struct drm_bo_op_req
*req
= &arg
.d
.req
;
947 struct drm_bo_arg_rep rep
;
948 unsigned long next
= 0;
950 unsigned buf_count
= 0;
951 struct drm_device
*dev
= file_priv
->head
->dev
;
952 uint32_t buf_reloc_handle
, buf_handle
;
956 if (buf_count
>= *num_buffers
) {
957 DRM_ERROR("Buffer count exceeded %d\n.", *num_buffers
);
962 buffers
[buf_count
].buffer
= NULL
;
963 buffers
[buf_count
].presumed_offset_correct
= 0;
965 if (copy_from_user(&arg
, (void __user
*)(unsigned long)data
, sizeof(arg
))) {
972 mutex_lock(&dev
->struct_mutex
);
973 buffers
[buf_count
].buffer
= drm_lookup_buffer_object(file_priv
, req
->arg_handle
, 1);
974 mutex_unlock(&dev
->struct_mutex
);
980 if (req
->op
!= drm_bo_validate
) {
982 ("Buffer object operation wasn't \"validate\".\n");
987 buf_handle
= req
->bo_req
.handle
;
988 buf_reloc_handle
= arg
.reloc_handle
;
990 if (buf_reloc_handle
) {
991 ret
= i915_exec_reloc(file_priv
, buf_handle
, buf_reloc_handle
, buffers
, buf_count
);
997 rep
.ret
= drm_bo_handle_validate(file_priv
, req
->bo_req
.handle
,
998 req
->bo_req
.flags
, req
->bo_req
.mask
,
1000 req
->bo_req
.fence_class
, 0,
1002 &buffers
[buf_count
].buffer
);
1005 DRM_ERROR("error on handle validate %d\n", rep
.ret
);
1009 * If the user provided a presumed offset hint, check whether
1010 * the buffer is in the same place, if so, relocations relative to
1011 * this buffer need not be performed
1013 if ((req
->bo_req
.hint
& DRM_BO_HINT_PRESUMED_OFFSET
) &&
1014 buffers
[buf_count
].buffer
->offset
== req
->bo_req
.presumed_offset
) {
1015 buffers
[buf_count
].presumed_offset_correct
= 1;
1022 if (copy_to_user((void __user
*)(unsigned long)data
, &arg
, sizeof(arg
)))
1028 } while (next
!= 0);
1029 *num_buffers
= buf_count
;
1032 mutex_lock(&dev
->struct_mutex
);
1033 i915_dereference_buffers_locked(buffers
, buf_count
);
1034 mutex_unlock(&dev
->struct_mutex
);
1036 return (ret
) ? ret
: rep
.ret
;
1039 static int i915_execbuffer(struct drm_device
*dev
, void *data
,
1040 struct drm_file
*file_priv
)
1042 drm_i915_private_t
*dev_priv
= (drm_i915_private_t
*) dev
->dev_private
;
1043 drm_i915_sarea_t
*sarea_priv
= (drm_i915_sarea_t
*)
1044 dev_priv
->sarea_priv
;
1045 struct drm_i915_execbuffer
*exec_buf
= data
;
1046 struct _drm_i915_batchbuffer
*batch
= &exec_buf
->batch
;
1047 struct drm_fence_arg
*fence_arg
= &exec_buf
->fence_arg
;
1050 struct drm_i915_validate_buffer
*buffers
;
1051 struct drm_fence_object
*fence
;
1053 if (!dev_priv
->allow_batchbuffer
) {
1054 DRM_ERROR("Batchbuffer ioctl disabled\n");
1059 if (batch
->num_cliprects
&& DRM_VERIFYAREA_READ(batch
->cliprects
,
1060 batch
->num_cliprects
*
1061 sizeof(struct drm_clip_rect
)))
1064 if (exec_buf
->num_buffers
> dev_priv
->max_validate_buffers
)
1068 ret
= drm_bo_read_lock(&dev
->bm
.bm_lock
);
1073 * The cmdbuf_mutex makes sure the validate-submit-fence
1074 * operation is atomic.
1077 ret
= mutex_lock_interruptible(&dev_priv
->cmdbuf_mutex
);
1079 drm_bo_read_unlock(&dev
->bm
.bm_lock
);
1083 num_buffers
= exec_buf
->num_buffers
;
1085 buffers
= drm_calloc(num_buffers
, sizeof(struct drm_i915_validate_buffer
), DRM_MEM_DRIVER
);
1087 drm_bo_read_unlock(&dev
->bm
.bm_lock
);
1088 mutex_unlock(&dev_priv
->cmdbuf_mutex
);
1092 /* validate buffer list + fixup relocations */
1093 ret
= i915_validate_buffer_list(file_priv
, 0, exec_buf
->ops_list
,
1094 buffers
, &num_buffers
);
1098 /* make sure all previous memory operations have passed */
1099 DRM_MEMORYBARRIER();
1100 drm_agp_chipset_flush(dev
);
1103 batch
->start
= buffers
[num_buffers
-1].buffer
->offset
;
1105 DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n",
1106 batch
->start
, batch
->used
, batch
->num_cliprects
);
1108 ret
= i915_dispatch_batchbuffer(dev
, batch
);
1112 sarea_priv
->last_dispatch
= READ_BREADCRUMB(dev_priv
);
1115 ret
= drm_fence_buffer_objects(dev
, NULL
, 0, NULL
, &fence
);
1119 if (!(fence_arg
->flags
& DRM_FENCE_FLAG_NO_USER
)) {
1120 ret
= drm_fence_add_user_object(file_priv
, fence
, fence_arg
->flags
& DRM_FENCE_FLAG_SHAREABLE
);
1122 fence_arg
->handle
= fence
->base
.hash
.key
;
1123 fence_arg
->fence_class
= fence
->fence_class
;
1124 fence_arg
->type
= fence
->type
;
1125 fence_arg
->signaled
= fence
->signaled
;
1128 drm_fence_usage_deref_unlocked(&fence
);
1132 mutex_lock(&dev
->struct_mutex
);
1133 i915_dereference_buffers_locked(buffers
, num_buffers
);
1134 mutex_unlock(&dev
->struct_mutex
);
1137 drm_free(buffers
, (exec_buf
->num_buffers
* sizeof(struct drm_buffer_object
*)), DRM_MEM_DRIVER
);
1139 mutex_unlock(&dev_priv
->cmdbuf_mutex
);
1140 drm_bo_read_unlock(&dev
->bm
.bm_lock
);
1145 static int i915_do_cleanup_pageflip(struct drm_device
* dev
)
1147 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1148 int i
, planes
, num_pages
= dev_priv
->sarea_priv
->third_handle
? 3 : 2;
1152 for (i
= 0, planes
= 0; i
< 2; i
++)
1153 if (dev_priv
->sarea_priv
->pf_current_page
& (0x3 << (2 * i
))) {
1154 dev_priv
->sarea_priv
->pf_current_page
=
1155 (dev_priv
->sarea_priv
->pf_current_page
&
1156 ~(0x3 << (2 * i
))) | ((num_pages
- 1) << (2 * i
));
1162 i915_dispatch_flip(dev
, planes
, 0);
1167 static int i915_flip_bufs(struct drm_device
*dev
, void *data
, struct drm_file
*file_priv
)
1169 drm_i915_flip_t
*param
= data
;
1173 LOCK_TEST_WITH_RETURN(dev
, file_priv
);
1175 /* This is really planes */
1176 if (param
->pipes
& ~0x3) {
1177 DRM_ERROR("Invalid planes 0x%x, only <= 0x3 is valid\n",
1182 i915_dispatch_flip(dev
, param
->pipes
, 0);
/*
 * i915_getparam - DRM_I915_GETPARAM ioctl: look up one driver
 * parameter (IRQ active, batchbuffers allowed, last dispatched
 * breadcrumb) and copy the int result back to the user pointer in
 * param->value.
 *
 * NOTE(review): extract is lossy -- the dev_priv NULL check guarding
 * the "called with no initialization" error, the break statements,
 * the error returns and the final return are not visible here.
 */
1188 static int i915_getparam(struct drm_device
*dev
, void *data
,
1189 struct drm_file
*file_priv
)
1191 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1192 drm_i915_getparam_t
*param
= data
;
1196 DRM_ERROR("called with no initialization\n");
1200 switch (param
->param
) {
1201 case I915_PARAM_IRQ_ACTIVE
:
1202 value
= dev
->irq
? 1 : 0;
1204 case I915_PARAM_ALLOW_BATCHBUFFER
:
1205 value
= dev_priv
->allow_batchbuffer
? 1 : 0;
1207 case I915_PARAM_LAST_DISPATCH
:
1208 value
= READ_BREADCRUMB(dev_priv
);
/* Unknown parameter: report and (presumably) fail -- return missing. */
1211 DRM_ERROR("Unknown parameter %d\n", param
->param
);
/* Copy the single int result out to userspace. */
1215 if (DRM_COPY_TO_USER(param
->value
, &value
, sizeof(int))) {
1216 DRM_ERROR("DRM_COPY_TO_USER failed\n");
/*
 * i915_setparam - DRM_I915_SETPARAM ioctl: set one driver tunable
 * (use of MI_BATCH_BUFFER_START, texture-LRU log granularity, or
 * whether batchbuffers are allowed) from param->value.
 *
 * NOTE(review): extract is lossy -- the dev_priv NULL check, an
 * IS_I965G guard presumably at original line 1236, break statements,
 * error returns and the final return are not visible here.
 */
1223 static int i915_setparam(struct drm_device
*dev
, void *data
,
1224 struct drm_file
*file_priv
)
1226 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1227 drm_i915_setparam_t
*param
= data
;
1230 DRM_ERROR("called with no initialization\n");
1234 switch (param
->param
) {
1235 case I915_SETPARAM_USE_MI_BATCHBUFFER_START
:
1237 dev_priv
->use_mi_batchbuffer_start
= param
->value
;
1239 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY
:
1240 dev_priv
->tex_lru_log_granularity
= param
->value
;
1242 case I915_SETPARAM_ALLOW_BATCHBUFFER
:
1243 dev_priv
->allow_batchbuffer
= param
->value
;
1246 DRM_ERROR("unknown parameter %d\n", param
->param
);
1253 drm_i915_mmio_entry_t mmio_table
[] = {
1254 [MMIO_REGS_PS_DEPTH_COUNT
] = {
1255 I915_MMIO_MAY_READ
|I915_MMIO_MAY_WRITE
,
1261 static int mmio_table_size
= sizeof(mmio_table
)/sizeof(drm_i915_mmio_entry_t
);
1263 static int i915_mmio(struct drm_device
*dev
, void *data
,
1264 struct drm_file
*file_priv
)
1267 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1268 drm_i915_mmio_entry_t
*e
;
1269 drm_i915_mmio_t
*mmio
= data
;
1274 DRM_ERROR("called with no initialization\n");
1278 if (mmio
->reg
>= mmio_table_size
)
1281 e
= &mmio_table
[mmio
->reg
];
1282 base
= (u8
*) dev_priv
->mmio_map
->handle
+ e
->offset
;
1284 switch (mmio
->read_write
) {
1285 case I915_MMIO_READ
:
1286 if (!(e
->flag
& I915_MMIO_MAY_READ
))
1288 for (i
= 0; i
< e
->size
/ 4; i
++)
1289 buf
[i
] = I915_READ(e
->offset
+ i
* 4);
1290 if (DRM_COPY_TO_USER(mmio
->data
, buf
, e
->size
)) {
1291 DRM_ERROR("DRM_COPY_TO_USER failed\n");
1296 case I915_MMIO_WRITE
:
1297 if (!(e
->flag
& I915_MMIO_MAY_WRITE
))
1299 if (DRM_COPY_FROM_USER(buf
, mmio
->data
, e
->size
)) {
1300 DRM_ERROR("DRM_COPY_TO_USER failed\n");
1303 for (i
= 0; i
< e
->size
/ 4; i
++)
1304 I915_WRITE(e
->offset
+ i
* 4, buf
[i
]);
1310 static int i915_set_status_page(struct drm_device
*dev
, void *data
,
1311 struct drm_file
*file_priv
)
1313 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1314 drm_i915_hws_addr_t
*hws
= data
;
1317 DRM_ERROR("called with no initialization\n");
1320 DRM_DEBUG("set status page addr 0x%08x\n", (u32
)hws
->addr
);
1322 dev_priv
->status_gfx_addr
= hws
->addr
& (0x1ffff<<12);
1324 dev_priv
->hws_map
.offset
= dev
->agp
->base
+ hws
->addr
;
1325 dev_priv
->hws_map
.size
= 4*1024;
1326 dev_priv
->hws_map
.type
= 0;
1327 dev_priv
->hws_map
.flags
= 0;
1328 dev_priv
->hws_map
.mtrr
= 0;
1330 drm_core_ioremap(&dev_priv
->hws_map
, dev
);
1331 if (dev_priv
->hws_map
.handle
== NULL
) {
1332 i915_dma_cleanup(dev
);
1333 dev_priv
->status_gfx_addr
= 0;
1334 DRM_ERROR("can not ioremap virtual address for"
1335 " G33 hw status page\n");
1338 dev_priv
->hw_status_page
= dev_priv
->hws_map
.handle
;
1340 memset(dev_priv
->hw_status_page
, 0, PAGE_SIZE
);
1341 I915_WRITE(0x02080, dev_priv
->status_gfx_addr
);
1342 DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
1343 dev_priv
->status_gfx_addr
);
1344 DRM_DEBUG("load hws at %p\n", dev_priv
->hw_status_page
);
1348 int i915_driver_load(struct drm_device
*dev
, unsigned long flags
)
1350 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1351 unsigned long base
, size
;
1352 int ret
= 0, mmio_bar
= IS_I9XX(dev
) ? 0 : 1;
1354 /* i915 has 4 more counters */
1356 dev
->types
[6] = _DRM_STAT_IRQ
;
1357 dev
->types
[7] = _DRM_STAT_PRIMARY
;
1358 dev
->types
[8] = _DRM_STAT_SECONDARY
;
1359 dev
->types
[9] = _DRM_STAT_DMA
;
1361 dev_priv
= drm_alloc(sizeof(drm_i915_private_t
), DRM_MEM_DRIVER
);
1362 if (dev_priv
== NULL
)
1365 memset(dev_priv
, 0, sizeof(drm_i915_private_t
));
1367 dev
->dev_private
= (void *)dev_priv
;
1369 /* Add register map (needed for suspend/resume) */
1370 base
= drm_get_resource_start(dev
, mmio_bar
);
1371 size
= drm_get_resource_len(dev
, mmio_bar
);
1373 ret
= drm_addmap(dev
, base
, size
, _DRM_REGISTERS
,
1374 _DRM_KERNEL
| _DRM_DRIVER
, &dev_priv
->mmio_map
);
1377 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
1378 intel_init_chipset_flush_compat(dev
);
1385 int i915_driver_unload(struct drm_device
*dev
)
1387 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1389 if (dev_priv
->mmio_map
)
1390 drm_rmmap(dev
, dev_priv
->mmio_map
);
1392 drm_free(dev
->dev_private
, sizeof(drm_i915_private_t
),
1395 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
1396 intel_fini_chipset_flush_compat(dev
);
1402 void i915_driver_lastclose(struct drm_device
* dev
)
1404 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1406 if (drm_getsarea(dev
) && dev_priv
->sarea_priv
)
1407 i915_do_cleanup_pageflip(dev
);
1408 if (dev_priv
->agp_heap
)
1409 i915_mem_takedown(&(dev_priv
->agp_heap
));
1411 i915_dma_cleanup(dev
);
1414 void i915_driver_preclose(struct drm_device
* dev
, struct drm_file
*file_priv
)
1416 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1417 i915_mem_release(dev
, file_priv
, dev_priv
->agp_heap
);
1420 struct drm_ioctl_desc i915_ioctls
[] = {
1421 DRM_IOCTL_DEF(DRM_I915_INIT
, i915_dma_init
, DRM_AUTH
|DRM_MASTER
|DRM_ROOT_ONLY
),
1422 DRM_IOCTL_DEF(DRM_I915_FLUSH
, i915_flush_ioctl
, DRM_AUTH
),
1423 DRM_IOCTL_DEF(DRM_I915_FLIP
, i915_flip_bufs
, DRM_AUTH
),
1424 DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER
, i915_batchbuffer
, DRM_AUTH
),
1425 DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT
, i915_irq_emit
, DRM_AUTH
),
1426 DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT
, i915_irq_wait
, DRM_AUTH
),
1427 DRM_IOCTL_DEF(DRM_I915_GETPARAM
, i915_getparam
, DRM_AUTH
),
1428 DRM_IOCTL_DEF(DRM_I915_SETPARAM
, i915_setparam
, DRM_AUTH
|DRM_MASTER
|DRM_ROOT_ONLY
),
1429 DRM_IOCTL_DEF(DRM_I915_ALLOC
, i915_mem_alloc
, DRM_AUTH
),
1430 DRM_IOCTL_DEF(DRM_I915_FREE
, i915_mem_free
, DRM_AUTH
),
1431 DRM_IOCTL_DEF(DRM_I915_INIT_HEAP
, i915_mem_init_heap
, DRM_AUTH
|DRM_MASTER
|DRM_ROOT_ONLY
),
1432 DRM_IOCTL_DEF(DRM_I915_CMDBUFFER
, i915_cmdbuffer
, DRM_AUTH
),
1433 DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP
, i915_mem_destroy_heap
, DRM_AUTH
|DRM_MASTER
|DRM_ROOT_ONLY
),
1434 DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE
, i915_vblank_pipe_set
, DRM_AUTH
|DRM_MASTER
|DRM_ROOT_ONLY
),
1435 DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE
, i915_vblank_pipe_get
, DRM_AUTH
),
1436 DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP
, i915_vblank_swap
, DRM_AUTH
),
1437 DRM_IOCTL_DEF(DRM_I915_MMIO
, i915_mmio
, DRM_AUTH
),
1438 DRM_IOCTL_DEF(DRM_I915_HWS_ADDR
, i915_set_status_page
, DRM_AUTH
|DRM_MASTER
|DRM_ROOT_ONLY
),
1439 #ifdef I915_HAVE_BUFFER
1440 DRM_IOCTL_DEF(DRM_I915_EXECBUFFER
, i915_execbuffer
, DRM_AUTH
),
1444 int i915_max_ioctl
= DRM_ARRAY_SIZE(i915_ioctls
);
1447 * Determine if the device really is AGP or not.
1449 * All Intel graphics chipsets are treated as AGP, even if they are really
1452 * \param dev The device to be tested.
1455 * A value of 1 is always returned to indicate every i9x5 is AGP.
1457 int i915_driver_device_is_agp(struct drm_device
* dev
)
1462 int i915_driver_firstopen(struct drm_device
*dev
)
1464 #ifdef I915_HAVE_BUFFER
1465 drm_bo_driver_init(dev
);