1 /* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
2 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
27 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
28 * Jeff Hartmann <jhartmann@valinux.com>
29 * Keith Whitwell <keithw@valinux.com>
33 #define __NO_VERSION__
36 #include <linux/interrupt.h> /* For task queue support */
37 #include <linux/pagemap.h>
39 /* in case we don't have a 2.3.99-pre6 kernel or later: */
/* Ownership states stored in each buffer's in_use word (manipulated
 * atomically with cmpxchg below): a buffer is FREE, owned by a CLIENT,
 * or queued to the HARDWARE. */
#define I810_BUF_FREE 2
#define I810_BUF_CLIENT 1
#define I810_BUF_HARDWARE 0

/* Whether a buffer is currently mapped into a client's address space. */
#define I810_BUF_UNMAPPED 0
#define I810_BUF_MAPPED 1

/* MMIO register access: the register map lives at fixed index 2 of
 * dev->maplist; I810_ADDR turns a register offset into a kernel virtual
 * address, and the DEREF/READ/WRITE macros perform volatile 32-bit and
 * 16-bit accesses through it.  All of them require a `dev` in scope. */
#define I810_REG(reg) 2
#define I810_BASE(reg) ((unsigned long) \
dev->maplist[I810_REG(reg)]->handle)
#define I810_ADDR(reg) (I810_BASE(reg) + reg)
#define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg)
#define I810_READ(reg) I810_DEREF(reg)
#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0)
#define I810_DEREF16(reg) *(__volatile__ u16 *)I810_ADDR(reg)
#define I810_READ16(reg) I810_DEREF16(reg)
#define I810_WRITE16(reg,val) do { I810_DEREF16(reg) = val; } while (0)
/* Scratch locals used by the BEGIN/OUT/ADVANCE ring macros below; must
 * be declared in every function that emits to the ring. */
#define RING_LOCALS	unsigned int outring, ringmask; volatile char *virt;

/* Reserve space for n dwords in the low-priority ring, spinning in
 * i810_wait_ring() if the ring is too full, then snapshot the tail
 * pointer, wrap mask and ring base into the RING_LOCALS.
 * NOTE(review): the I810_VERBOSE guard and the closing "} while (0)"
 * were dropped by a lossy extraction of this file; restored here —
 * verify against the upstream i810_dma.c. */
#define BEGIN_LP_RING(n) do {						\
	if (I810_VERBOSE)						\
		DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n",			\
			  n, __FUNCTION__);				\
	if (dev_priv->ring.space < n*4)					\
		i810_wait_ring(dev, n*4);				\
	dev_priv->ring.space -= n*4;					\
	outring = dev_priv->ring.tail;					\
	ringmask = dev_priv->ring.tail_mask;				\
	virt = dev_priv->ring.virtual_start;				\
} while (0)

/* Publish the new tail to the hardware so it starts executing the
 * dwords emitted since BEGIN_LP_RING. */
#define ADVANCE_LP_RING() do {						\
	if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n");		\
	dev_priv->ring.tail = outring;					\
	I810_WRITE(LP_RING + RING_TAIL, outring);			\
} while (0)

/* Emit one dword at the current tail, then advance the tail by 4 bytes
 * and wrap it with the ring mask.
 * NOTE(review): the "outring += 4;" advance was dropped by the lossy
 * extraction; without it every OUT_RING would overwrite the same slot.
 * Restored — verify against upstream i810_dma.c. */
#define OUT_RING(n) do {						\
	if (I810_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));	\
	*(volatile unsigned int *)(virt + outring) = n;			\
	outring += 4;							\
	outring &= ringmask;						\
} while (0)
89 static inline void i810_print_status_page(drm_device_t
*dev
)
91 drm_device_dma_t
*dma
= dev
->dma
;
92 drm_i810_private_t
*dev_priv
= dev
->dev_private
;
93 u32
*temp
= (u32
*)dev_priv
->hw_status_page
;
96 DRM_DEBUG( "hw_status: Interrupt Status : %x\n", temp
[0]);
97 DRM_DEBUG( "hw_status: LpRing Head ptr : %x\n", temp
[1]);
98 DRM_DEBUG( "hw_status: IRing Head ptr : %x\n", temp
[2]);
99 DRM_DEBUG( "hw_status: Reserved : %x\n", temp
[3]);
100 DRM_DEBUG( "hw_status: Driver Counter : %d\n", temp
[5]);
101 for(i
= 6; i
< dma
->buf_count
+ 6; i
++) {
102 DRM_DEBUG( "buffer status idx : %d used: %d\n", i
- 6, temp
[i
]);
106 static drm_buf_t
*i810_freelist_get(drm_device_t
*dev
)
108 drm_device_dma_t
*dma
= dev
->dma
;
112 /* Linear search might not be the best solution */
114 for (i
= 0; i
< dma
->buf_count
; i
++) {
115 drm_buf_t
*buf
= dma
->buflist
[ i
];
116 drm_i810_buf_priv_t
*buf_priv
= buf
->dev_private
;
117 /* In use is already a pointer */
118 used
= cmpxchg(buf_priv
->in_use
, I810_BUF_FREE
,
120 if(used
== I810_BUF_FREE
) {
127 /* This should only be called if the buffer is not sent to the hardware
128 * yet, the hardware updates in use for us once its on the ring buffer.
131 static int i810_freelist_put(drm_device_t
*dev
, drm_buf_t
*buf
)
133 drm_i810_buf_priv_t
*buf_priv
= buf
->dev_private
;
136 /* In use is already a pointer */
137 used
= cmpxchg(buf_priv
->in_use
, I810_BUF_CLIENT
, I810_BUF_FREE
);
138 if(used
!= I810_BUF_CLIENT
) {
139 DRM_ERROR("Freeing buffer thats not in use : %d\n", buf
->idx
);
146 static struct file_operations i810_buffer_fops
= {
149 release
: i810_release
,
151 mmap
: i810_mmap_buffers
,
157 int i810_mmap_buffers(struct file
*filp
, struct vm_area_struct
*vma
)
159 drm_file_t
*priv
= filp
->private_data
;
161 drm_i810_private_t
*dev_priv
;
163 drm_i810_buf_priv_t
*buf_priv
;
167 dev_priv
= dev
->dev_private
;
168 buf
= dev_priv
->mmap_buffer
;
169 buf_priv
= buf
->dev_private
;
171 vma
->vm_flags
|= (VM_IO
| VM_DONTCOPY
);
174 buf_priv
->currently_mapped
= I810_BUF_MAPPED
;
177 if (remap_page_range(vma
->vm_start
,
179 vma
->vm_end
- vma
->vm_start
,
180 vma
->vm_page_prot
)) return -EAGAIN
;
184 static int i810_map_buffer(drm_buf_t
*buf
, struct file
*filp
)
186 drm_file_t
*priv
= filp
->private_data
;
187 drm_device_t
*dev
= priv
->dev
;
188 drm_i810_buf_priv_t
*buf_priv
= buf
->dev_private
;
189 drm_i810_private_t
*dev_priv
= dev
->dev_private
;
190 struct file_operations
*old_fops
;
193 if(buf_priv
->currently_mapped
== I810_BUF_MAPPED
) return -EINVAL
;
195 if(VM_DONTCOPY
!= 0) {
196 down_write(¤t
->mm
->mmap_sem
);
197 old_fops
= filp
->f_op
;
198 filp
->f_op
= &i810_buffer_fops
;
199 dev_priv
->mmap_buffer
= buf
;
200 buf_priv
->virtual = (void *)do_mmap(filp
, 0, buf
->total
,
201 PROT_READ
|PROT_WRITE
,
204 dev_priv
->mmap_buffer
= NULL
;
205 filp
->f_op
= old_fops
;
206 if ((unsigned long)buf_priv
->virtual > -1024UL) {
208 DRM_DEBUG("mmap error\n");
209 retcode
= (signed int)buf_priv
->virtual;
210 buf_priv
->virtual = 0;
212 up_write(¤t
->mm
->mmap_sem
);
214 buf_priv
->virtual = buf_priv
->kernel_virtual
;
215 buf_priv
->currently_mapped
= I810_BUF_MAPPED
;
220 static int i810_unmap_buffer(drm_buf_t
*buf
)
222 drm_i810_buf_priv_t
*buf_priv
= buf
->dev_private
;
225 if(VM_DONTCOPY
!= 0) {
226 if(buf_priv
->currently_mapped
!= I810_BUF_MAPPED
)
228 down_write(¤t
->mm
->mmap_sem
);
229 retcode
= do_munmap(current
->mm
,
230 (unsigned long)buf_priv
->virtual,
231 (size_t) buf
->total
);
232 up_write(¤t
->mm
->mmap_sem
);
234 buf_priv
->currently_mapped
= I810_BUF_UNMAPPED
;
235 buf_priv
->virtual = 0;
240 static int i810_dma_get_buffer(drm_device_t
*dev
, drm_i810_dma_t
*d
,
243 drm_file_t
*priv
= filp
->private_data
;
245 drm_i810_buf_priv_t
*buf_priv
;
248 buf
= i810_freelist_get(dev
);
251 DRM_DEBUG("retcode=%d\n", retcode
);
255 retcode
= i810_map_buffer(buf
, filp
);
257 i810_freelist_put(dev
, buf
);
258 DRM_DEBUG("mapbuf failed, retcode %d\n", retcode
);
261 buf
->pid
= priv
->pid
;
262 buf_priv
= buf
->dev_private
;
264 d
->request_idx
= buf
->idx
;
265 d
->request_size
= buf
->total
;
266 d
->virtual = buf_priv
->virtual;
271 static unsigned long i810_alloc_page(drm_device_t
*dev
)
273 unsigned long address
;
275 address
= __get_free_page(GFP_KERNEL
);
279 get_page(virt_to_page(address
));
280 LockPage(virt_to_page(address
));
285 static void i810_free_page(drm_device_t
*dev
, unsigned long page
)
287 struct page
* p
= virt_to_page(page
);
297 static int i810_dma_cleanup(drm_device_t
*dev
)
299 drm_device_dma_t
*dma
= dev
->dma
;
301 if(dev
->dev_private
) {
303 drm_i810_private_t
*dev_priv
=
304 (drm_i810_private_t
*) dev
->dev_private
;
306 if(dev_priv
->ring
.virtual_start
) {
307 drm_ioremapfree((void *) dev_priv
->ring
.virtual_start
,
308 dev_priv
->ring
.Size
, dev
);
310 if(dev_priv
->hw_status_page
!= 0UL) {
311 i810_free_page(dev
, dev_priv
->hw_status_page
);
312 /* Need to rewrite hardware status page */
313 I810_WRITE(0x02080, 0x1ffff000);
315 drm_free(dev
->dev_private
, sizeof(drm_i810_private_t
),
317 dev
->dev_private
= NULL
;
319 for (i
= 0; i
< dma
->buf_count
; i
++) {
320 drm_buf_t
*buf
= dma
->buflist
[ i
];
321 drm_i810_buf_priv_t
*buf_priv
= buf
->dev_private
;
322 drm_ioremapfree(buf_priv
->kernel_virtual
, buf
->total
, dev
);
328 static int i810_wait_ring(drm_device_t
*dev
, int n
)
330 drm_i810_private_t
*dev_priv
= dev
->dev_private
;
331 drm_i810_ring_buffer_t
*ring
= &(dev_priv
->ring
);
334 unsigned int last_head
= I810_READ(LP_RING
+ RING_HEAD
) & HEAD_ADDR
;
336 end
= jiffies
+ (HZ
*3);
337 while (ring
->space
< n
) {
340 ring
->head
= I810_READ(LP_RING
+ RING_HEAD
) & HEAD_ADDR
;
341 ring
->space
= ring
->head
- (ring
->tail
+8);
342 if (ring
->space
< 0) ring
->space
+= ring
->Size
;
344 if (ring
->head
!= last_head
)
345 end
= jiffies
+ (HZ
*3);
348 if((signed)(end
- jiffies
) <= 0) {
349 DRM_ERROR("space: %d wanted %d\n", ring
->space
, n
);
350 DRM_ERROR("lockup\n");
354 for (i
= 0 ; i
< 2000 ; i
++) ;
361 static void i810_kernel_lost_context(drm_device_t
*dev
)
363 drm_i810_private_t
*dev_priv
= dev
->dev_private
;
364 drm_i810_ring_buffer_t
*ring
= &(dev_priv
->ring
);
366 ring
->head
= I810_READ(LP_RING
+ RING_HEAD
) & HEAD_ADDR
;
367 ring
->tail
= I810_READ(LP_RING
+ RING_TAIL
);
368 ring
->space
= ring
->head
- (ring
->tail
+8);
369 if (ring
->space
< 0) ring
->space
+= ring
->Size
;
372 static int i810_freelist_init(drm_device_t
*dev
)
374 drm_device_dma_t
*dma
= dev
->dma
;
375 drm_i810_private_t
*dev_priv
= (drm_i810_private_t
*)dev
->dev_private
;
377 u32
*hw_status
= (u32
*)(dev_priv
->hw_status_page
+ my_idx
);
380 if(dma
->buf_count
> 1019) {
381 /* Not enough space in the status page for the freelist */
385 for (i
= 0; i
< dma
->buf_count
; i
++) {
386 drm_buf_t
*buf
= dma
->buflist
[ i
];
387 drm_i810_buf_priv_t
*buf_priv
= buf
->dev_private
;
389 buf_priv
->in_use
= hw_status
++;
390 buf_priv
->my_use_idx
= my_idx
;
393 *buf_priv
->in_use
= I810_BUF_FREE
;
395 buf_priv
->kernel_virtual
= drm_ioremap(buf
->bus_address
,
401 static int i810_dma_initialize(drm_device_t
*dev
,
402 drm_i810_private_t
*dev_priv
,
403 drm_i810_init_t
*init
)
405 drm_map_t
*sarea_map
;
407 dev
->dev_private
= (void *) dev_priv
;
408 memset(dev_priv
, 0, sizeof(drm_i810_private_t
));
410 if (init
->ring_map_idx
>= dev
->map_count
||
411 init
->buffer_map_idx
>= dev
->map_count
) {
412 i810_dma_cleanup(dev
);
413 DRM_ERROR("ring_map or buffer_map are invalid\n");
417 dev_priv
->ring_map_idx
= init
->ring_map_idx
;
418 dev_priv
->buffer_map_idx
= init
->buffer_map_idx
;
419 sarea_map
= dev
->maplist
[0];
420 dev_priv
->sarea_priv
= (drm_i810_sarea_t
*)
421 ((u8
*)sarea_map
->handle
+
422 init
->sarea_priv_offset
);
424 atomic_set(&dev_priv
->flush_done
, 0);
425 init_waitqueue_head(&dev_priv
->flush_queue
);
427 dev_priv
->ring
.Start
= init
->ring_start
;
428 dev_priv
->ring
.End
= init
->ring_end
;
429 dev_priv
->ring
.Size
= init
->ring_size
;
431 dev_priv
->ring
.virtual_start
= drm_ioremap(dev
->agp
->base
+
433 init
->ring_size
, dev
);
435 dev_priv
->ring
.tail_mask
= dev_priv
->ring
.Size
- 1;
437 if (dev_priv
->ring
.virtual_start
== NULL
) {
438 i810_dma_cleanup(dev
);
439 DRM_ERROR("can not ioremap virtual address for"
444 dev_priv
->w
= init
->w
;
445 dev_priv
->h
= init
->h
;
446 dev_priv
->pitch
= init
->pitch
;
447 dev_priv
->back_offset
= init
->back_offset
;
448 dev_priv
->depth_offset
= init
->depth_offset
;
450 dev_priv
->front_di1
= init
->front_offset
| init
->pitch_bits
;
451 dev_priv
->back_di1
= init
->back_offset
| init
->pitch_bits
;
452 dev_priv
->zi1
= init
->depth_offset
| init
->pitch_bits
;
455 /* Program Hardware Status Page */
456 dev_priv
->hw_status_page
= i810_alloc_page(dev
);
457 memset((void *) dev_priv
->hw_status_page
, 0, PAGE_SIZE
);
458 if(dev_priv
->hw_status_page
== 0UL) {
459 i810_dma_cleanup(dev
);
460 DRM_ERROR("Can not allocate hardware status page\n");
463 DRM_DEBUG("hw status page @ %lx\n", dev_priv
->hw_status_page
);
465 I810_WRITE(0x02080, virt_to_bus((void *)dev_priv
->hw_status_page
));
466 DRM_DEBUG("Enabled hardware status page\n");
468 /* Now we need to init our freelist */
469 if(i810_freelist_init(dev
) != 0) {
470 i810_dma_cleanup(dev
);
471 DRM_ERROR("Not enough space in the status page for"
478 int i810_dma_init(struct inode
*inode
, struct file
*filp
,
479 unsigned int cmd
, unsigned long arg
)
481 drm_file_t
*priv
= filp
->private_data
;
482 drm_device_t
*dev
= priv
->dev
;
483 drm_i810_private_t
*dev_priv
;
484 drm_i810_init_t init
;
487 if (copy_from_user(&init
, (drm_i810_init_t
*)arg
, sizeof(init
)))
492 dev_priv
= drm_alloc(sizeof(drm_i810_private_t
),
494 if(dev_priv
== NULL
) return -ENOMEM
;
495 retcode
= i810_dma_initialize(dev
, dev_priv
, &init
);
497 case I810_CLEANUP_DMA
:
498 retcode
= i810_dma_cleanup(dev
);
510 /* Most efficient way to verify state for the i810 is as it is
511 * emitted. Non-conformant state is silently dropped.
513 * Use 'volatile' & local var tmp to force the emitted values to be
514 * identical to the verified ones.
516 static void i810EmitContextVerified( drm_device_t
*dev
,
517 volatile unsigned int *code
)
519 drm_i810_private_t
*dev_priv
= dev
->dev_private
;
524 BEGIN_LP_RING( I810_CTX_SETUP_SIZE
);
526 OUT_RING( GFX_OP_COLOR_FACTOR
);
527 OUT_RING( code
[I810_CTXREG_CF1
] );
529 OUT_RING( GFX_OP_STIPPLE
);
530 OUT_RING( code
[I810_CTXREG_ST1
] );
532 for ( i
= 4 ; i
< I810_CTX_SETUP_SIZE
; i
++ ) {
535 if ((tmp
& (7<<29)) == (3<<29) &&
536 (tmp
& (0x1f<<24)) < (0x1d<<24))
549 static void i810EmitTexVerified( drm_device_t
*dev
,
550 volatile unsigned int *code
)
552 drm_i810_private_t
*dev_priv
= dev
->dev_private
;
557 BEGIN_LP_RING( I810_TEX_SETUP_SIZE
);
559 OUT_RING( GFX_OP_MAP_INFO
);
560 OUT_RING( code
[I810_TEXREG_MI1
] );
561 OUT_RING( code
[I810_TEXREG_MI2
] );
562 OUT_RING( code
[I810_TEXREG_MI3
] );
564 for ( i
= 4 ; i
< I810_TEX_SETUP_SIZE
; i
++ ) {
567 if ((tmp
& (7<<29)) == (3<<29) &&
568 (tmp
& (0x1f<<24)) < (0x1d<<24))
582 /* Need to do some additional checking when setting the dest buffer.
584 static void i810EmitDestVerified( drm_device_t
*dev
,
585 volatile unsigned int *code
)
587 drm_i810_private_t
*dev_priv
= dev
->dev_private
;
591 BEGIN_LP_RING( I810_DEST_SETUP_SIZE
+ 2 );
593 tmp
= code
[I810_DESTREG_DI1
];
594 if (tmp
== dev_priv
->front_di1
|| tmp
== dev_priv
->back_di1
) {
595 OUT_RING( CMD_OP_DESTBUFFER_INFO
);
598 DRM_DEBUG("bad di1 %x (allow %x or %x)\n",
599 tmp
, dev_priv
->front_di1
, dev_priv
->back_di1
);
603 OUT_RING( CMD_OP_Z_BUFFER_INFO
);
604 OUT_RING( dev_priv
->zi1
);
606 OUT_RING( GFX_OP_DESTBUFFER_VARS
);
607 OUT_RING( code
[I810_DESTREG_DV1
] );
609 OUT_RING( GFX_OP_DRAWRECT_INFO
);
610 OUT_RING( code
[I810_DESTREG_DR1
] );
611 OUT_RING( code
[I810_DESTREG_DR2
] );
612 OUT_RING( code
[I810_DESTREG_DR3
] );
613 OUT_RING( code
[I810_DESTREG_DR4
] );
621 static void i810EmitState( drm_device_t
*dev
)
623 drm_i810_private_t
*dev_priv
= dev
->dev_private
;
624 drm_i810_sarea_t
*sarea_priv
= dev_priv
->sarea_priv
;
625 unsigned int dirty
= sarea_priv
->dirty
;
627 if (dirty
& I810_UPLOAD_BUFFERS
) {
628 i810EmitDestVerified( dev
, sarea_priv
->BufferState
);
629 sarea_priv
->dirty
&= ~I810_UPLOAD_BUFFERS
;
632 if (dirty
& I810_UPLOAD_CTX
) {
633 i810EmitContextVerified( dev
, sarea_priv
->ContextState
);
634 sarea_priv
->dirty
&= ~I810_UPLOAD_CTX
;
637 if (dirty
& I810_UPLOAD_TEX0
) {
638 i810EmitTexVerified( dev
, sarea_priv
->TexState
[0] );
639 sarea_priv
->dirty
&= ~I810_UPLOAD_TEX0
;
642 if (dirty
& I810_UPLOAD_TEX1
) {
643 i810EmitTexVerified( dev
, sarea_priv
->TexState
[1] );
644 sarea_priv
->dirty
&= ~I810_UPLOAD_TEX1
;
652 static void i810_dma_dispatch_clear( drm_device_t
*dev
, int flags
,
653 unsigned int clear_color
,
654 unsigned int clear_zval
)
656 drm_i810_private_t
*dev_priv
= dev
->dev_private
;
657 drm_i810_sarea_t
*sarea_priv
= dev_priv
->sarea_priv
;
658 int nbox
= sarea_priv
->nbox
;
659 drm_clip_rect_t
*pbox
= sarea_priv
->boxes
;
660 int pitch
= dev_priv
->pitch
;
665 i810_kernel_lost_context(dev
);
667 if (nbox
> I810_NR_SAREA_CLIPRECTS
)
668 nbox
= I810_NR_SAREA_CLIPRECTS
;
670 for (i
= 0 ; i
< nbox
; i
++, pbox
++) {
671 unsigned int x
= pbox
->x1
;
672 unsigned int y
= pbox
->y1
;
673 unsigned int width
= (pbox
->x2
- x
) * cpp
;
674 unsigned int height
= pbox
->y2
- y
;
675 unsigned int start
= y
* pitch
+ x
* cpp
;
677 if (pbox
->x1
> pbox
->x2
||
678 pbox
->y1
> pbox
->y2
||
679 pbox
->x2
> dev_priv
->w
||
680 pbox
->y2
> dev_priv
->h
)
683 if ( flags
& I810_FRONT
) {
684 DRM_DEBUG("clear front\n");
686 OUT_RING( BR00_BITBLT_CLIENT
|
687 BR00_OP_COLOR_BLT
| 0x3 );
688 OUT_RING( BR13_SOLID_PATTERN
| (0xF0 << 16) | pitch
);
689 OUT_RING( (height
<< 16) | width
);
691 OUT_RING( clear_color
);
696 if ( flags
& I810_BACK
) {
697 DRM_DEBUG("clear back\n");
699 OUT_RING( BR00_BITBLT_CLIENT
|
700 BR00_OP_COLOR_BLT
| 0x3 );
701 OUT_RING( BR13_SOLID_PATTERN
| (0xF0 << 16) | pitch
);
702 OUT_RING( (height
<< 16) | width
);
703 OUT_RING( dev_priv
->back_offset
+ start
);
704 OUT_RING( clear_color
);
709 if ( flags
& I810_DEPTH
) {
710 DRM_DEBUG("clear depth\n");
712 OUT_RING( BR00_BITBLT_CLIENT
|
713 BR00_OP_COLOR_BLT
| 0x3 );
714 OUT_RING( BR13_SOLID_PATTERN
| (0xF0 << 16) | pitch
);
715 OUT_RING( (height
<< 16) | width
);
716 OUT_RING( dev_priv
->depth_offset
+ start
);
717 OUT_RING( clear_zval
);
724 static void i810_dma_dispatch_swap( drm_device_t
*dev
)
726 drm_i810_private_t
*dev_priv
= dev
->dev_private
;
727 drm_i810_sarea_t
*sarea_priv
= dev_priv
->sarea_priv
;
728 int nbox
= sarea_priv
->nbox
;
729 drm_clip_rect_t
*pbox
= sarea_priv
->boxes
;
730 int pitch
= dev_priv
->pitch
;
732 int ofs
= dev_priv
->back_offset
;
736 DRM_DEBUG("swapbuffers\n");
738 i810_kernel_lost_context(dev
);
740 if (nbox
> I810_NR_SAREA_CLIPRECTS
)
741 nbox
= I810_NR_SAREA_CLIPRECTS
;
743 for (i
= 0 ; i
< nbox
; i
++, pbox
++)
745 unsigned int w
= pbox
->x2
- pbox
->x1
;
746 unsigned int h
= pbox
->y2
- pbox
->y1
;
747 unsigned int dst
= pbox
->x1
*cpp
+ pbox
->y1
*pitch
;
748 unsigned int start
= ofs
+ dst
;
750 if (pbox
->x1
> pbox
->x2
||
751 pbox
->y1
> pbox
->y2
||
752 pbox
->x2
> dev_priv
->w
||
753 pbox
->y2
> dev_priv
->h
)
756 DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n",
757 pbox
[i
].x1
, pbox
[i
].y1
,
758 pbox
[i
].x2
, pbox
[i
].y2
);
761 OUT_RING( BR00_BITBLT_CLIENT
| BR00_OP_SRC_COPY_BLT
| 0x4 );
762 OUT_RING( pitch
| (0xCC << 16));
763 OUT_RING( (h
<< 16) | (w
* cpp
));
772 static void i810_dma_dispatch_vertex(drm_device_t
*dev
,
777 drm_i810_private_t
*dev_priv
= dev
->dev_private
;
778 drm_i810_buf_priv_t
*buf_priv
= buf
->dev_private
;
779 drm_i810_sarea_t
*sarea_priv
= dev_priv
->sarea_priv
;
780 drm_clip_rect_t
*box
= sarea_priv
->boxes
;
781 int nbox
= sarea_priv
->nbox
;
782 unsigned long address
= (unsigned long)buf
->bus_address
;
783 unsigned long start
= address
- dev
->agp
->base
;
787 i810_kernel_lost_context(dev
);
789 if (nbox
> I810_NR_SAREA_CLIPRECTS
)
790 nbox
= I810_NR_SAREA_CLIPRECTS
;
793 u
= cmpxchg(buf_priv
->in_use
, I810_BUF_CLIENT
,
795 if(u
!= I810_BUF_CLIENT
) {
796 DRM_DEBUG("xxxx 2\n");
803 if (sarea_priv
->dirty
)
804 i810EmitState( dev
);
806 DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n",
807 address
, used
, nbox
);
810 DRM_DEBUG( "dispatch counter : %ld\n", dev_priv
->counter
);
811 DRM_DEBUG( "i810_dma_dispatch\n");
812 DRM_DEBUG( "start : %lx\n", start
);
813 DRM_DEBUG( "used : %d\n", used
);
814 DRM_DEBUG( "start + used - 4 : %ld\n", start
+ used
- 4);
816 if (buf_priv
->currently_mapped
== I810_BUF_MAPPED
) {
817 *(u32
*)buf_priv
->virtual = (GFX_OP_PRIMITIVE
|
818 sarea_priv
->vertex_prim
|
822 *(u32
*)((u32
)buf_priv
->virtual + used
) = 0;
826 i810_unmap_buffer(buf
);
833 OUT_RING( GFX_OP_SCISSOR
| SC_UPDATE_SCISSOR
|
835 OUT_RING( GFX_OP_SCISSOR_INFO
);
836 OUT_RING( box
[i
].x1
| (box
[i
].y1
<<16) );
837 OUT_RING( (box
[i
].x2
-1) | ((box
[i
].y2
-1)<<16) );
842 OUT_RING( CMD_OP_BATCH_BUFFER
);
843 OUT_RING( start
| BB1_PROTECTED
);
844 OUT_RING( start
+ used
- 4 );
848 } while (++i
< nbox
);
852 OUT_RING( CMD_STORE_DWORD_IDX
);
854 OUT_RING( dev_priv
->counter
);
858 OUT_RING( CMD_STORE_DWORD_IDX
);
859 OUT_RING( buf_priv
->my_use_idx
);
860 OUT_RING( I810_BUF_FREE
);
864 OUT_RING( CMD_REPORT_HEAD
);
870 /* Interrupts are only for flushing */
871 static void i810_dma_service(int irq
, void *device
, struct pt_regs
*regs
)
873 drm_device_t
*dev
= (drm_device_t
*)device
;
876 atomic_inc(&dev
->total_irq
);
877 temp
= I810_READ16(I810REG_INT_IDENTITY_R
);
878 temp
= temp
& ~(0x6000);
879 if(temp
!= 0) I810_WRITE16(I810REG_INT_IDENTITY_R
,
880 temp
); /* Clear all interrupts */
884 queue_task(&dev
->tq
, &tq_immediate
);
885 mark_bh(IMMEDIATE_BH
);
888 static void i810_dma_task_queue(void *device
)
890 drm_device_t
*dev
= (drm_device_t
*) device
;
891 drm_i810_private_t
*dev_priv
= (drm_i810_private_t
*)dev
->dev_private
;
893 atomic_set(&dev_priv
->flush_done
, 1);
894 wake_up_interruptible(&dev_priv
->flush_queue
);
897 int i810_irq_install(drm_device_t
*dev
, int irq
)
902 if (!irq
) return -EINVAL
;
904 down(&dev
->struct_sem
);
906 up(&dev
->struct_sem
);
910 up(&dev
->struct_sem
);
912 DRM_DEBUG( "Interrupt Install : %d\n", irq
);
913 DRM_DEBUG("%d\n", irq
);
915 dev
->context_flag
= 0;
916 dev
->interrupt_flag
= 0;
919 dev
->dma
->next_buffer
= NULL
;
920 dev
->dma
->next_queue
= NULL
;
921 dev
->dma
->this_buffer
= NULL
;
923 INIT_LIST_HEAD(&dev
->tq
.list
);
925 dev
->tq
.routine
= i810_dma_task_queue
;
928 /* Before installing handler */
929 temp
= I810_READ16(I810REG_HWSTAM
);
930 temp
= temp
& 0x6000;
931 I810_WRITE16(I810REG_HWSTAM
, temp
);
933 temp
= I810_READ16(I810REG_INT_MASK_R
);
934 temp
= temp
& 0x6000;
935 I810_WRITE16(I810REG_INT_MASK_R
, temp
); /* Unmask interrupts */
936 temp
= I810_READ16(I810REG_INT_ENABLE_R
);
937 temp
= temp
& 0x6000;
938 I810_WRITE16(I810REG_INT_ENABLE_R
, temp
); /* Disable all interrupts */
940 /* Install handler */
941 if ((retcode
= request_irq(dev
->irq
,
946 down(&dev
->struct_sem
);
948 up(&dev
->struct_sem
);
951 temp
= I810_READ16(I810REG_INT_ENABLE_R
);
952 temp
= temp
& 0x6000;
953 temp
= temp
| 0x0003;
954 I810_WRITE16(I810REG_INT_ENABLE_R
,
955 temp
); /* Enable bp & user interrupts */
959 int i810_irq_uninstall(drm_device_t
*dev
)
967 down(&dev
->struct_sem
);
970 up(&dev
->struct_sem
);
972 if (!irq
) return -EINVAL
;
974 DRM_DEBUG( "Interrupt UnInstall: %d\n", irq
);
975 DRM_DEBUG("%d\n", irq
);
977 temp
= I810_READ16(I810REG_INT_IDENTITY_R
);
978 temp
= temp
& ~(0x6000);
979 if(temp
!= 0) I810_WRITE16(I810REG_INT_IDENTITY_R
,
980 temp
); /* Clear all interrupts */
982 temp
= I810_READ16(I810REG_INT_ENABLE_R
);
983 temp
= temp
& 0x6000;
984 I810_WRITE16(I810REG_INT_ENABLE_R
,
985 temp
); /* Disable all interrupts */
992 int i810_control(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
995 drm_file_t
*priv
= filp
->private_data
;
996 drm_device_t
*dev
= priv
->dev
;
1000 DRM_DEBUG( "i810_control\n");
1002 if (copy_from_user(&ctl
, (drm_control_t
*)arg
, sizeof(ctl
)))
1006 case DRM_INST_HANDLER
:
1007 if ((retcode
= i810_irq_install(dev
, ctl
.irq
)))
1010 case DRM_UNINST_HANDLER
:
1011 if ((retcode
= i810_irq_uninstall(dev
)))
1020 static inline void i810_dma_emit_flush(drm_device_t
*dev
)
1022 drm_i810_private_t
*dev_priv
= dev
->dev_private
;
1025 i810_kernel_lost_context(dev
);
1028 OUT_RING( CMD_REPORT_HEAD
);
1029 OUT_RING( GFX_OP_USER_INTERRUPT
);
1032 /* i810_wait_ring( dev, dev_priv->ring.Size - 8 ); */
1033 /* atomic_set(&dev_priv->flush_done, 1); */
1034 /* wake_up_interruptible(&dev_priv->flush_queue); */
1037 static inline void i810_dma_quiescent_emit(drm_device_t
*dev
)
1039 drm_i810_private_t
*dev_priv
= dev
->dev_private
;
1042 i810_kernel_lost_context(dev
);
1045 OUT_RING( INST_PARSER_CLIENT
| INST_OP_FLUSH
| INST_FLUSH_MAP_CACHE
);
1046 OUT_RING( CMD_REPORT_HEAD
);
1048 OUT_RING( GFX_OP_USER_INTERRUPT
);
1051 /* i810_wait_ring( dev, dev_priv->ring.Size - 8 ); */
1052 /* atomic_set(&dev_priv->flush_done, 1); */
1053 /* wake_up_interruptible(&dev_priv->flush_queue); */
1056 static void i810_dma_quiescent(drm_device_t
*dev
)
1058 DECLARE_WAITQUEUE(entry
, current
);
1059 drm_i810_private_t
*dev_priv
= (drm_i810_private_t
*)dev
->dev_private
;
1062 if(dev_priv
== NULL
) {
1065 atomic_set(&dev_priv
->flush_done
, 0);
1066 add_wait_queue(&dev_priv
->flush_queue
, &entry
);
1067 end
= jiffies
+ (HZ
*3);
1070 current
->state
= TASK_INTERRUPTIBLE
;
1071 i810_dma_quiescent_emit(dev
);
1072 if (atomic_read(&dev_priv
->flush_done
) == 1) break;
1073 if((signed)(end
- jiffies
) <= 0) {
1074 DRM_ERROR("lockup\n");
1077 schedule_timeout(HZ
*3);
1078 if (signal_pending(current
)) {
1083 current
->state
= TASK_RUNNING
;
1084 remove_wait_queue(&dev_priv
->flush_queue
, &entry
);
1089 static int i810_flush_queue(drm_device_t
*dev
)
1091 DECLARE_WAITQUEUE(entry
, current
);
1092 drm_i810_private_t
*dev_priv
= (drm_i810_private_t
*)dev
->dev_private
;
1093 drm_device_dma_t
*dma
= dev
->dma
;
1097 if(dev_priv
== NULL
) {
1100 atomic_set(&dev_priv
->flush_done
, 0);
1101 add_wait_queue(&dev_priv
->flush_queue
, &entry
);
1102 end
= jiffies
+ (HZ
*3);
1104 current
->state
= TASK_INTERRUPTIBLE
;
1105 i810_dma_emit_flush(dev
);
1106 if (atomic_read(&dev_priv
->flush_done
) == 1) break;
1107 if((signed)(end
- jiffies
) <= 0) {
1108 DRM_ERROR("lockup\n");
1111 schedule_timeout(HZ
*3);
1112 if (signal_pending(current
)) {
1113 ret
= -EINTR
; /* Can't restart */
1118 current
->state
= TASK_RUNNING
;
1119 remove_wait_queue(&dev_priv
->flush_queue
, &entry
);
1122 for (i
= 0; i
< dma
->buf_count
; i
++) {
1123 drm_buf_t
*buf
= dma
->buflist
[ i
];
1124 drm_i810_buf_priv_t
*buf_priv
= buf
->dev_private
;
1126 int used
= cmpxchg(buf_priv
->in_use
, I810_BUF_HARDWARE
,
1129 if (used
== I810_BUF_HARDWARE
)
1130 DRM_DEBUG("reclaimed from HARDWARE\n");
1131 if (used
== I810_BUF_CLIENT
)
1132 DRM_DEBUG("still on client HARDWARE\n");
1138 /* Must be called with the lock held */
1139 void i810_reclaim_buffers(drm_device_t
*dev
, pid_t pid
)
1141 drm_device_dma_t
*dma
= dev
->dma
;
1145 if (!dev
->dev_private
) return;
1146 if (!dma
->buflist
) return;
1148 i810_flush_queue(dev
);
1150 for (i
= 0; i
< dma
->buf_count
; i
++) {
1151 drm_buf_t
*buf
= dma
->buflist
[ i
];
1152 drm_i810_buf_priv_t
*buf_priv
= buf
->dev_private
;
1154 if (buf
->pid
== pid
&& buf_priv
) {
1155 int used
= cmpxchg(buf_priv
->in_use
, I810_BUF_CLIENT
,
1158 if (used
== I810_BUF_CLIENT
)
1159 DRM_DEBUG("reclaimed from client\n");
1160 if(buf_priv
->currently_mapped
== I810_BUF_MAPPED
)
1161 buf_priv
->currently_mapped
= I810_BUF_UNMAPPED
;
1166 int i810_lock(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
1169 drm_file_t
*priv
= filp
->private_data
;
1170 drm_device_t
*dev
= priv
->dev
;
1172 DECLARE_WAITQUEUE(entry
, current
);
1176 if (copy_from_user(&lock
, (drm_lock_t
*)arg
, sizeof(lock
)))
1179 if (lock
.context
== DRM_KERNEL_CONTEXT
) {
1180 DRM_ERROR("Process %d using kernel context %d\n",
1181 current
->pid
, lock
.context
);
1185 DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
1186 lock
.context
, current
->pid
, dev
->lock
.hw_lock
->lock
,
1189 if (lock
.context
< 0) {
1196 add_wait_queue(&dev
->lock
.lock_queue
, &entry
);
1198 current
->state
= TASK_INTERRUPTIBLE
;
1199 if (!dev
->lock
.hw_lock
) {
1200 /* Device has been unregistered */
1204 if (drm_lock_take(&dev
->lock
.hw_lock
->lock
,
1206 dev
->lock
.pid
= current
->pid
;
1207 dev
->lock
.lock_time
= jiffies
;
1208 atomic_inc(&dev
->total_locks
);
1209 break; /* Got lock */
1213 atomic_inc(&dev
->total_sleeps
);
1214 DRM_DEBUG("Calling lock schedule\n");
1216 if (signal_pending(current
)) {
1221 current
->state
= TASK_RUNNING
;
1222 remove_wait_queue(&dev
->lock
.lock_queue
, &entry
);
1226 sigemptyset(&dev
->sigmask
);
1227 sigaddset(&dev
->sigmask
, SIGSTOP
);
1228 sigaddset(&dev
->sigmask
, SIGTSTP
);
1229 sigaddset(&dev
->sigmask
, SIGTTIN
);
1230 sigaddset(&dev
->sigmask
, SIGTTOU
);
1231 dev
->sigdata
.context
= lock
.context
;
1232 dev
->sigdata
.lock
= dev
->lock
.hw_lock
;
1233 block_all_signals(drm_notifier
, &dev
->sigdata
, &dev
->sigmask
);
1235 if (lock
.flags
& _DRM_LOCK_QUIESCENT
) {
1236 DRM_DEBUG("_DRM_LOCK_QUIESCENT\n");
1237 DRM_DEBUG("fred\n");
1238 i810_dma_quiescent(dev
);
1241 DRM_DEBUG("%d %s\n", lock
.context
, ret
? "interrupted" : "has lock");
1245 int i810_flush_ioctl(struct inode
*inode
, struct file
*filp
,
1246 unsigned int cmd
, unsigned long arg
)
1248 drm_file_t
*priv
= filp
->private_data
;
1249 drm_device_t
*dev
= priv
->dev
;
1251 DRM_DEBUG("i810_flush_ioctl\n");
1252 if(!_DRM_LOCK_IS_HELD(dev
->lock
.hw_lock
->lock
)) {
1253 DRM_ERROR("i810_flush_ioctl called without lock held\n");
1257 i810_flush_queue(dev
);
1262 int i810_dma_vertex(struct inode
*inode
, struct file
*filp
,
1263 unsigned int cmd
, unsigned long arg
)
1265 drm_file_t
*priv
= filp
->private_data
;
1266 drm_device_t
*dev
= priv
->dev
;
1267 drm_device_dma_t
*dma
= dev
->dma
;
1268 drm_i810_private_t
*dev_priv
= (drm_i810_private_t
*)dev
->dev_private
;
1269 u32
*hw_status
= (u32
*)dev_priv
->hw_status_page
;
1270 drm_i810_sarea_t
*sarea_priv
= (drm_i810_sarea_t
*)
1271 dev_priv
->sarea_priv
;
1272 drm_i810_vertex_t vertex
;
1274 if (copy_from_user(&vertex
, (drm_i810_vertex_t
*)arg
, sizeof(vertex
)))
1277 if(!_DRM_LOCK_IS_HELD(dev
->lock
.hw_lock
->lock
)) {
1278 DRM_ERROR("i810_dma_vertex called without lock held\n");
1282 DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n",
1283 vertex
.idx
, vertex
.used
, vertex
.discard
);
1285 if(vertex
.idx
< 0 || vertex
.idx
> dma
->buf_count
) return -EINVAL
;
1287 i810_dma_dispatch_vertex( dev
,
1288 dma
->buflist
[ vertex
.idx
],
1289 vertex
.discard
, vertex
.used
);
1291 atomic_add(vertex
.used
, &dma
->total_bytes
);
1292 atomic_inc(&dma
->total_dmas
);
1293 sarea_priv
->last_enqueue
= dev_priv
->counter
-1;
1294 sarea_priv
->last_dispatch
= (int) hw_status
[5];
1301 int i810_clear_bufs(struct inode
*inode
, struct file
*filp
,
1302 unsigned int cmd
, unsigned long arg
)
1304 drm_file_t
*priv
= filp
->private_data
;
1305 drm_device_t
*dev
= priv
->dev
;
1306 drm_i810_clear_t clear
;
1308 if (copy_from_user(&clear
, (drm_i810_clear_t
*)arg
, sizeof(clear
)))
1311 if(!_DRM_LOCK_IS_HELD(dev
->lock
.hw_lock
->lock
)) {
1312 DRM_ERROR("i810_clear_bufs called without lock held\n");
1316 i810_dma_dispatch_clear( dev
, clear
.flags
,
1318 clear
.clear_depth
);
1322 int i810_swap_bufs(struct inode
*inode
, struct file
*filp
,
1323 unsigned int cmd
, unsigned long arg
)
1325 drm_file_t
*priv
= filp
->private_data
;
1326 drm_device_t
*dev
= priv
->dev
;
1328 DRM_DEBUG("i810_swap_bufs\n");
1330 if(!_DRM_LOCK_IS_HELD(dev
->lock
.hw_lock
->lock
)) {
1331 DRM_ERROR("i810_swap_buf called without lock held\n");
1335 i810_dma_dispatch_swap( dev
);
1339 int i810_getage(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
1342 drm_file_t
*priv
= filp
->private_data
;
1343 drm_device_t
*dev
= priv
->dev
;
1344 drm_i810_private_t
*dev_priv
= (drm_i810_private_t
*)dev
->dev_private
;
1345 u32
*hw_status
= (u32
*)dev_priv
->hw_status_page
;
1346 drm_i810_sarea_t
*sarea_priv
= (drm_i810_sarea_t
*)
1347 dev_priv
->sarea_priv
;
1349 sarea_priv
->last_dispatch
= (int) hw_status
[5];
1353 int i810_getbuf(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
1356 drm_file_t
*priv
= filp
->private_data
;
1357 drm_device_t
*dev
= priv
->dev
;
1360 drm_i810_private_t
*dev_priv
= (drm_i810_private_t
*)dev
->dev_private
;
1361 u32
*hw_status
= (u32
*)dev_priv
->hw_status_page
;
1362 drm_i810_sarea_t
*sarea_priv
= (drm_i810_sarea_t
*)
1363 dev_priv
->sarea_priv
;
1365 DRM_DEBUG("getbuf\n");
1366 if (copy_from_user(&d
, (drm_i810_dma_t
*)arg
, sizeof(d
)))
1369 if(!_DRM_LOCK_IS_HELD(dev
->lock
.hw_lock
->lock
)) {
1370 DRM_ERROR("i810_dma called without lock held\n");
1376 retcode
= i810_dma_get_buffer(dev
, &d
, filp
);
1378 DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
1379 current
->pid
, retcode
, d
.granted
);
1381 if (copy_to_user((drm_dma_t
*)arg
, &d
, sizeof(d
)))
1383 sarea_priv
->last_dispatch
= (int) hw_status
[5];
1388 int i810_copybuf(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
1391 drm_file_t
*priv
= filp
->private_data
;
1392 drm_device_t
*dev
= priv
->dev
;
1394 drm_i810_private_t
*dev_priv
= (drm_i810_private_t
*)dev
->dev_private
;
1395 u32
*hw_status
= (u32
*)dev_priv
->hw_status_page
;
1396 drm_i810_sarea_t
*sarea_priv
= (drm_i810_sarea_t
*)
1397 dev_priv
->sarea_priv
;
1399 drm_i810_buf_priv_t
*buf_priv
;
1400 drm_device_dma_t
*dma
= dev
->dma
;
1402 if(!_DRM_LOCK_IS_HELD(dev
->lock
.hw_lock
->lock
)) {
1403 DRM_ERROR("i810_dma called without lock held\n");
1407 if (copy_from_user(&d
, (drm_i810_copy_t
*)arg
, sizeof(d
)))
1410 if(d
.idx
< 0 || d
.idx
> dma
->buf_count
) return -EINVAL
;
1411 buf
= dma
->buflist
[ d
.idx
];
1412 buf_priv
= buf
->dev_private
;
1413 if (buf_priv
->currently_mapped
!= I810_BUF_MAPPED
) return -EPERM
;
1415 /* Stopping end users copying their data to the entire kernel
1417 if (d
.used
< 0 || d
.used
> buf
->total
)
1420 if (copy_from_user(buf_priv
->virtual, d
.address
, d
.used
))
1423 sarea_priv
->last_dispatch
= (int) hw_status
[5];
1428 int i810_docopy(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
1431 if(VM_DONTCOPY
== 0) return 1;