1 /* tdfx.c -- tdfx driver -*- linux-c -*-
2 * Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com
4 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
22 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
27 * Rickard E. (Rik) Faith <faith@precisioninsight.com>
28 * Daryll Strauss <daryll@precisioninsight.com>
/* Kernel configuration header (2.2/2.4-era requirement for module options). */
32 #include <linux/config.h>
/* Driver identity and release date, reported to userspace via the
 * DRM_IOCTL_VERSION ioctl (see tdfx_version below). */
36 #define TDFX_NAME "tdfx"
37 #define TDFX_DESC "tdfx"
38 #define TDFX_DATE "19991009"
/* NOTE(review): TDFX_MAJOR/TDFX_MINOR are referenced by tdfx_version but
 * their #defines are not visible in this extract. */
41 #define TDFX_PATCHLEVEL 1
/* Single static device instance: this driver supports exactly one card. */
43 static drm_device_t tdfx_device
;
/* Context handle reserved for the X server's "resolution" context; set to -1
 * in tdfx_setup and compared against by the lock/unlock paths to give the
 * server special treatment. Non-static: shared with the context code. */
44 drm_ctx_t tdfx_res_ctx
;
/* File operations for the /dev minor; uses gcc's old "label:" designated-
 * initializer syntax. NOTE(review): only the .release member is visible in
 * this extract — the remaining members are on lines missing from it. */
46 static struct file_operations tdfx_fops
= {
49 release
: tdfx_release
,
/* Misc-device registration record; a dynamic minor is assigned by
 * misc_register in tdfx_init and read back as tdfx_misc.minor. */
57 static struct miscdevice tdfx_misc
= {
58 minor
: MISC_DYNAMIC_MINOR
,
/* Ioctl dispatch table, indexed by DRM_IOCTL_NR(cmd) from tdfx_ioctl.
 * Each entry is { handler, auth_needed, root_only } — see the permission
 * checks in tdfx_ioctl, which test ioctl->root_only and ioctl->auth_needed. */
63 static drm_ioctl_desc_t tdfx_ioctls
[] = {
64 [DRM_IOCTL_NR(DRM_IOCTL_VERSION
)] = { tdfx_version
, 0, 0 },
65 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE
)] = { drm_getunique
, 0, 0 },
66 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC
)] = { drm_getmagic
, 0, 0 },
67 [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID
)] = { drm_irq_busid
, 0, 1 },
/* Root-only administrative ioctls, handled by DRM core helpers. */
69 [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE
)] = { drm_setunique
, 1, 1 },
70 [DRM_IOCTL_NR(DRM_IOCTL_BLOCK
)] = { drm_block
, 1, 1 },
71 [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK
)] = { drm_unblock
, 1, 1 },
72 [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC
)] = { drm_authmagic
, 1, 1 },
73 [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP
)] = { drm_addmap
, 1, 1 },
/* Context management is driver-specific for tdfx. */
75 [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX
)] = { tdfx_addctx
, 1, 1 },
76 [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX
)] = { tdfx_rmctx
, 1, 1 },
77 [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX
)] = { tdfx_modctx
, 1, 1 },
78 [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX
)] = { tdfx_getctx
, 1, 0 },
79 [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX
)] = { tdfx_switchctx
, 1, 1 },
80 [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX
)] = { tdfx_newctx
, 1, 1 },
81 [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX
)] = { tdfx_resctx
, 1, 0 },
82 [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW
)] = { drm_adddraw
, 1, 1 },
83 [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW
)] = { drm_rmdraw
, 1, 1 },
/* Heavyweight hardware-lock ioctls; authenticated clients only. */
84 [DRM_IOCTL_NR(DRM_IOCTL_LOCK
)] = { tdfx_lock
, 1, 0 },
85 [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK
)] = { tdfx_unlock
, 1, 0 },
86 [DRM_IOCTL_NR(DRM_IOCTL_FINISH
)] = { drm_finish
, 1, 0 },
/* Table size; used as the bounds check in tdfx_ioctl. */
88 #define TDFX_IOCTL_COUNT DRM_ARRAY_SIZE(tdfx_ioctls)
/* Module option string ("tdfx=..."), parsed by drm_parse_options in
 * tdfx_init; NULL means no options were given. */
91 static char *tdfx
= NULL
;
94 MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas.");
95 MODULE_DESCRIPTION("tdfx");
/* Old-style (pre-module_param) string parameter declaration. */
96 MODULE_PARM(tdfx
, "s");
/* tdfx_setup: (re)initialize the per-device state on first open (called
 * from tdfx_open when open_count goes 0 -> 1). Resets counters, the magic
 * hash, the lock state, the context-switch bookkeeping, and the debug
 * ring buffer. NOTE(review): some original lines of this function are
 * missing from this extract (gaps in the embedded numbering). */
98 static int tdfx_setup(drm_device_t
*dev
)
/* Zero all usage/statistics counters. */
102 atomic_set(&dev
->ioctl_count
, 0);
103 atomic_set(&dev
->vma_count
, 0);
105 atomic_set(&dev
->buf_alloc
, 0);
107 atomic_set(&dev
->total_open
, 0);
108 atomic_set(&dev
->total_close
, 0);
109 atomic_set(&dev
->total_ioctl
, 0);
110 atomic_set(&dev
->total_irq
, 0);
111 atomic_set(&dev
->total_ctx
, 0);
112 atomic_set(&dev
->total_locks
, 0);
113 atomic_set(&dev
->total_unlocks
, 0);
114 atomic_set(&dev
->total_contends
, 0);
115 atomic_set(&dev
->total_sleeps
, 0);
/* Empty every bucket of the authentication-magic hash table. */
117 for (i
= 0; i
< DRM_HASH_SIZE
; i
++) {
118 dev
->magiclist
[i
].head
= NULL
;
119 dev
->magiclist
[i
].tail
= NULL
;
/* Hardware lock not mapped yet; queue state cleared. */
124 dev
->lock
.hw_lock
= NULL
;
125 init_waitqueue_head(&dev
->lock
.lock_queue
);
126 dev
->queue_count
= 0;
127 dev
->queue_reserved
= 0;
128 dev
->queue_slots
= 0;
129 dev
->queuelist
= NULL
;
131 dev
->context_flag
= 0;
132 dev
->interrupt_flag
= 0;
/* Context-switch bookkeeping used by tdfx_lock/tdfx_context_switch. */
135 dev
->last_context
= 0;
136 dev
->last_switch
= 0;
137 dev
->last_checked
= 0;
138 init_timer(&dev
->timer
);
139 init_waitqueue_head(&dev
->context_wait
);
/* Reset the in-kernel debug message ring buffer (read/write pointers,
 * end marker, reader/writer wait queues). */
144 dev
->buf_rp
= dev
->buf
;
145 dev
->buf_wp
= dev
->buf
;
146 dev
->buf_end
= dev
->buf
+ DRM_BSZ
;
147 dev
->buf_async
= NULL
;
148 init_waitqueue_head(&dev
->buf_readers
);
149 init_waitqueue_head(&dev
->buf_writers
);
/* -1 == "no reserved X-server context yet". */
151 tdfx_res_ctx
.handle
=-1;
155 /* The kernel's context could be created here, but is now created
156 in drm_dma_enqueue. This is more resource-efficient for
157 hardware that does not do DMA, but may mean that
158 drm_select_queue fails between the time the interrupt is
159 initialized and the time the queues are initialized. */
/* tdfx_takedown: tear down all per-device state on last close (called
 * from tdfx_release when open_count reaches 0). Frees the device/unique
 * names, the magic hash, the vma debug list, and all maps (undoing any
 * MTRR and ioremap work), then drops the hardware lock pointer and wakes
 * any lock waiters. Runs under dev->struct_sem.
 * NOTE(review): some original lines (switch header, error paths, closing
 * braces) are missing from this extract. */
165 static int tdfx_takedown(drm_device_t
*dev
)
168 drm_magic_entry_t
*pt
, *next
;
170 drm_vma_entry_t
*vma
, *vma_next
;
174 down(&dev
->struct_sem
);
175 del_timer(&dev
->timer
);
/* Free the name strings allocated by the DRM core (+1 for the NUL). */
178 drm_free(dev
->devname
, strlen(dev
->devname
)+1, DRM_MEM_DRIVER
);
183 drm_free(dev
->unique
, strlen(dev
->unique
)+1, DRM_MEM_DRIVER
);
/* Walk and free every chain in the authentication-magic hash. */
188 for (i
= 0; i
< DRM_HASH_SIZE
; i
++) {
189 for (pt
= dev
->magiclist
[i
].head
; pt
; pt
= next
) {
191 drm_free(pt
, sizeof(*pt
), DRM_MEM_MAGIC
);
193 dev
->magiclist
[i
].head
= dev
->magiclist
[i
].tail
= NULL
;
196 /* Clear vma list (only built for debugging) */
198 for (vma
= dev
->vmalist
; vma
; vma
= vma_next
) {
199 vma_next
= vma
->next
;
200 drm_free(vma
, sizeof(*vma
), DRM_MEM_VMAS
);
205 /* Clear map area and mtrr information */
207 for (i
= 0; i
< dev
->map_count
; i
++) {
208 map
= dev
->maplist
[i
];
/* Frame-buffer maps may carry an MTRR set at addmap time; remove it
 * before releasing the ioremap. mtrr_del < 0 means "no MTRR". */
211 case _DRM_FRAME_BUFFER
:
213 if (map
->mtrr
>= 0) {
215 retcode
= mtrr_del(map
->mtrr
,
218 DRM_DEBUG("mtrr_del = %d\n", retcode
);
221 drm_ioremapfree(map
->handle
, map
->size
);
/* Shared-memory maps were allocated with drm_alloc_pages. */
224 drm_free_pages((unsigned long)map
->handle
,
230 drm_free(map
, sizeof(*map
), DRM_MEM_MAPS
);
/* Finally free the map pointer array itself. */
232 drm_free(dev
->maplist
,
233 dev
->map_count
* sizeof(*dev
->maplist
),
/* The hw_lock lived in a SHM map freed above — just forget the pointer
 * and wake anyone still sleeping on it. */
239 if (dev
->lock
.hw_lock
) {
240 dev
->lock
.hw_lock
= NULL
; /* SHM removed */
242 wake_up_interruptible(&dev
->lock
.lock_queue
);
244 up(&dev
->struct_sem
);
249 /* tdfx_init is called via init_module at module load time, or via
250 * linux/init/main.c (this is not currently supported).
 *
 * Zeroes the static device, parses module options, registers the misc
 * device, and records the resulting dev_t and name.
 * NOTE(review): the function header line and the error/return paths are
 * missing from this extract. */
255 drm_device_t
*dev
= &tdfx_device
;
259 memset((void *)dev
, 0, sizeof(*dev
));
260 dev
->count_lock
= SPIN_LOCK_UNLOCKED
;
261 sema_init(&dev
->struct_sem
, 1);
/* Parse the insmod "tdfx=" option string (may be NULL). */
264 drm_parse_options(tdfx
);
267 if ((retcode
= misc_register(&tdfx_misc
))) {
268 DRM_ERROR("Cannot register \"%s\"\n", TDFX_NAME
);
/* Record the device number assigned by the misc subsystem. */
271 dev
->device
= MKDEV(MISC_MAJOR
, tdfx_misc
.minor
);
272 dev
->name
= TDFX_NAME
;
277 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
288 /* tdfx_cleanup is called via cleanup_module at module unload time.
 * Deregisters the misc device; errors are logged but cannot be returned. */
290 void tdfx_cleanup(void)
292 drm_device_t
*dev
= &tdfx_device
;
297 if (misc_deregister(&tdfx_misc
)) {
298 DRM_ERROR("Cannot unload module\n");
300 DRM_INFO("Module unloaded\n");
/* tdfx_version: DRM_IOCTL_VERSION handler. Copies the user's drm_version_t
 * in, fills in the major/minor/patchlevel and the name/date/desc strings
 * (bounded by the lengths the caller supplied), and copies it back.
 * copy_{from,to}_user_ret return -EFAULT directly on a bad user pointer. */
305 int tdfx_version(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
308 drm_version_t version
;
311 copy_from_user_ret(&version
,
312 (drm_version_t
*)arg
,
/* Bounded string copy-out helper. Note it copies at most name##_len bytes
 * but then reports the full strlen(value) back, so the caller can detect
 * truncation. (No comments may be inserted inside the \-continued lines.) */
316 #define DRM_COPY(name,value) \
317 len = strlen(value); \
318 if (len > name##_len) len = name##_len; \
319 name##_len = strlen(value); \
321 copy_to_user_ret(name, value, len, -EFAULT); \
324 version
.version_major
= TDFX_MAJOR
;
325 version
.version_minor
= TDFX_MINOR
;
326 version
.version_patchlevel
= TDFX_PATCHLEVEL
;
328 DRM_COPY(version
.name
, TDFX_NAME
);
329 DRM_COPY(version
.date
, TDFX_DATE
);
330 DRM_COPY(version
.desc
, TDFX_DESC
);
/* Write the filled-in structure back to userspace. */
332 copy_to_user_ret((drm_version_t
*)arg
,
/* tdfx_open: file-open entry point. Delegates per-file setup to
 * drm_open_helper; on the first successful open (open_count 0 -> 1) also
 * runs tdfx_setup to initialize the device. count_lock is dropped before
 * calling tdfx_setup since setup may sleep. */
339 int tdfx_open(struct inode
*inode
, struct file
*filp
)
341 drm_device_t
*dev
= &tdfx_device
;
344 DRM_DEBUG("open_count = %d\n", dev
->open_count
);
345 if (!(retcode
= drm_open_helper(inode
, filp
, dev
))) {
347 atomic_inc(&dev
->total_open
);
348 spin_lock(&dev
->count_lock
);
/* Post-increment: true only when this is the first opener. */
349 if (!dev
->open_count
++) {
350 spin_unlock(&dev
->count_lock
);
351 return tdfx_setup(dev
);
353 spin_unlock(&dev
->count_lock
);
/* tdfx_release: file-close entry point. Delegates per-file teardown to
 * drm_release; when the last reference goes away, refuses teardown if
 * ioctls are still in flight or the device is blocked (logs "Device busy"),
 * otherwise runs tdfx_takedown. Mirrors the locking pattern of tdfx_open. */
358 int tdfx_release(struct inode
*inode
, struct file
*filp
)
360 drm_file_t
*priv
= filp
->private_data
;
361 drm_device_t
*dev
= priv
->dev
;
364 DRM_DEBUG("open_count = %d\n", dev
->open_count
);
365 if (!(retcode
= drm_release(inode
, filp
))) {
367 atomic_inc(&dev
->total_close
);
368 spin_lock(&dev
->count_lock
);
/* Pre-decrement: true only when this was the last opener. */
369 if (!--dev
->open_count
) {
370 if (atomic_read(&dev
->ioctl_count
) || dev
->blocked
) {
371 DRM_ERROR("Device busy: %d %d\n",
372 atomic_read(&dev
->ioctl_count
),
374 spin_unlock(&dev
->count_lock
);
377 spin_unlock(&dev
->count_lock
);
378 return tdfx_takedown(dev
);
380 spin_unlock(&dev
->count_lock
);
385 /* tdfx_ioctl is called whenever a process performs an ioctl on /dev/drm.
 *
 * Dispatches through tdfx_ioctls[] after bounds, capability
 * (CAP_SYS_ADMIN for root_only entries) and authentication checks.
 * ioctl_count brackets the call so tdfx_release can detect in-flight
 * ioctls. NOTE(review): the lines assigning `func` and the error returns
 * are missing from this extract. */
387 int tdfx_ioctl(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
390 int nr
= DRM_IOCTL_NR(cmd
);
391 drm_file_t
*priv
= filp
->private_data
;
392 drm_device_t
*dev
= priv
->dev
;
394 drm_ioctl_desc_t
*ioctl
;
397 atomic_inc(&dev
->ioctl_count
);
398 atomic_inc(&dev
->total_ioctl
);
401 DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
402 current
->pid
, cmd
, nr
, dev
->device
, priv
->authenticated
);
/* Reject ioctl numbers beyond the dispatch table. */
404 if (nr
>= TDFX_IOCTL_COUNT
) {
407 ioctl
= &tdfx_ioctls
[nr
];
411 DRM_DEBUG("no function\n");
/* Permission gate: root-only entries need CAP_SYS_ADMIN; auth_needed
 * entries require a drm-authenticated client. */
413 } else if ((ioctl
->root_only
&& !capable(CAP_SYS_ADMIN
))
414 || (ioctl
->auth_needed
&& !priv
->authenticated
)) {
417 retcode
= (func
)(inode
, filp
, cmd
, arg
);
421 atomic_dec(&dev
->ioctl_count
);
/* tdfx_lock: DRM_IOCTL_LOCK handler — acquire the heavyweight hardware
 * lock for the caller's context, performing a software context switch if
 * a different context last held the hardware. Sleeps interruptibly on
 * lock.lock_queue until drm_lock_take succeeds. Also implements two
 * 2.2/2.4-era scheduler hacks: an anti-starvation delay when a non-server
 * context re-requests the lock within DRM_LOCK_SLICE jiffies, and a
 * priority boost for lock holders other than the X server context.
 * NOTE(review): several original lines (declarations of lock/ret/start,
 * some returns/braces, interruption handling) are missing from this
 * extract. */
425 int tdfx_lock(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
428 drm_file_t
*priv
= filp
->private_data
;
429 drm_device_t
*dev
= priv
->dev
;
430 DECLARE_WAITQUEUE(entry
, current
);
/* Optional lock-acquisition latency histogram (cycle-counter based). */
433 #if DRM_DMA_HISTOGRAM
436 dev
->lck_start
= start
= get_cycles();
439 copy_from_user_ret(&lock
, (drm_lock_t
*)arg
, sizeof(lock
), -EFAULT
);
/* Userspace must never claim the kernel's reserved context. */
441 if (lock
.context
== DRM_KERNEL_CONTEXT
) {
442 DRM_ERROR("Process %d using kernel context %d\n",
443 current
->pid
, lock
.context
);
447 DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
448 lock
.context
, current
->pid
, dev
->lock
.hw_lock
->lock
,
452 /* dev->queue_count == 0 right now for
454 if (lock
.context
< 0 || lock
.context
>= dev
->queue_count
)
/* Anti-starvation: if this (non-server) context just dropped the lock
 * and there is contention, force it to yield for the rest of its slice. */
460 if (_DRM_LOCKING_CONTEXT(dev
->lock
.hw_lock
->lock
)
462 long j
= jiffies
- dev
->lock
.lock_time
;
464 if (lock
.context
== tdfx_res_ctx
.handle
&&
465 j
>= 0 && j
< DRM_LOCK_SLICE
) {
466 /* Can't take lock if we just had it and
467 there is contention. */
468 DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d jiffies=%d\n",
469 lock
.context
, current
->pid
, j
,
470 dev
->lock
.lock_time
, jiffies
);
471 current
->state
= TASK_INTERRUPTIBLE
;
472 current
->policy
|= SCHED_YIELD
;
473 schedule_timeout(DRM_LOCK_SLICE
-j
);
474 DRM_DEBUG("jiffies=%d\n", jiffies
);
/* Sleep-retry loop: wait on the lock queue until drm_lock_take wins. */
478 add_wait_queue(&dev
->lock
.lock_queue
, &entry
);
480 if (!dev
->lock
.hw_lock
) {
481 /* Device has been unregistered */
485 if (drm_lock_take(&dev
->lock
.hw_lock
->lock
,
/* Lock acquired: record the holder and acquisition time. */
487 dev
->lock
.pid
= current
->pid
;
488 dev
->lock
.lock_time
= jiffies
;
489 atomic_inc(&dev
->total_locks
);
490 break; /* Got lock */
/* Contended: go to sleep (interruptibly) and yield the CPU. */
494 atomic_inc(&dev
->total_sleeps
);
495 current
->state
= TASK_INTERRUPTIBLE
;
496 current
->policy
|= SCHED_YIELD
;
498 if (signal_pending(current
)) {
503 current
->state
= TASK_RUNNING
;
504 remove_wait_queue(&dev
->lock
.lock_queue
, &entry
);
/* If a different context last touched the hardware (and neither side is
 * the reserved X-server context), request a context switch and wait for
 * it to complete — while still holding the hardware lock. */
508 if (!ret
&& dev
->last_context
!= lock
.context
&&
509 lock
.context
!= tdfx_res_ctx
.handle
&&
510 dev
->last_context
!= tdfx_res_ctx
.handle
) {
511 add_wait_queue(&dev
->context_wait
, &entry
);
512 current
->state
= TASK_INTERRUPTIBLE
;
513 /* PRE: dev->last_context != lock.context */
514 tdfx_context_switch(dev
, dev
->last_context
, lock
.context
);
515 /* POST: we will wait for the context
516 switch and will dispatch on a later call
517 when dev->last_context == lock.context
518 NOTE WE HOLD THE LOCK THROUGHOUT THIS
520 current
->policy
|= SCHED_YIELD
;
522 current
->state
= TASK_RUNNING
;
523 remove_wait_queue(&dev
->context_wait
, &entry
);
524 if (signal_pending(current
)) {
526 } else if (dev
->last_context
!= lock
.context
) {
527 DRM_ERROR("Context mismatch: %d %d\n",
528 dev
->last_context
, lock
.context
);
/* Optional post-acquisition work requested via lock flags. */
534 if (lock
.flags
& _DRM_LOCK_READY
) {
535 /* Wait for space in DMA/FIFO */
537 if (lock
.flags
& _DRM_LOCK_QUIESCENT
) {
538 /* Make hardware quiescent */
/* Scheduler favoritism: boost non-server lock holders so they finish
 * their hardware work quickly (counter/priority are 2.2/2.4 task fields). */
545 if (lock
.context
!= tdfx_res_ctx
.handle
) {
546 current
->counter
= 5;
547 current
->priority
= DEF_PRIORITY
/4;
550 DRM_DEBUG("%d %s\n", lock
.context
, ret
? "interrupted" : "has lock");
552 #if DRM_DMA_HISTOGRAM
553 atomic_inc(&dev
->histo
.lacq
[drm_histogram_slot(get_cycles() - start
)]);
/* tdfx_unlock: DRM_IOCTL_UNLOCK handler — release the heavyweight
 * hardware lock. Transfers the lock to the kernel context, frees it if
 * no context switch is pending, and undoes tdfx_lock's priority boost
 * for non-server contexts. NOTE(review): the lock variable declaration
 * and some error/return lines are missing from this extract. */
560 int tdfx_unlock(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
563 drm_file_t
*priv
= filp
->private_data
;
564 drm_device_t
*dev
= priv
->dev
;
567 copy_from_user_ret(&lock
, (drm_lock_t
*)arg
, sizeof(lock
), -EFAULT
);
/* Userspace must never unlock on behalf of the kernel context. */
569 if (lock
.context
== DRM_KERNEL_CONTEXT
) {
570 DRM_ERROR("Process %d using kernel context %d\n",
571 current
->pid
, lock
.context
);
575 DRM_DEBUG("%d frees lock (%d holds)\n",
577 _DRM_LOCKING_CONTEXT(dev
->lock
.hw_lock
->lock
));
578 atomic_inc(&dev
->total_unlocks
);
/* Count contended releases (someone was waiting). */
579 if (_DRM_LOCK_IS_CONT(dev
->lock
.hw_lock
->lock
))
580 atomic_inc(&dev
->total_contends
);
/* Hand the lock to the kernel context first... */
581 drm_lock_transfer(dev
, &dev
->lock
.hw_lock
->lock
, DRM_KERNEL_CONTEXT
);
582 /* FIXME: Try to send data to card here */
/* ...then actually free it unless a context switch is in progress. */
583 if (!dev
->context_flag
) {
584 if (drm_lock_free(dev
, &dev
->lock
.hw_lock
->lock
,
585 DRM_KERNEL_CONTEXT
)) {
/* Restore normal scheduling for non-server contexts (see tdfx_lock). */
590 if (lock
.context
!= tdfx_res_ctx
.handle
) {
591 current
->counter
= 5;
592 current
->priority
= DEF_PRIORITY
;
/* Module entry/exit hooks. */
598 module_init(tdfx_init
);
599 module_exit(tdfx_cleanup
);
603 * tdfx_options is called by the kernel to parse command-line options passed
604 * via the boot-loader (e.g., LILO). It calls the insmod option routine,
 * drm_parse_options, with the "tdfx=" option string.
 * NOTE(review): the opening of this comment, the function braces and the
 * return statement are on lines missing from this extract. */
607 static int __init
tdfx_options(char *str
)
609 drm_parse_options(str
);
/* Register the "tdfx=" boot-command-line option. */
613 __setup("tdfx=", tdfx_options
);