1 /* tdfx.c -- tdfx driver -*- linux-c -*-
2 * Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com
4 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
22 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
27 * Rickard E. (Rik) Faith <faith@precisioninsight.com>
28 * Daryll Strauss <daryll@precisioninsight.com>
/* Driver identification used by the DRM_IOCTL_VERSION handler below.
 * NOTE(review): TDFX_MAJOR/TDFX_MINOR are referenced later (tdfx_version)
 * but their #defines are not visible in this chunk -- confirm they exist
 * in the full file. */
35 #define TDFX_NAME "tdfx"
36 #define TDFX_DESC "tdfx"
37 #define TDFX_DATE "19991009"
40 #define TDFX_PATCHLEVEL 1
/* The single (static) device instance for this driver; tdfx_init/
 * tdfx_cleanup and the file operations all refer to this object. */
42 static drm_device_t tdfx_device
;
/* Context reserved by the X server; tdfx_lock/tdfx_unlock special-case
 * this handle when adjusting scheduling priority. Set to -1 ("none")
 * in tdfx_setup(). Non-static: presumably shared with another file in
 * this driver -- verify before narrowing linkage. */
43 drm_ctx_t tdfx_res_ctx
;
/* File operations for the character device.  Only the .release hook is
 * visible in this chunk (GNU C labeled-initializer syntax, "field:");
 * the remaining members (open, ioctl, mmap, ...) and the closing brace
 * are in lines missing from this view. */
45 static struct file_operations tdfx_fops
= {
48 release
: tdfx_release
,
/* Misc (minor-number) device registration record; a dynamic minor is
 * requested and the chosen value is read back in tdfx_init().  The
 * name/fops members and closing brace are not visible here. */
56 static struct miscdevice tdfx_misc
= {
57 minor
: MISC_DYNAMIC_MINOR
,
/* Ioctl dispatch table, indexed by DRM_IOCTL_NR(cmd).  Each entry looks
 * like { handler, flag, flag }; given the checks in tdfx_ioctl() the two
 * flags are presumably (auth_needed, root_only) -- confirm against the
 * drm_ioctl_desc_t definition in the DRM headers.  Generic operations go
 * to drm_* helpers; context and lock operations use tdfx_* handlers. */
62 static drm_ioctl_desc_t tdfx_ioctls
[] = {
63 [DRM_IOCTL_NR(DRM_IOCTL_VERSION
)] = { tdfx_version
, 0, 0 },
64 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE
)] = { drm_getunique
, 0, 0 },
65 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC
)] = { drm_getmagic
, 0, 0 },
66 [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID
)] = { drm_irq_busid
, 0, 1 },
68 [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE
)] = { drm_setunique
, 1, 1 },
69 [DRM_IOCTL_NR(DRM_IOCTL_BLOCK
)] = { drm_block
, 1, 1 },
70 [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK
)] = { drm_unblock
, 1, 1 },
71 [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC
)] = { drm_authmagic
, 1, 1 },
72 [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP
)] = { drm_addmap
, 1, 1 },
/* Context management is driver-specific for tdfx. */
74 [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX
)] = { tdfx_addctx
, 1, 1 },
75 [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX
)] = { tdfx_rmctx
, 1, 1 },
76 [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX
)] = { tdfx_modctx
, 1, 1 },
77 [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX
)] = { tdfx_getctx
, 1, 0 },
78 [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX
)] = { tdfx_switchctx
, 1, 1 },
79 [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX
)] = { tdfx_newctx
, 1, 1 },
80 [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX
)] = { tdfx_resctx
, 1, 0 },
81 [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW
)] = { drm_adddraw
, 1, 1 },
82 [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW
)] = { drm_rmdraw
, 1, 1 },
/* Heavyweight hardware-lock acquire/release, defined below. */
83 [DRM_IOCTL_NR(DRM_IOCTL_LOCK
)] = { tdfx_lock
, 1, 0 },
84 [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK
)] = { tdfx_unlock
, 1, 0 },
85 [DRM_IOCTL_NR(DRM_IOCTL_FINISH
)] = { drm_finish
, 1, 0 },
/* Number of slots in the table above; used as the bounds check in
 * tdfx_ioctl(). */
87 #define TDFX_IOCTL_COUNT DRM_ARRAY_SIZE(tdfx_ioctls)
/* Option string passed at module load time ("tdfx=..."); handed to
 * drm_parse_options() in tdfx_init().  NULL means "no options". */
90 static char *tdfx
= NULL
;
93 MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas.");
94 MODULE_DESCRIPTION("tdfx");
/* Declare "tdfx" as a string-valued module parameter (pre-2.6
 * MODULE_PARM interface; "s" = string). */
95 MODULE_PARM(tdfx
, "s");
/* tdfx_setup -- (re)initialize per-device state on first open.
 *
 * Called from tdfx_open() when open_count transitions 0 -> 1.  Resets
 * every counter, list, lock and buffer field of *dev to a clean state.
 * NOTE(review): this chunk is missing several original lines (the
 * opening brace, local declarations such as "int i;", and the return
 * statement), so the comments below describe only what is visible. */
97 static int tdfx_setup(drm_device_t
*dev
)
/* Zero all bookkeeping/statistics counters. */
101 atomic_set(&dev
->ioctl_count
, 0);
102 atomic_set(&dev
->vma_count
, 0);
104 atomic_set(&dev
->buf_alloc
, 0);
106 atomic_set(&dev
->total_open
, 0);
107 atomic_set(&dev
->total_close
, 0);
108 atomic_set(&dev
->total_ioctl
, 0);
109 atomic_set(&dev
->total_irq
, 0);
110 atomic_set(&dev
->total_ctx
, 0);
111 atomic_set(&dev
->total_locks
, 0);
112 atomic_set(&dev
->total_unlocks
, 0);
113 atomic_set(&dev
->total_contends
, 0);
114 atomic_set(&dev
->total_sleeps
, 0);
/* Empty every bucket of the authentication-magic hash table. */
116 for (i
= 0; i
< DRM_HASH_SIZE
; i
++) {
117 dev
->magiclist
[i
].head
= NULL
;
118 dev
->magiclist
[i
].tail
= NULL
;
/* Hardware-lock state: no lock page mapped yet. */
123 dev
->lock
.hw_lock
= NULL
;
124 init_waitqueue_head(&dev
->lock
.lock_queue
);
/* DMA queue bookkeeping (unused by this non-DMA driver, but reset). */
125 dev
->queue_count
= 0;
126 dev
->queue_reserved
= 0;
127 dev
->queue_slots
= 0;
128 dev
->queuelist
= NULL
;
130 dev
->context_flag
= 0;
131 dev
->interrupt_flag
= 0;
/* Context-switch tracking. */
134 dev
->last_context
= 0;
135 dev
->last_switch
= 0;
136 dev
->last_checked
= 0;
137 init_timer(&dev
->timer
);
138 init_waitqueue_head(&dev
->context_wait
);
/* Ring buffer for /dev/drm reads: read ptr, write ptr, end sentinel. */
143 dev
->buf_rp
= dev
->buf
;
144 dev
->buf_wp
= dev
->buf
;
145 dev
->buf_end
= dev
->buf
+ DRM_BSZ
;
146 dev
->buf_async
= NULL
;
147 init_waitqueue_head(&dev
->buf_readers
);
148 init_waitqueue_head(&dev
->buf_writers
);
/* -1 marks "no reserved (X server) context allocated yet". */
150 tdfx_res_ctx
.handle
=-1;
154 /* The kernel's context could be created here, but is now created
155 in drm_dma_enqueue. This is more resource-efficient for
156 hardware that does not do DMA, but may mean that
157 drm_select_queue fails between the time the interrupt is
158 initialized and the time the queues are initialized. */
/* tdfx_takedown -- free all per-device state on last close.
 *
 * Called from tdfx_release() when open_count reaches 0.  Runs under
 * dev->struct_sem.  Frees name strings, the magic hash table, the vma
 * debug list, and all mappings (undoing MTRR setup for frame buffers).
 * NOTE(review): several original lines (braces, "int i; int retcode;",
 * "drm_map_t *map;", the switch header, return) are missing from this
 * chunk; comments describe only the visible statements. */
164 static int tdfx_takedown(drm_device_t
*dev
)
167 drm_magic_entry_t
*pt
, *next
;
169 drm_vma_entry_t
*vma
, *vma_next
;
/* Serialize against other users of the device structure. */
173 down(&dev
->struct_sem
);
174 del_timer(&dev
->timer
);
/* Free the devname/unique strings (length includes the NUL). */
177 drm_free(dev
->devname
, strlen(dev
->devname
)+1, DRM_MEM_DRIVER
);
182 drm_free(dev
->unique
, strlen(dev
->unique
)+1, DRM_MEM_DRIVER
);
/* Walk and free every chain of the authentication-magic hash table. */
187 for (i
= 0; i
< DRM_HASH_SIZE
; i
++) {
188 for (pt
= dev
->magiclist
[i
].head
; pt
; pt
= next
) {
190 drm_free(pt
, sizeof(*pt
), DRM_MEM_MAGIC
);
192 dev
->magiclist
[i
].head
= dev
->magiclist
[i
].tail
= NULL
;
195 /* Clear vma list (only built for debugging) */
197 for (vma
= dev
->vmalist
; vma
; vma
= vma_next
) {
198 vma_next
= vma
->next
;
199 drm_free(vma
, sizeof(*vma
), DRM_MEM_VMAS
);
204 /* Clear map area and mtrr information */
206 for (i
= 0; i
< dev
->map_count
; i
++) {
207 map
= dev
->maplist
[i
];
/* Frame-buffer maps: remove the MTRR (if one was set) and unmap
 * the ioremap()ed region. */
210 case _DRM_FRAME_BUFFER
:
212 if (map
->mtrr
>= 0) {
214 retcode
= mtrr_del(map
->mtrr
,
217 DRM_DEBUG("mtrr_del = %d\n", retcode
);
220 drm_ioremapfree(map
->handle
, map
->size
);
/* (Other map type, presumably _DRM_SHM: free the backing pages --
 * the case label is in a missing line.) */
223 drm_free_pages((unsigned long)map
->handle
,
229 drm_free(map
, sizeof(*map
), DRM_MEM_MAPS
);
/* Free the map-pointer array itself. */
231 drm_free(dev
->maplist
,
232 dev
->map_count
* sizeof(*dev
->maplist
),
/* Drop the hardware lock page and wake any sleepers so they can see
 * the device is gone. */
238 if (dev
->lock
.hw_lock
) {
239 dev
->lock
.hw_lock
= NULL
; /* SHM removed */
241 wake_up_interruptible(&dev
->lock
.lock_queue
);
243 up(&dev
->struct_sem
);
248 /* tdfx_init is called via init_module at module load time, or via
249 * linux/init/main.c (this is not currently supported). */
/* NOTE(review): the function signature and opening brace for tdfx_init
 * are in lines missing from this chunk; only the body fragments below
 * are visible.  Zeroes the static device, parses module options,
 * registers the misc device, and records device identity. */
254 drm_device_t
*dev
= &tdfx_device
;
/* Start from a fully zeroed device structure. */
258 memset((void *)dev
, 0, sizeof(*dev
));
259 dev
->count_lock
= SPIN_LOCK_UNLOCKED
;
260 sema_init(&dev
->struct_sem
, 1);
/* Apply the "tdfx=" module parameter (may be NULL). */
263 drm_parse_options(tdfx
);
/* Register as a misc char device; failure aborts module load. */
266 if ((retcode
= misc_register(&tdfx_misc
))) {
267 DRM_ERROR("Cannot register \"%s\"\n", TDFX_NAME
);
/* Record the dev_t actually assigned (dynamic minor). */
270 dev
->device
= MKDEV(MISC_MAJOR
, tdfx_misc
.minor
);
271 dev
->name
= TDFX_NAME
;
276 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
287 /* tdfx_cleanup is called via cleanup_module at module unload time. */
/* Deregisters the misc device; logs an error if deregistration fails
 * (the body's braces and any takedown call are in missing lines). */
289 void tdfx_cleanup(void)
291 drm_device_t
*dev
= &tdfx_device
;
296 if (misc_deregister(&tdfx_misc
)) {
297 DRM_ERROR("Cannot unload module\n");
299 DRM_INFO("Module unloaded\n");
/* tdfx_version -- DRM_IOCTL_VERSION handler.
 *
 * Copies a drm_version_t in from user space, fills in the driver's
 * major/minor/patchlevel and the name/date/desc strings (bounded by the
 * caller-supplied buffer lengths), and copies the result back.
 * copy_{from,to}_user_ret return -EFAULT to the caller on fault. */
304 int tdfx_version(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
307 drm_version_t version
;
310 copy_from_user_ret(&version
,
311 (drm_version_t
*)arg
,
/* Copy string `value` into user buffer `name`, truncating to the
 * user-supplied name##_len, then report the full untruncated length
 * back in name##_len (so callers can re-query with a bigger buffer). */
315 #define DRM_COPY(name,value) \
316 len = strlen(value); \
317 if (len > name##_len) len = name##_len; \
318 name##_len = strlen(value); \
320 copy_to_user_ret(name, value, len, -EFAULT); \
323 version
.version_major
= TDFX_MAJOR
;
324 version
.version_minor
= TDFX_MINOR
;
325 version
.version_patchlevel
= TDFX_PATCHLEVEL
;
327 DRM_COPY(version
.name
, TDFX_NAME
);
328 DRM_COPY(version
.date
, TDFX_DATE
);
329 DRM_COPY(version
.desc
, TDFX_DESC
);
331 copy_to_user_ret((drm_version_t
*)arg
,
/* tdfx_open -- file_operations open handler.
 *
 * Delegates to drm_open_helper(); on success bumps the statistics and,
 * when this is the first opener (open_count 0 -> 1), runs full device
 * setup via tdfx_setup().  count_lock guards the open_count test. */
338 int tdfx_open(struct inode
*inode
, struct file
*filp
)
340 drm_device_t
*dev
= &tdfx_device
;
343 DRM_DEBUG("open_count = %d\n", dev
->open_count
);
344 if (!(retcode
= drm_open_helper(inode
, filp
, dev
))) {
346 atomic_inc(&dev
->total_open
);
347 spin_lock(&dev
->count_lock
);
/* First open: drop the spinlock before tdfx_setup(), which may sleep. */
348 if (!dev
->open_count
++) {
349 spin_unlock(&dev
->count_lock
);
350 return tdfx_setup(dev
);
352 spin_unlock(&dev
->count_lock
);
/* tdfx_release -- file_operations release (close) handler.
 *
 * Delegates to drm_release(); when the last reference goes away
 * (open_count reaches 0) it refuses teardown while ioctls are in
 * flight or processes are blocked, otherwise tears the device down
 * via tdfx_takedown().  count_lock guards the open_count update. */
357 int tdfx_release(struct inode
*inode
, struct file
*filp
)
359 drm_file_t
*priv
= filp
->private_data
;
360 drm_device_t
*dev
= priv
->dev
;
363 DRM_DEBUG("open_count = %d\n", dev
->open_count
);
364 if (!(retcode
= drm_release(inode
, filp
))) {
366 atomic_inc(&dev
->total_close
);
367 spin_lock(&dev
->count_lock
);
368 if (!--dev
->open_count
) {
/* Last close but the device is still busy: report and bail out
 * without takedown. */
369 if (atomic_read(&dev
->ioctl_count
) || dev
->blocked
) {
370 DRM_ERROR("Device busy: %d %d\n",
371 atomic_read(&dev
->ioctl_count
),
373 spin_unlock(&dev
->count_lock
);
/* Quiescent last close: release the spinlock first, since
 * tdfx_takedown() takes struct_sem and may sleep. */
376 spin_unlock(&dev
->count_lock
);
377 return tdfx_takedown(dev
);
379 spin_unlock(&dev
->count_lock
);
384 /* tdfx_ioctl is called whenever a process performs an ioctl on /dev/drm. */
/* Central ioctl dispatcher: bounds-checks the ioctl number, looks up
 * the descriptor in tdfx_ioctls[], enforces the descriptor's
 * root-only / authentication requirements, then calls the handler.
 * ioctl_count is held elevated for the duration so tdfx_release()
 * can detect in-flight ioctls. */
386 int tdfx_ioctl(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
389 int nr
= DRM_IOCTL_NR(cmd
);
390 drm_file_t
*priv
= filp
->private_data
;
391 drm_device_t
*dev
= priv
->dev
;
393 drm_ioctl_desc_t
*ioctl
;
396 atomic_inc(&dev
->ioctl_count
);
397 atomic_inc(&dev
->total_ioctl
);
400 DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
401 current
->pid
, cmd
, nr
, dev
->device
, priv
->authenticated
);
/* Reject ioctl numbers past the end of the dispatch table. */
403 if (nr
>= TDFX_IOCTL_COUNT
) {
406 ioctl
= &tdfx_ioctls
[nr
];
410 DRM_DEBUG("no function\n");
/* Permission gate: root_only entries need CAP_SYS_ADMIN, auth_needed
 * entries need a DRM-authenticated client. */
412 } else if ((ioctl
->root_only
&& !capable(CAP_SYS_ADMIN
))
413 || (ioctl
->auth_needed
&& !priv
->authenticated
)) {
/* Dispatch to the table's handler (func assignment is in a missing
 * line). */
416 retcode
= (func
)(inode
, filp
, cmd
, arg
);
420 atomic_dec(&dev
->ioctl_count
);
/* tdfx_lock -- DRM_IOCTL_LOCK handler: acquire the hardware lock.
 *
 * Sleeps on dev->lock.lock_queue until drm_lock_take() succeeds, with
 * interruptible waits and a throttling slice for the reserved (X
 * server) context.  After acquiring, triggers a context switch when
 * the requested context differs from the last one.  NOTE(review):
 * many original lines (braces, local declarations of lock/ret/start,
 * the wait loop header, return) are missing from this chunk; comments
 * annotate only the visible statements. */
424 int tdfx_lock(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
427 drm_file_t
*priv
= filp
->private_data
;
428 drm_device_t
*dev
= priv
->dev
;
429 DECLARE_WAITQUEUE(entry
, current
);
/* Optional cycle-counter instrumentation of lock-acquire latency. */
432 #if DRM_DMA_HISTOGRAM
435 dev
->lck_start
= start
= get_cycles();
438 copy_from_user_ret(&lock
, (drm_lock_t
*)arg
, sizeof(lock
), -EFAULT
);
/* User space must never lock with the kernel's own context. */
440 if (lock
.context
== DRM_KERNEL_CONTEXT
) {
441 DRM_ERROR("Process %d using kernel context %d\n",
442 current
->pid
, lock
.context
);
446 DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
447 lock
.context
, current
->pid
, dev
->lock
.hw_lock
->lock
,
451 /* dev->queue_count == 0 right now for
453 if (lock
.context
< 0 || lock
.context
>= dev
->queue_count
)
/* Anti-starvation: if the reserved context re-requests the lock
 * within DRM_LOCK_SLICE jiffies of last holding it while there is
 * contention, yield for the remainder of the slice first. */
459 if (_DRM_LOCKING_CONTEXT(dev
->lock
.hw_lock
->lock
)
461 long j
= jiffies
- dev
->lock
.lock_time
;
463 if (lock
.context
== tdfx_res_ctx
.handle
&&
464 j
>= 0 && j
< DRM_LOCK_SLICE
) {
465 /* Can't take lock if we just had it and
466 there is contention. */
467 DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d jiffies=%d\n",
468 lock
.context
, current
->pid
, j
,
469 dev
->lock
.lock_time
, jiffies
);
470 current
->state
= TASK_INTERRUPTIBLE
;
471 current
->policy
|= SCHED_YIELD
;
472 schedule_timeout(DRM_LOCK_SLICE
-j
);
473 DRM_DEBUG("jiffies=%d\n", jiffies
);
/* Sleep-until-acquired loop (loop header is in a missing line). */
477 add_wait_queue(&dev
->lock
.lock_queue
, &entry
);
479 if (!dev
->lock
.hw_lock
) {
480 /* Device has been unregistered */
484 if (drm_lock_take(&dev
->lock
.hw_lock
->lock
,
/* Lock acquired: record holder and time for the slice check above. */
486 dev
->lock
.pid
= current
->pid
;
487 dev
->lock
.lock_time
= jiffies
;
488 atomic_inc(&dev
->total_locks
);
489 break; /* Got lock */
/* Contended: yield and sleep interruptibly, bail on a signal. */
493 atomic_inc(&dev
->total_sleeps
);
494 current
->state
= TASK_INTERRUPTIBLE
;
495 current
->policy
|= SCHED_YIELD
;
497 if (signal_pending(current
)) {
502 current
->state
= TASK_RUNNING
;
503 remove_wait_queue(&dev
->lock
.lock_queue
, &entry
);
/* Context switch needed: neither side of the switch is the reserved
 * context and the requested context differs from the current one. */
507 if (!ret
&& dev
->last_context
!= lock
.context
&&
508 lock
.context
!= tdfx_res_ctx
.handle
&&
509 dev
->last_context
!= tdfx_res_ctx
.handle
) {
510 add_wait_queue(&dev
->context_wait
, &entry
);
511 current
->state
= TASK_INTERRUPTIBLE
;
512 /* PRE: dev->last_context != lock.context */
513 tdfx_context_switch(dev
, dev
->last_context
, lock
.context
);
514 /* POST: we will wait for the context
515 switch and will dispatch on a later call
516 when dev->last_context == lock.context
517 NOTE WE HOLD THE LOCK THROUGHOUT THIS
519 current
->policy
|= SCHED_YIELD
;
521 current
->state
= TASK_RUNNING
;
522 remove_wait_queue(&dev
->context_wait
, &entry
);
523 if (signal_pending(current
)) {
525 } else if (dev
->last_context
!= lock
.context
) {
526 DRM_ERROR("Context mismatch: %d %d\n",
527 dev
->last_context
, lock
.context
);
/* Honor the READY/QUIESCENT request flags (bodies are in missing
 * lines). */
533 if (lock
.flags
& _DRM_LOCK_READY
) {
534 /* Wait for space in DMA/FIFO */
536 if (lock
.flags
& _DRM_LOCK_QUIESCENT
) {
537 /* Make hardware quiescent */
/* Non-reserved lock holders get a reduced scheduling quantum and
 * priority so the X server stays responsive. */
544 if (lock
.context
!= tdfx_res_ctx
.handle
) {
545 current
->counter
= 5;
546 current
->priority
= DEF_PRIORITY
/4;
549 DRM_DEBUG("%d %s\n", lock
.context
, ret
? "interrupted" : "has lock");
551 #if DRM_DMA_HISTOGRAM
552 atomic_inc(&dev
->histo
.lacq
[drm_histogram_slot(get_cycles() - start
)]);
/* tdfx_unlock -- DRM_IOCTL_UNLOCK handler: release the hardware lock.
 *
 * Transfers the lock to the kernel context and then frees it (unless
 * a context switch is pending, signalled by dev->context_flag), and
 * restores the caller's scheduling parameters.  NOTE(review): braces,
 * the drm_lock_t local declaration and the return are in lines missing
 * from this chunk. */
559 int tdfx_unlock(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
562 drm_file_t
*priv
= filp
->private_data
;
563 drm_device_t
*dev
= priv
->dev
;
566 copy_from_user_ret(&lock
, (drm_lock_t
*)arg
, sizeof(lock
), -EFAULT
);
/* User space must never unlock the kernel's own context. */
568 if (lock
.context
== DRM_KERNEL_CONTEXT
) {
569 DRM_ERROR("Process %d using kernel context %d\n",
570 current
->pid
, lock
.context
);
574 DRM_DEBUG("%d frees lock (%d holds)\n",
576 _DRM_LOCKING_CONTEXT(dev
->lock
.hw_lock
->lock
));
577 atomic_inc(&dev
->total_unlocks
);
/* Count contended releases separately for the statistics. */
578 if (_DRM_LOCK_IS_CONT(dev
->lock
.hw_lock
->lock
))
579 atomic_inc(&dev
->total_contends
);
/* Hand the lock to the kernel context before freeing it, so waiters
 * never observe it unowned mid-release. */
580 drm_lock_transfer(dev
, &dev
->lock
.hw_lock
->lock
, DRM_KERNEL_CONTEXT
);
581 /* FIXME: Try to send data to card here */
582 if (!dev
->context_flag
) {
583 if (drm_lock_free(dev
, &dev
->lock
.hw_lock
->lock
,
584 DRM_KERNEL_CONTEXT
)) {
/* Restore full scheduling quantum/priority that tdfx_lock() reduced
 * for non-reserved contexts. */
589 if (lock
.context
!= tdfx_res_ctx
.handle
) {
590 current
->counter
= 5;
591 current
->priority
= DEF_PRIORITY
;
/* Standard module entry/exit hooks. */
597 module_init(tdfx_init
);
598 module_exit(tdfx_cleanup
);
602 * tdfx_options is called by the kernel to parse command-line options passed
603 * via the boot-loader (e.g., LILO). It calls the insmod option routine,
/* (Return statement and braces are in lines missing from this chunk.) */
606 static int __init
tdfx_options(char *str
)
608 drm_parse_options(str
);
/* Register "tdfx=" as a kernel boot parameter handled by tdfx_options. */
612 __setup("tdfx=", tdfx_options
);