1 /* tdfx.c -- tdfx driver -*- linux-c -*-
2 * Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com
3 * Revised: Tue Oct 12 08:51:35 1999 by faith@precisioninsight.com
5 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
32 #include <linux/config.h>
/* Entry points exported to the kernel so the module loader can resolve
 * them from other modules. */
35 EXPORT_SYMBOL(tdfx_init
);
36 EXPORT_SYMBOL(tdfx_cleanup
);
/* Driver identification strings and patch level, reported to user space
 * by the DRM_IOCTL_VERSION handler (tdfx_version()). */
38 #define TDFX_NAME "tdfx"
39 #define TDFX_DESC "tdfx"
40 #define TDFX_DATE "19991009"
43 #define TDFX_PATCHLEVEL 1
/* The single device instance managed by this driver. */
45 static drm_device_t tdfx_device
;
/* Reserved context; its handle is reset to -1 in tdfx_setup() and is
 * treated specially by the lock/unlock paths in tdfx_lock(). */
46 drm_ctx_t tdfx_res_ctx
;
/* File operations for the DRM character device node.
 * NOTE(review): only the release hook is visible in this excerpt; the
 * remaining members are on lines missing here. */
48 static struct file_operations tdfx_fops
= {
51 release
: tdfx_release
,
/* Misc character device registered by tdfx_init(); the misc subsystem
 * assigns the minor number dynamically. */
58 static struct miscdevice tdfx_misc
= {
59 minor
: MISC_DYNAMIC_MINOR
,
/* Ioctl dispatch table, indexed by DRM ioctl number.  Judging from the
 * checks in tdfx_ioctl(), the two integer flags per entry are
 * auth_needed and root_only -- confirm field order against
 * drm_ioctl_desc_t.
 * NOTE(review): a few entries of the original table are missing from
 * this excerpt. */
64 static drm_ioctl_desc_t tdfx_ioctls
[] = {
65 [DRM_IOCTL_NR(DRM_IOCTL_VERSION
)] = { tdfx_version
, 0, 0 },
66 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE
)] = { drm_getunique
, 0, 0 },
67 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC
)] = { drm_getmagic
, 0, 0 },
68 [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID
)] = { drm_irq_busid
, 0, 1 },
70 [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE
)] = { drm_setunique
, 1, 1 },
71 [DRM_IOCTL_NR(DRM_IOCTL_BLOCK
)] = { drm_block
, 1, 1 },
72 [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK
)] = { drm_unblock
, 1, 1 },
73 [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC
)] = { drm_authmagic
, 1, 1 },
74 [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP
)] = { drm_addmap
, 1, 1 },
76 [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX
)] = { tdfx_addctx
, 1, 1 },
77 [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX
)] = { tdfx_rmctx
, 1, 1 },
78 [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX
)] = { tdfx_modctx
, 1, 1 },
79 [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX
)] = { tdfx_getctx
, 1, 0 },
80 [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX
)] = { tdfx_switchctx
, 1, 1 },
81 [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX
)] = { tdfx_newctx
, 1, 1 },
82 [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX
)] = { tdfx_resctx
, 1, 0 },
83 [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW
)] = { drm_adddraw
, 1, 1 },
84 [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW
)] = { drm_rmdraw
, 1, 1 },
85 [DRM_IOCTL_NR(DRM_IOCTL_LOCK
)] = { tdfx_lock
, 1, 0 },
86 [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK
)] = { tdfx_unlock
, 1, 0 },
87 [DRM_IOCTL_NR(DRM_IOCTL_FINISH
)] = { drm_finish
, 1, 0 },
/* Table size (including unpopulated holes), used by tdfx_ioctl() to
 * range-check incoming ioctl numbers. */
89 #define TDFX_IOCTL_COUNT DRM_ARRAY_SIZE(tdfx_ioctls)
/* "tdfx=" option string, settable as a module parameter; handed to
 * drm_parse_options() during tdfx_init(). */
92 static char *tdfx
= NULL
;
95 MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas.");
96 MODULE_DESCRIPTION("tdfx");
97 MODULE_PARM(tdfx
, "s");
/* Initialize the device structure for first use.  Called from
 * tdfx_open() when open_count goes 0 -> 1; undone by tdfx_takedown()
 * on last release.
 * NOTE(review): several original lines (opening brace, local
 * declarations, trailing return) are missing from this excerpt. */
99 static int tdfx_setup(drm_device_t
*dev
)
/* Reset all usage and statistics counters. */
103 atomic_set(&dev
->ioctl_count
, 0);
104 atomic_set(&dev
->vma_count
, 0);
106 atomic_set(&dev
->buf_alloc
, 0);
108 atomic_set(&dev
->total_open
, 0);
109 atomic_set(&dev
->total_close
, 0);
110 atomic_set(&dev
->total_ioctl
, 0);
111 atomic_set(&dev
->total_irq
, 0);
112 atomic_set(&dev
->total_ctx
, 0);
113 atomic_set(&dev
->total_locks
, 0);
114 atomic_set(&dev
->total_unlocks
, 0);
115 atomic_set(&dev
->total_contends
, 0);
116 atomic_set(&dev
->total_sleeps
, 0);
/* Empty every chain of the magic-number hash table. */
118 for (i
= 0; i
< DRM_HASH_SIZE
; i
++) {
119 dev
->magiclist
[i
].head
= NULL
;
120 dev
->magiclist
[i
].tail
= NULL
;
/* No hardware lock page until a client maps one; queue bookkeeping
 * starts empty. */
125 dev
->lock
.hw_lock
= NULL
;
126 init_waitqueue_head(&dev
->lock
.lock_queue
);
127 dev
->queue_count
= 0;
128 dev
->queue_reserved
= 0;
129 dev
->queue_slots
= 0;
130 dev
->queuelist
= NULL
;
132 dev
->context_flag
= 0;
133 dev
->interrupt_flag
= 0;
/* Context-switch bookkeeping plus its timer and wait queue. */
136 dev
->last_context
= 0;
137 dev
->last_switch
= 0;
138 dev
->last_checked
= 0;
139 init_timer(&dev
->timer
);
140 init_waitqueue_head(&dev
->context_wait
);
/* Device message buffer: read/write pointers start at the base and the
 * end marker is base + DRM_BSZ. */
145 dev
->buf_rp
= dev
->buf
;
146 dev
->buf_wp
= dev
->buf
;
147 dev
->buf_end
= dev
->buf
+ DRM_BSZ
;
148 dev
->buf_async
= NULL
;
149 init_waitqueue_head(&dev
->buf_readers
);
150 init_waitqueue_head(&dev
->buf_writers
);
/* Mark the reserved context handle as unallocated. */
152 tdfx_res_ctx
.handle
=-1;
156 /* The kernel's context could be created here, but is now created
157 in drm_dma_enqueue. This is more resource-efficient for
158 hardware that does not do DMA, but may mean that
159 drm_select_queue fails between the time the interrupt is
160 initialized and the time the queues are initialized. */
/* Tear down device state on last close (mirror of tdfx_setup()): frees
 * the device/unique name strings, magic hash entries, the vma list and
 * all map areas, and wakes any lock waiters.  Runs under struct_sem.
 * NOTE(review): parts of the original body are missing from this
 * excerpt (some locals, the switch header, conditional guards, and the
 * return path). */
166 static int tdfx_takedown(drm_device_t
*dev
)
169 drm_magic_entry_t
*pt
, *next
;
171 drm_vma_entry_t
*vma
, *vma_next
;
175 down(&dev
->struct_sem
);
176 del_timer(&dev
->timer
);
/* Free the saved device and unique name strings (+1 for the NUL). */
179 drm_free(dev
->devname
, strlen(dev
->devname
)+1, DRM_MEM_DRIVER
);
184 drm_free(dev
->unique
, strlen(dev
->unique
)+1, DRM_MEM_DRIVER
);
/* Free every entry on each magic-number hash chain, then clear the
 * chain's head and tail. */
189 for (i
= 0; i
< DRM_HASH_SIZE
; i
++) {
190 for (pt
= dev
->magiclist
[i
].head
; pt
; pt
= next
) {
192 drm_free(pt
, sizeof(*pt
), DRM_MEM_MAGIC
);
194 dev
->magiclist
[i
].head
= dev
->magiclist
[i
].tail
= NULL
;
197 /* Clear vma list (only built for debugging) */
199 for (vma
= dev
->vmalist
; vma
; vma
= vma_next
) {
200 vma_next
= vma
->next
;
201 drm_free(vma
, sizeof(*vma
), DRM_MEM_VMAS
);
206 /* Clear map area and mtrr information */
208 for (i
= 0; i
< dev
->map_count
; i
++) {
209 map
= dev
->maplist
[i
];
/* Frame-buffer maps may carry an MTRR entry that must be removed
 * before the mapping itself is released. */
212 case _DRM_FRAME_BUFFER
:
214 if (map
->mtrr
>= 0) {
216 retcode
= mtrr_del(map
->mtrr
,
219 DRM_DEBUG("mtrr_del = %d\n", retcode
);
222 drm_ioremapfree(map
->handle
, map
->size
);
/* NOTE(review): presumably a different map type (shared memory?) whose
 * backing pages are freed rather than iounmapped -- the switch case
 * label is on a line missing from this excerpt; confirm. */
225 drm_free_pages((unsigned long)map
->handle
,
231 drm_free(map
, sizeof(*map
), DRM_MEM_MAPS
);
/* Free the map pointer array itself. */
233 drm_free(dev
->maplist
,
234 dev
->map_count
* sizeof(*dev
->maplist
),
/* Drop the hardware lock pointer (its SHM mapping is gone) and wake
 * any sleepers so they can notice the device went away. */
240 if (dev
->lock
.hw_lock
) {
241 dev
->lock
.hw_lock
= NULL
; /* SHM removed */
243 wake_up_interruptible(&dev
->lock
.lock_queue
);
245 up(&dev
->struct_sem
);
250 /* tdfx_init is called via init_module at module load time, or via
251 * linux/init/main.c (this is not currently supported). */
/* NOTE(review): the tdfx_init() function header itself (original line
 * ~255) is missing from this excerpt; the statements below are its
 * body. */
256 drm_device_t
*dev
= &tdfx_device
;
/* Start from a zeroed device structure, then set up its spinlock and
 * semaphore. */
260 memset((void *)dev
, 0, sizeof(*dev
));
261 dev
->count_lock
= SPIN_LOCK_UNLOCKED
;
262 sema_init(&dev
->struct_sem
, 1);
/* Apply the "tdfx=" options, then register the misc character device
 * through which the driver is accessed. */
265 drm_parse_options(tdfx
);
268 if ((retcode
= misc_register(&tdfx_misc
))) {
269 DRM_ERROR("Cannot register \"%s\"\n", TDFX_NAME
);
/* Record the device number assigned by the misc subsystem. */
272 dev
->device
= MKDEV(MISC_MAJOR
, tdfx_misc
.minor
);
273 dev
->name
= TDFX_NAME
;
278 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
289 /* tdfx_cleanup is called via cleanup_module at module unload time. */
291 void tdfx_cleanup(void)
293 drm_device_t
*dev
= &tdfx_device
;
/* Unregister the misc device; on failure we can only log, since the
 * module is already being torn down. */
298 if (misc_deregister(&tdfx_misc
)) {
299 DRM_ERROR("Cannot unload module\n");
301 DRM_INFO("Module unloaded\n");
/* DRM_IOCTL_VERSION handler: fills a user-supplied drm_version_t with
 * the driver's version numbers and its name/date/description strings.
 * NOTE(review): some lines of the original (locals, final return) are
 * missing from this excerpt. */
306 int tdfx_version(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
309 drm_version_t version
;
/* Fetch the request (which carries the user buffer sizes/pointers). */
312 copy_from_user_ret(&version
,
313 (drm_version_t
*)arg
,
/* Copy string `value` to the user buffer `name`, clamped to the
 * caller-supplied name##_len.
 * NOTE(review): multi-statement macro not wrapped in do { } while (0),
 * so it is only safe where a plain statement sequence is; also,
 * name##_len is set to the full strlen(value) even when the copy was
 * clamped -- confirm both are intended. */
317 #define DRM_COPY(name,value) \
318 len = strlen(value); \
319 if (len > name##_len) len = name##_len; \
320 name##_len = strlen(value); \
322 copy_to_user_ret(name, value, len, -EFAULT); \
325 version
.version_major
= TDFX_MAJOR
;
326 version
.version_minor
= TDFX_MINOR
;
327 version
.version_patchlevel
= TDFX_PATCHLEVEL
;
329 DRM_COPY(version
.name
, TDFX_NAME
);
330 DRM_COPY(version
.date
, TDFX_DATE
);
331 DRM_COPY(version
.desc
, TDFX_DESC
);
/* Write the filled-in structure back to user space. */
333 copy_to_user_ret((drm_version_t
*)arg
,
/* Open handler for the device node: delegates to drm_open_helper()
 * and, on the first concurrent open, performs one-time device setup.
 * open_count is guarded by count_lock. */
340 int tdfx_open(struct inode
*inode
, struct file
*filp
)
342 drm_device_t
*dev
= &tdfx_device
;
345 DRM_DEBUG("open_count = %d\n", dev
->open_count
);
346 if (!(retcode
= drm_open_helper(inode
, filp
, dev
))) {
348 atomic_inc(&dev
->total_open
);
349 spin_lock(&dev
->count_lock
);
/* First opener initializes the device.  Note that count_lock is
 * dropped before tdfx_setup() runs. */
350 if (!dev
->open_count
++) {
351 spin_unlock(&dev
->count_lock
);
352 return tdfx_setup(dev
);
354 spin_unlock(&dev
->count_lock
);
/* Release handler: after drm_release() succeeds, the last closer tears
 * the device down -- unless ioctls are still in flight or the device
 * is blocked, which is reported as "Device busy". */
359 int tdfx_release(struct inode
*inode
, struct file
*filp
)
361 drm_file_t
*priv
= filp
->private_data
;
362 drm_device_t
*dev
= priv
->dev
;
365 DRM_DEBUG("open_count = %d\n", dev
->open_count
);
366 if (!(retcode
= drm_release(inode
, filp
))) {
368 atomic_inc(&dev
->total_close
);
369 spin_lock(&dev
->count_lock
);
370 if (!--dev
->open_count
) {
/* Refuse takedown while ioctls are outstanding or the device is
 * blocked. */
371 if (atomic_read(&dev
->ioctl_count
) || dev
->blocked
) {
372 DRM_ERROR("Device busy: %d %d\n",
373 atomic_read(&dev
->ioctl_count
),
375 spin_unlock(&dev
->count_lock
);
/* Last close and device idle: tear everything down.  count_lock is
 * dropped before tdfx_takedown() runs. */
378 spin_unlock(&dev
->count_lock
);
379 return tdfx_takedown(dev
);
381 spin_unlock(&dev
->count_lock
);
386 /* tdfx_ioctl is called whenever a process performs an ioctl on /dev/drm. */
/* Dispatches through tdfx_ioctls[] after range, presence, privilege
 * (root_only requires CAP_SYS_ADMIN) and authentication checks.
 * ioctl_count brackets the call so tdfx_release() can detect in-flight
 * ioctls. */
388 int tdfx_ioctl(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
391 int nr
= DRM_IOCTL_NR(cmd
);
392 drm_file_t
*priv
= filp
->private_data
;
393 drm_device_t
*dev
= priv
->dev
;
395 drm_ioctl_desc_t
*ioctl
;
398 atomic_inc(&dev
->ioctl_count
);
399 atomic_inc(&dev
->total_ioctl
);
402 DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
403 current
->pid
, cmd
, nr
, dev
->device
, priv
->authenticated
);
/* Reject ioctl numbers beyond the dispatch table. */
405 if (nr
>= TDFX_IOCTL_COUNT
) {
408 ioctl
= &tdfx_ioctls
[nr
];
412 DRM_DEBUG("no function\n");
/* Enforce root-only and authentication requirements for this entry. */
414 } else if ((ioctl
->root_only
&& !capable(CAP_SYS_ADMIN
))
415 || (ioctl
->auth_needed
&& !priv
->authenticated
)) {
/* All checks passed: invoke the handler. */
418 retcode
= (func
)(inode
, filp
, cmd
, arg
);
422 atomic_dec(&dev
->ioctl_count
);
/* DRM_IOCTL_LOCK handler: acquires the hardware lock for the
 * requesting context, sleeping interruptibly until it is free.  If the
 * lock is granted to a context other than the last one scheduled (and
 * neither is the reserved context), a hardware context switch is
 * started and waited for while the lock is held.
 * NOTE(review): several original lines are missing from this excerpt
 * (locals, some branches, the final return). */
426 int tdfx_lock(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
429 drm_file_t
*priv
= filp
->private_data
;
430 drm_device_t
*dev
= priv
->dev
;
431 DECLARE_WAITQUEUE(entry
, current
);
/* Timestamp the acquisition attempt for the latency histogram. */
434 #if DRM_DMA_HISTOGRAM
437 dev
->lck_start
= start
= get_cycles();
/* Fetch the lock request from user space. */
440 copy_from_user_ret(&lock
, (drm_lock_t
*)arg
, sizeof(lock
), -EFAULT
);
/* User processes may never claim the kernel context. */
442 if (lock
.context
== DRM_KERNEL_CONTEXT
) {
443 DRM_ERROR("Process %d using kernel context %d\n",
444 current
->pid
, lock
.context
);
448 DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
449 lock
.context
, current
->pid
, dev
->lock
.hw_lock
->lock
,
453 /* dev->queue_count == 0 right now for
/* Validate the requested context number against the queue count. */
455 if (lock
.context
< 0 || lock
.context
>= dev
->queue_count
)
/* Anti-starvation check: if the reserved context just held the lock
 * within its time slice and there is contention, yield instead of
 * re-taking it immediately. */
461 if (_DRM_LOCKING_CONTEXT(dev
->lock
.hw_lock
->lock
)
463 long j
= jiffies
- dev
->lock
.lock_time
;
465 if (lock
.context
== tdfx_res_ctx
.handle
&&
466 j
>= 0 && j
< DRM_LOCK_SLICE
) {
467 /* Can't take lock if we just had it and
468 there is contention. */
469 DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d jiffies=%d\n",
470 lock
.context
, current
->pid
, j
,
471 dev
->lock
.lock_time
, jiffies
);
472 current
->state
= TASK_INTERRUPTIBLE
;
473 current
->policy
|= SCHED_YIELD
;
474 schedule_timeout(DRM_LOCK_SLICE
-j
);
475 DRM_DEBUG("jiffies=%d\n", jiffies
);
/* Sleep on the lock queue until the lock is taken, the device goes
 * away, or a signal arrives. */
479 add_wait_queue(&dev
->lock
.lock_queue
, &entry
);
481 if (!dev
->lock
.hw_lock
) {
482 /* Device has been unregistered */
/* Try to take the lock; on success record the owner pid and the
 * acquisition time. */
486 if (drm_lock_take(&dev
->lock
.hw_lock
->lock
,
488 dev
->lock
.pid
= current
->pid
;
489 dev
->lock
.lock_time
= jiffies
;
490 atomic_inc(&dev
->total_locks
);
491 break; /* Got lock */
/* Lock held by someone else: account the sleep and yield. */
495 atomic_inc(&dev
->total_sleeps
);
496 current
->state
= TASK_INTERRUPTIBLE
;
497 current
->policy
|= SCHED_YIELD
;
/* Interrupted by a signal: give up the wait. */
499 if (signal_pending(current
)) {
504 current
->state
= TASK_RUNNING
;
505 remove_wait_queue(&dev
->lock
.lock_queue
, &entry
);
/* Lock obtained for a context other than the last one scheduled, and
 * neither side is the reserved context: run a context switch and wait
 * for it to complete (see POST note below). */
509 if (!ret
&& dev
->last_context
!= lock
.context
&&
510 lock
.context
!= tdfx_res_ctx
.handle
&&
511 dev
->last_context
!= tdfx_res_ctx
.handle
) {
512 add_wait_queue(&dev
->context_wait
, &entry
);
513 current
->state
= TASK_INTERRUPTIBLE
;
514 /* PRE: dev->last_context != lock.context */
515 tdfx_context_switch(dev
, dev
->last_context
, lock
.context
);
516 /* POST: we will wait for the context
517 switch and will dispatch on a later call
518 when dev->last_context == lock.context
519 NOTE WE HOLD THE LOCK THROUGHOUT THIS
521 current
->policy
|= SCHED_YIELD
;
523 current
->state
= TASK_RUNNING
;
524 remove_wait_queue(&dev
->context_wait
, &entry
);
525 if (signal_pending(current
)) {
/* The switch did not land on the requested context -- report it. */
527 } else if (dev
->last_context
!= lock
.context
) {
528 DRM_ERROR("Context mismatch: %d %d\n",
529 dev
->last_context
, lock
.context
);
535 if (lock
.flags
& _DRM_LOCK_READY
) {
536 /* Wait for space in DMA/FIFO */
538 if (lock
.flags
& _DRM_LOCK_QUIESCENT
) {
539 /* Make hardware quiescent */
545 DRM_DEBUG("%d %s\n", lock
.context
, ret
? "interrupted" : "has lock");
/* Record the lock-acquisition latency in the histogram. */
547 #if DRM_DMA_HISTOGRAM
548 atomic_inc(&dev
->histo
.lacq
[drm_histogram_slot(get_cycles() - start
)]);
/* DRM_IOCTL_UNLOCK handler: transfers the hardware lock back to the
 * kernel context and, unless a context switch is in flight
 * (context_flag set), frees it so the next waiter can take it.
 * Rejects direct unlocks of the kernel context.
 * NOTE(review): some lines of the original (locals, error paths,
 * return) are missing from this excerpt. */
555 int tdfx_unlock(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
558 drm_file_t
*priv
= filp
->private_data
;
559 drm_device_t
*dev
= priv
->dev
;
/* Fetch the unlock request from user space. */
562 copy_from_user_ret(&lock
, (drm_lock_t
*)arg
, sizeof(lock
), -EFAULT
);
/* The kernel context may not be unlocked directly by a process. */
564 if (lock
.context
== DRM_KERNEL_CONTEXT
) {
565 DRM_ERROR("Process %d using kernel context %d\n",
566 current
->pid
, lock
.context
);
570 DRM_DEBUG("%d frees lock (%d holds)\n",
572 _DRM_LOCKING_CONTEXT(dev
->lock
.hw_lock
->lock
));
/* Update unlock/contention statistics. */
573 atomic_inc(&dev
->total_unlocks
);
574 if (_DRM_LOCK_IS_CONT(dev
->lock
.hw_lock
->lock
))
575 atomic_inc(&dev
->total_contends
);
/* Hand the lock over to the kernel context. */
576 drm_lock_transfer(dev
, &dev
->lock
.hw_lock
->lock
, DRM_KERNEL_CONTEXT
);
577 /* FIXME: Try to send data to card here */
/* No context switch pending: release the lock entirely. */
578 if (!dev
->context_flag
) {
579 if (drm_lock_free(dev
, &dev
->lock
.hw_lock
->lock
,
580 DRM_KERNEL_CONTEXT
)) {
/* Module entry/exit hooks. */
588 module_init(tdfx_init
);
589 module_exit(tdfx_cleanup
);
593 * tdfx_options is called by the kernel to parse command-line options passed
594 * via the boot-loader (e.g., LILO). It calls the insmod option routine,
597 static int __init
tdfx_options(char *str
)
599 drm_parse_options(str
);
/* Register "tdfx=" as a kernel boot parameter handled above. */
603 __setup("tdfx=", tdfx_options
);