1 /* tdfx.c -- tdfx driver -*- linux-c -*-
2 * Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Daryll Strauss <daryll@valinux.com>
33 #include <linux/config.h>
37 #define TDFX_NAME "tdfx"
38 #define TDFX_DESC "3dfx Banshee/Voodoo3+"
39 #define TDFX_DATE "20000719"
42 #define TDFX_PATCHLEVEL 0
/* Global driver state: the single tdfx DRM device instance and the context
 * reserved for the X server (its .handle is set to -1 in tdfx_setup()).
 * NOTE(review): this file is a line-wrapped numbered listing; the leading
 * integers on code lines are original line numbers, not C tokens. */
44 static drm_device_t tdfx_device
;
45 drm_ctx_t tdfx_res_ctx
;
/* File-operation table for the character device.  Only the "release" member
 * (-> tdfx_release) survives in this truncated listing; the open/ioctl/mmap
 * initializers are missing here.  NOTE(review): verify against the complete
 * original tdfx.c before editing this struct. */
47 static struct file_operations tdfx_fops
= {
48 #if LINUX_VERSION_CODE >= 0x020400
49 /* This started being used during 2.4.0-test */
54 release
: tdfx_release
,
/* Misc-device registration record: request a dynamically assigned minor
 * number from the misc core (registered in tdfx_init()).  The remaining
 * initializers (name, fops, ...) are missing from this truncated listing. */
62 static struct miscdevice tdfx_misc
= {
63 minor
: MISC_DYNAMIC_MINOR
,
/* Ioctl dispatch table, indexed by DRM_IOCTL_NR(cmd) (see tdfx_ioctl()).
 * The two numeric fields appear to be {handler, auth_needed, root_only} —
 * tdfx_ioctl() checks ->auth_needed against priv->authenticated and
 * ->root_only against capable(CAP_SYS_ADMIN); TODO confirm field order
 * against drm_ioctl_desc_t.  NOTE(review): entries for original lines 73
 * and 79 are missing from this listing. */
68 static drm_ioctl_desc_t tdfx_ioctls
[] = {
69 [DRM_IOCTL_NR(DRM_IOCTL_VERSION
)] = { tdfx_version
, 0, 0 },
70 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE
)] = { drm_getunique
, 0, 0 },
71 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC
)] = { drm_getmagic
, 0, 0 },
72 [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID
)] = { drm_irq_busid
, 0, 1 },
74 [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE
)] = { drm_setunique
, 1, 1 },
75 [DRM_IOCTL_NR(DRM_IOCTL_BLOCK
)] = { drm_block
, 1, 1 },
76 [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK
)] = { drm_unblock
, 1, 1 },
77 [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC
)] = { drm_authmagic
, 1, 1 },
78 [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP
)] = { drm_addmap
, 1, 1 },
/* Context-management ioctls are handled by tdfx-specific functions. */
80 [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX
)] = { tdfx_addctx
, 1, 1 },
81 [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX
)] = { tdfx_rmctx
, 1, 1 },
82 [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX
)] = { tdfx_modctx
, 1, 1 },
83 [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX
)] = { tdfx_getctx
, 1, 0 },
84 [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX
)] = { tdfx_switchctx
, 1, 1 },
85 [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX
)] = { tdfx_newctx
, 1, 1 },
86 [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX
)] = { tdfx_resctx
, 1, 0 },
87 [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW
)] = { drm_adddraw
, 1, 1 },
88 [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW
)] = { drm_rmdraw
, 1, 1 },
89 [DRM_IOCTL_NR(DRM_IOCTL_LOCK
)] = { tdfx_lock
, 1, 0 },
90 [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK
)] = { tdfx_unlock
, 1, 0 },
91 [DRM_IOCTL_NR(DRM_IOCTL_FINISH
)] = { drm_finish
, 1, 0 },
/* AGP ioctls are only compiled in when AGP support is configured. */
92 #if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
93 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE
)] = {drm_agp_acquire
, 1, 1},
94 [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE
)] = {drm_agp_release
, 1, 1},
95 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE
)] = {drm_agp_enable
, 1, 1},
96 [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO
)] = {drm_agp_info
, 1, 1},
97 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC
)] = {drm_agp_alloc
, 1, 1},
98 [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE
)] = {drm_agp_free
, 1, 1},
99 [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND
)] = {drm_agp_unbind
, 1, 1},
100 [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND
)] = {drm_agp_bind
, 1, 1},
/* Number of slots in the dispatch table; used as the bound check in
 * tdfx_ioctl(). */
103 #define TDFX_IOCTL_COUNT DRM_ARRAY_SIZE(tdfx_ioctls)
/* "tdfx" insmod/boot option string; handed to drm_parse_options() in
 * tdfx_init(). */
106 static char *tdfx
= NULL
;
109 MODULE_AUTHOR("VA Linux Systems, Inc.");
110 MODULE_DESCRIPTION("tdfx");
111 MODULE_PARM(tdfx
, "s");
114 /* tdfx_options is called by the kernel to parse command-line options
115 * passed via the boot-loader (e.g., LILO). It calls the insmod option
116 * routine, drm_parse_drm.
119 static int __init
tdfx_options(char *str
)
121 drm_parse_options(str
);
125 __setup("tdfx=", tdfx_options
);
/* tdfx_setup -- (re)initialize per-device state on first open.
 * Called from tdfx_open() when open_count goes 0 -> 1.
 * NOTE(review): this listing is line-wrapped and has lines missing
 * (original numbering jumps, e.g. 129-131, 134, 146, 150-153); the
 * function body and return are incomplete here — kept verbatim. */
128 static int tdfx_setup(drm_device_t
*dev
)
/* Reset usage/statistics counters. */
132 atomic_set(&dev
->ioctl_count
, 0);
133 atomic_set(&dev
->vma_count
, 0);
135 atomic_set(&dev
->buf_alloc
, 0);
137 atomic_set(&dev
->total_open
, 0);
138 atomic_set(&dev
->total_close
, 0);
139 atomic_set(&dev
->total_ioctl
, 0);
140 atomic_set(&dev
->total_irq
, 0);
141 atomic_set(&dev
->total_ctx
, 0);
142 atomic_set(&dev
->total_locks
, 0);
143 atomic_set(&dev
->total_unlocks
, 0);
144 atomic_set(&dev
->total_contends
, 0);
145 atomic_set(&dev
->total_sleeps
, 0);
/* Empty every bucket of the authentication-magic hash table. */
147 for (i
= 0; i
< DRM_HASH_SIZE
; i
++) {
148 dev
->magiclist
[i
].head
= NULL
;
149 dev
->magiclist
[i
].tail
= NULL
;
/* No hardware lock and no queues yet. */
154 dev
->lock
.hw_lock
= NULL
;
155 init_waitqueue_head(&dev
->lock
.lock_queue
);
156 dev
->queue_count
= 0;
157 dev
->queue_reserved
= 0;
158 dev
->queue_slots
= 0;
159 dev
->queuelist
= NULL
;
160 dev
161 dev
->context_flag
= 0;
162 dev
->interrupt_flag
= 0;
165 dev
->last_context
= 0;
166 dev
->last_switch
= 0;
167 dev
->last_checked
= 0;
168 init_timer(&dev
->timer
);
169 init_waitqueue_head(&dev
->context_wait
);
/* Reset the dev->buf ring (read/write pointers, end, async state). */
174 dev
->buf_rp
= dev
->buf
;
175 dev
->buf_wp
= dev
->buf
;
176 dev
->buf_end
= dev
->buf
+ DRM_BSZ
;
177 dev
->buf_async
= NULL
;
178 init_waitqueue_head(&dev
->buf_readers
);
179 init_waitqueue_head(&dev
->buf_writers
);
/* -1 marks the X-server's reserved context as not yet allocated. */
181 tdfx_res_ctx
.handle
=-1;
185 /* The kernel's context could be created here, but is now created
186 in drm_dma_enqueue. This is more resource-efficient for
187 hardware that does not do DMA, but may mean that
188 drm_select_queue fails between the time the interrupt is
189 initialized and the time the queues are initialized. */
/* tdfx_takedown -- free all per-device state on last close.
 * Called from tdfx_release() when open_count drops to 0.  Holds
 * dev->struct_sem for the whole teardown.
 * NOTE(review): listing is truncated (e.g. switch() head, several case
 * arms and closing braces missing) — kept verbatim. */
195 static int tdfx_takedown(drm_device_t
*dev
)
198 drm_magic_entry_t
*pt
, *next
;
200 drm_vma_entry_t
*vma
, *vma_next
;
204 down(&dev
->struct_sem
);
205 del_timer(&dev
->timer
);
/* Free the devname/unique strings (sized strlen+1 for the NUL). */
208 drm_free(dev
->devname
, strlen(dev
->devname
)+1, DRM_MEM_DRIVER
);
213 drm_free(dev
->unique
, strlen(dev
->unique
)+1, DRM_MEM_DRIVER
);
/* Free every entry in every magic-hash bucket, then clear the bucket. */
218 for (i
= 0; i
< DRM_HASH_SIZE
; i
++) {
219 for (pt
= dev
->magiclist
[i
].head
; pt
; pt
= next
) {
221 drm_free(pt
, sizeof(*pt
), DRM_MEM_MAGIC
);
223 dev
->magiclist
[i
].head
= dev
->magiclist
[i
].tail
= NULL
;
225 #if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
226 /* Clear AGP information */
/* Walk dev->agp->memory, freeing the AGP memory then the list node;
 * temp_next is saved first because drm_free() releases temp. */
229 drm_agp_mem_t
*temp_next
;
231 temp
= dev
->agp
->memory
;
232 while(temp
!= NULL
) {
233 temp_next
= temp
->next
;
234 drm_free_agp(temp
->memory
, temp
->pages
);
235 drm_free(temp
, sizeof(*temp
), DRM_MEM_AGPLISTS
);
/* Drop the AGP back-end if we still hold it. */
238 if (dev
->agp
->acquired
) (*drm_agp
.release
)();
241 /* Clear vma list (only built for debugging) */
243 for (vma
= dev
->vmalist
; vma
; vma
= vma_next
) {
244 vma_next
= vma
->next
;
245 drm_free(vma
, sizeof(*vma
), DRM_MEM_VMAS
);
250 /* Clear map area and mtrr information */
252 for (i
= 0; i
< dev
->map_count
; i
++) {
253 map
= dev
->maplist
[i
];
/* Frame-buffer maps: remove the MTRR (if one was set) and unmap. */
256 case _DRM_FRAME_BUFFER
:
258 if (map
->mtrr
>= 0) {
260 retcode
= mtrr_del(map
->mtrr
,
263 DRM_DEBUG("mtrr_del = %d\n", retcode
);
266 drm_ioremapfree(map
->handle
, map
->size
);
269 drm_free_pages((unsigned long)map
->handle
,
275 /* Do nothing here, because this is all
276 handled in the AGP/GART driver. */
279 drm_free(map
, sizeof(*map
), DRM_MEM_MAPS
);
281 drm_free(dev
->maplist
,
282 dev
->map_count
* sizeof(*dev
->maplist
),
/* Drop the hardware lock pointer (backed by now-removed SHM) and wake
 * any sleepers so they can notice the device is gone. */
288 if (dev
->lock
.hw_lock
) {
289 dev
->lock
.hw_lock
= NULL
; /* SHM removed */
291 wake_up_interruptible(&dev
->lock
.lock_queue
);
293 up(&dev
->struct_sem
);
298 /* tdfx_init is called via init_module at module load time, or via
299 * linux/init/main.c (this is not currently supported). */
/* Zeroes the device struct, parses the "tdfx" option string, registers
 * the misc device, initializes AGP (when configured) and the context
 * bitmap.  NOTE(review): error-path lines (returns, braces) are missing
 * from this truncated listing — kept verbatim. */
301 static int tdfx_init(void)
304 drm_device_t
*dev
= &tdfx_device
;
308 memset((void *)dev
, 0, sizeof(*dev
));
309 dev
->count_lock
= SPIN_LOCK_UNLOCKED
;
310 sema_init(&dev
->struct_sem
, 1);
313 drm_parse_options(tdfx
);
/* Register the misc device; failure aborts module load. */
316 if ((retcode
= misc_register(&tdfx_misc
))) {
317 DRM_ERROR("Cannot register \"%s\"\n", TDFX_NAME
);
320 dev
->device
= MKDEV(MISC_MAJOR
, tdfx_misc
.minor
);
321 dev
->name
= TDFX_NAME
;
325 #if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
326 dev
->agp
= drm_agp_init();
/* Context bitmap failure undoes the misc registration. */
328 if((retcode
= drm_ctxbitmap_init(dev
))) {
329 DRM_ERROR("Cannot allocate memory for context bitmap.\n");
331 misc_deregister(&tdfx_misc
);
336 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
347 /* tdfx_cleanup is called via cleanup_module at module unload time. */
/* Unregisters the misc device, releases the context bitmap and (when AGP
 * is configured) the AGP bookkeeping.  Truncated listing — kept verbatim. */
349 static void tdfx_cleanup(void)
351 drm_device_t
*dev
= &tdfx_device
;
356 if (misc_deregister(&tdfx_misc
)) {
357 DRM_ERROR("Cannot unload module\n");
359 DRM_INFO("Module unloaded\n");
361 drm_ctxbitmap_cleanup(dev
);
363 #if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
366 drm_free(dev
->agp
, sizeof(*dev
->agp
), DRM_MEM_AGPLISTS
);
/* Module entry/exit points. */
372 module_init(tdfx_init
);
373 module_exit(tdfx_cleanup
);
/* DRM_IOCTL_VERSION handler: copies the driver's version numbers and the
 * name/date/desc strings to the user's drm_version_t.
 * NOTE(review): truncated listing — the arg parameter, return statements
 * and parts of DRM_COPY's user-buffer handling are missing here. */
376 int tdfx_version(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
379 drm_version_t version
;
382 copy_from_user_ret(&version
,
383 (drm_version_t
*)arg
,
/* Copy up to name##_len bytes of `value` to the user pointer `name`,
 * then report the full source length back in name##_len.
 * NOTE(review): the reported length is strlen(value) even when the copy
 * was truncated to the caller's buffer — presumably intentional so the
 * caller can re-query with a bigger buffer; confirm against DRM core. */
387 #define DRM_COPY(name,value) \
388 len = strlen(value); \
389 if (len > name##_len) len = name##_len; \
390 name##_len = strlen(value); \
392 copy_to_user_ret(name, value, len, -EFAULT); \
395 version
.version_major
= TDFX_MAJOR
;
396 version
.version_minor
= TDFX_MINOR
;
397 version
.version_patchlevel
= TDFX_PATCHLEVEL
;
399 DRM_COPY(version
.name
, TDFX_NAME
);
400 DRM_COPY(version
.date
, TDFX_DATE
);
401 DRM_COPY(version
.desc
, TDFX_DESC
);
403 copy_to_user_ret((drm_version_t
*)arg
,
/* Device-file open: delegates to drm_open_helper(); on the first
 * successful open (open_count 0 -> 1) runs tdfx_setup().  count_lock is
 * released before calling tdfx_setup() — the count itself is the guard.
 * Truncated listing — kept verbatim. */
410 int tdfx_open(struct inode
*inode
, struct file
*filp
)
412 drm_device_t
*dev
= &tdfx_device
;
415 DRM_DEBUG("open_count = %d\n", dev
->open_count
);
416 if (!(retcode
= drm_open_helper(inode
, filp
, dev
))) {
417 #if LINUX_VERSION_CODE < 0x020333
418 MOD_INC_USE_COUNT
; /* Needed before Linux 2.3.51 */
420 atomic_inc(&dev
->total_open
);
421 spin_lock(&dev
->count_lock
);
422 if (!dev
->open_count
++) {
423 spin_unlock(&dev
->count_lock
);
424 return tdfx_setup(dev
);
426 spin_unlock(&dev
->count_lock
);
/* Device-file release: delegates to drm_release(); on the last close
 * (open_count -> 0) either reports "Device busy" if ioctls are still in
 * flight / the device is blocked, or tears everything down via
 * tdfx_takedown().  Truncated listing — kept verbatim. */
431 int tdfx_release(struct inode
*inode
, struct file
*filp
)
433 drm_file_t
*priv
= filp
->private_data
;
440 DRM_DEBUG("open_count = %d\n", dev
->open_count
);
441 if (!(retcode
= drm_release(inode
, filp
))) {
442 #if LINUX_VERSION_CODE < 0x020333
443 MOD_DEC_USE_COUNT
; /* Needed before Linux 2.3.51 */
445 atomic_inc(&dev
->total_close
);
446 spin_lock(&dev
->count_lock
);
447 if (!--dev
->open_count
) {
/* Refuse teardown while ioctls are outstanding or device is blocked. */
448 if (atomic_read(&dev
->ioctl_count
) || dev
->blocked
) {
449 DRM_ERROR("Device busy: %d %d\n",
450 atomic_read(&dev
->ioctl_count
),
452 spin_unlock(&dev
->count_lock
);
456 spin_unlock(&dev
->count_lock
);
458 return tdfx_takedown(dev
);
460 spin_unlock(&dev
->count_lock
);
467 /* tdfx_ioctl is called whenever a process performs an ioctl on /dev/drm. */
/* Dispatcher: bounds-checks the ioctl number against TDFX_IOCTL_COUNT,
 * enforces the entry's root_only/auth_needed flags, then calls the
 * handler from tdfx_ioctls[].  ioctl_count is held elevated for the
 * duration so tdfx_release() can detect in-flight ioctls.
 * Truncated listing — kept verbatim. */
469 int tdfx_ioctl(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
472 int nr
= DRM_IOCTL_NR(cmd
);
473 drm_file_t
*priv
= filp
->private_data
;
474 drm_device_t
*dev
= priv
->dev
;
476 drm_ioctl_desc_t
*ioctl
;
479 atomic_inc(&dev
->ioctl_count
);
480 atomic_inc(&dev
->total_ioctl
);
483 DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
484 current
->pid
, cmd
, nr
, dev
->device
, priv
->authenticated
);
486 if (nr
>= TDFX_IOCTL_COUNT
) {
489 ioctl
= &tdfx_ioctls
[nr
];
493 DRM_DEBUG("no function\n");
/* Permission checks: root-only entries need CAP_SYS_ADMIN; auth-needed
 * entries need a magic-authenticated file handle. */
495 } else if ((ioctl
->root_only
&& !capable(CAP_SYS_ADMIN
))
496 || (ioctl
->auth_needed
&& !priv
->authenticated
)) {
499 retcode
= (func
)(inode
, filp
, cmd
, arg
);
503 atomic_dec(&dev
->ioctl_count
);
/* DRM_IOCTL_LOCK handler: acquire the hardware lock for lock.context,
 * sleeping on dev->lock.lock_queue until drm_lock_take() succeeds, then
 * perform a context switch if the lock-holder differs from the last
 * active context.  The kernel context (DRM_KERNEL_CONTEXT) may not be
 * locked from user space.  This code is highly order-dependent and this
 * listing is truncated (returns, loop heads and closing braces missing) —
 * kept verbatim; do not restyle without the complete original. */
507 int tdfx_lock(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
510 drm_file_t
*priv
= filp
->private_data
;
511 drm_device_t
*dev
= priv
->dev
;
512 DECLARE_WAITQUEUE(entry
, current
);
515 #if DRM_DMA_HISTOGRAM
518 dev
->lck_start
= start
= get_cycles();
521 copy_from_user_ret(&lock
, (drm_lock_t
*)arg
, sizeof(lock
), -EFAULT
);
/* User space must never lock the kernel context. */
523 if (lock
.context
== DRM_KERNEL_CONTEXT
) {
524 DRM_ERROR("Process %d using kernel context %d\n",
525 current
->pid
, lock
.context
);
529 DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
530 lock
.context
, current
->pid
, dev
->lock
.hw_lock
->lock
,
534 /* dev->queue_count == 0 right now for
536 if (lock
.context
< 0 || lock
.context
>= dev
->queue_count
)
/* Anti-starvation: if the X server context just held the lock and there
 * is contention, yield for the remainder of the lock slice. */
542 if (_DRM_LOCKING_CONTEXT(dev
->lock
.hw_lock
->lock
)
544 long j
= jiffies
- dev
->lock
.lock_time
;
546 if (lock
.context
== tdfx_res_ctx
.handle
&&
547 j
>= 0 && j
< DRM_LOCK_SLICE
) {
548 /* Can't take lock if we just had it and
549 there is contention. */
550 DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d jiffies=%d\n",
551 lock
.context
, current
->pid
, j
,
552 dev
->lock
.lock_time
, jiffies
);
553 current
->state
= TASK_INTERRUPTIBLE
;
554 current
->policy
|= SCHED_YIELD
;
555 schedule_timeout(DRM_LOCK_SLICE
-j
);
556 DRM_DEBUG("jiffies=%d\n", jiffies
);
/* Sleep-loop: retry drm_lock_take() until we get the lock, the device
 * disappears, or a signal arrives. */
560 add_wait_queue(&dev
->lock
.lock_queue
, &entry
);
562 if (!dev
->lock
.hw_lock
) {
563 /* Device has been unregistered */
567 if (drm_lock_take(&dev
->lock
.hw_lock
->lock
,
569 dev
->lock
.pid
= current
->pid
;
570 dev
->lock
.lock_time
= jiffies
;
571 atomic_inc(&dev
->total_locks
);
572 break; /* Got lock */
576 atomic_inc(&dev
->total_sleeps
);
577 current
->state
= TASK_INTERRUPTIBLE
;
579 current
->policy
|= SCHED_YIELD
;
582 if (signal_pending(current
)) {
587 current
->state
= TASK_RUNNING
;
588 remove_wait_queue(&dev
->lock
.lock_queue
, &entry
);
/* Lock acquired for a new context (and neither old nor new is the X
 * server's reserved context): trigger a hardware context switch and wait
 * for it on dev->context_wait. */
592 if (!ret
&& dev
->last_context
!= lock
.context
&&
593 lock
.context
!= tdfx_res_ctx
.handle
&&
594 dev
->last_context
!= tdfx_res_ctx
.handle
) {
595 add_wait_queue(&dev
->context_wait
, &entry
);
596 current
->state
= TASK_INTERRUPTIBLE
;
597 /* PRE: dev->last_context != lock.context */
598 tdfx_context_switch(dev
, dev
->last_context
, lock
.context
);
599 /* POST: we will wait for the context
600 switch and will dispatch on a later call
601 when dev->last_context == lock.context
602 NOTE WE HOLD THE LOCK THROUGHOUT THIS
604 current
->policy
|= SCHED_YIELD
;
606 current
->state
= TASK_RUNNING
;
607 remove_wait_queue(&dev
->context_wait
, &entry
);
608 if (signal_pending(current
)) {
610 } else if (dev
->last_context
!= lock
.context
) {
611 DRM_ERROR("Context mismatch: %d %d\n",
612 dev
->last_context
, lock
.context
);
618 if (lock
.flags
& _DRM_LOCK_READY
) {
619 /* Wait for space in DMA/FIFO */
621 if (lock
.flags
& _DRM_LOCK_QUIESCENT
) {
622 /* Make hardware quiescent */
/* Pre-2.4 scheduler boost for non-X-server lock holders. */
629 #if LINUX_VERSION_CODE < 0x020400
630 if (lock
.context
!= tdfx_res_ctx
.handle
) {
631 current
->counter
= 5;
632 current
->priority
= DEF_PRIORITY
/4;
635 DRM_DEBUG("%d %s\n", lock
.context
, ret
? "interrupted" : "has lock");
637 #if DRM_DMA_HISTOGRAM
638 atomic_inc(&dev
->histo
.lacq
[drm_histogram_slot(get_cycles() - start
)]);
645 int tdfx_unlock(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
648 drm_file_t
*priv
= filp
->private_data
;
649 drm_device_t
*dev
= priv
->dev
;
652 copy_from_user_ret(&lock
, (drm_lock_t
*)arg
, sizeof(lock
), -EFAULT
);
654 if (lock
.context
== DRM_KERNEL_CONTEXT
) {
655 DRM_ERROR("Process %d using kernel context %d\n",
656 current
->pid
, lock
.context
);
660 DRM_DEBUG("%d frees lock (%d holds)\n",
662 _DRM_LOCKING_CONTEXT(dev
->lock
.hw_lock
->lock
));
663 atomic_inc(&dev
->total_unlocks
);
664 if (_DRM_LOCK_IS_CONT(dev
->lock
.hw_lock
->lock
))
665 atomic_inc(&dev
->total_contends
);
666 drm_lock_transfer(dev
, &dev
->lock
.hw_lock
->lock
, DRM_KERNEL_CONTEXT
);
667 /* FIXME: Try to send data to card here */
668 if (!dev
->context_flag
) {
669 if (drm_lock_free(dev
, &dev
->lock
.hw_lock
->lock
,
670 DRM_KERNEL_CONTEXT
)) {
675 #if LINUX_VERSION_CODE < 0x020400
676 if (lock
.context
!= tdfx_res_ctx
.handle
) {
677 current
->counter
= 5;
678 current
->priority
= DEF_PRIORITY
;