/* tdfx.c -- tdfx driver -*- linux-c -*-
 * Created: Thu Oct  7 10:38:32 1999 by faith@precisioninsight.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@precisioninsight.com>
 *    Daryll Strauss <daryll@precisioninsight.com>
 *
 */
#include "drmP.h"
#include "tdfx_drv.h"

#define TDFX_NAME	 "tdfx"
#define TDFX_DESC	 "tdfx"
#define TDFX_DATE	 "19991009"
#define TDFX_MAJOR	 0
#define TDFX_MINOR	 0
#define TDFX_PATCHLEVEL	 1
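
/* Driver-global state: tdfx_device is the single drm_device_t this driver
 * manages, and tdfx_res_ctx.handle (presumably the context owned by the
 * display server) is reset to -1 in tdfx_setup() and consulted by
 * tdfx_lock()/tdfx_unlock() when adjusting the caller's scheduling
 * priority. */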
static drm_device_t	      tdfx_device;
drm_ctx_t		      tdfx_res_ctx;

static struct file_operations tdfx_fops = {
	open:	 tdfx_open,
	flush:	 drm_flush,
	release: tdfx_release,
	ioctl:	 tdfx_ioctl,
	mmap:	 drm_mmap,
	read:	 drm_read,
	fasync:	 drm_fasync,
	poll:	 drm_poll,
};

static struct miscdevice      tdfx_misc = {
	minor: MISC_DYNAMIC_MINOR,
	name:  TDFX_NAME,
	fops:  &tdfx_fops,
};
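
/* Ioctl dispatch table, indexed by ioctl number.  Each entry pairs a handler
 * with two flags that tdfx_ioctl() enforces below: one requiring an
 * authenticated client (priv->authenticated) and one requiring
 * CAP_SYS_ADMIN.  The exact field order is given by drm_ioctl_desc_t in
 * drmP.h (most likely { func, auth_needed, root_only }). */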
static drm_ioctl_desc_t	      tdfx_ioctls[] = {
	[DRM_IOCTL_NR(DRM_IOCTL_VERSION)]    = { tdfx_version,	  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique,  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)]  = { drm_getmagic,	  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)]  = { drm_irq_busid,  0, 1 },

	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique,  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)]	     = { drm_block,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]    = { drm_unblock,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic,  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]    = { drm_addmap,	  1, 1 },

	[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)]    = { tdfx_addctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)]     = { tdfx_rmctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)]    = { tdfx_modctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)]    = { tdfx_getctx,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { tdfx_switchctx, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)]    = { tdfx_newctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)]    = { tdfx_resctx,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)]   = { drm_adddraw,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)]    = { drm_rmdraw,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_LOCK)]	     = { tdfx_lock,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)]     = { tdfx_unlock,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_FINISH)]     = { drm_finish,	  1, 0 },
};

#define TDFX_IOCTL_COUNT DRM_ARRAY_SIZE(tdfx_ioctls)

#ifdef MODULE
static char		      *tdfx = NULL;
#endif

MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas.");
MODULE_DESCRIPTION("tdfx");
MODULE_PARM(tdfx, "s");
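
/* The "tdfx" module/boot parameter is a DRM option string that tdfx_init()
 * hands to drm_parse_options().
 *
 * tdfx_setup initializes the per-device bookkeeping (statistics counters,
 * magic hash, lock and buffer wait queues) for a freshly opened device; it
 * is called from tdfx_open() when the first file handle is opened. */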
static int tdfx_setup(drm_device_t *dev)
{
	int i;

	atomic_set(&dev->ioctl_count, 0);
	atomic_set(&dev->vma_count, 0);
	dev->buf_use = 0;
	atomic_set(&dev->buf_alloc, 0);

	atomic_set(&dev->total_open, 0);
	atomic_set(&dev->total_close, 0);
	atomic_set(&dev->total_ioctl, 0);
	atomic_set(&dev->total_irq, 0);
	atomic_set(&dev->total_ctx, 0);
	atomic_set(&dev->total_locks, 0);
	atomic_set(&dev->total_unlocks, 0);
	atomic_set(&dev->total_contends, 0);
	atomic_set(&dev->total_sleeps, 0);

	for (i = 0; i < DRM_HASH_SIZE; i++) {
		dev->magiclist[i].head = NULL;
		dev->magiclist[i].tail = NULL;
	}
	dev->maplist	    = NULL;
	dev->map_count	    = 0;
	dev->vmalist	    = NULL;
	dev->lock.hw_lock   = NULL;
	init_waitqueue_head(&dev->lock.lock_queue);
	dev->queue_count    = 0;
	dev->queue_reserved = 0;
	dev->queue_slots    = 0;
	dev->queuelist	    = NULL;
	dev->irq	    = 0;
	dev->context_flag   = 0;
	dev->interrupt_flag = 0;
	dev->dma	    = 0;
	dev->dma_flag	    = 0;
	dev->last_context   = 0;
	dev->last_switch    = 0;
	dev->last_checked   = 0;
	init_timer(&dev->timer);
	init_waitqueue_head(&dev->context_wait);

	dev->ctx_start	    = 0;
	dev->lck_start	    = 0;

	dev->buf_rp	    = dev->buf;
	dev->buf_wp	    = dev->buf;
	dev->buf_end	    = dev->buf + DRM_BSZ;
	dev->buf_async	    = NULL;
	init_waitqueue_head(&dev->buf_readers);
	init_waitqueue_head(&dev->buf_writers);

	tdfx_res_ctx.handle = -1;

	DRM_DEBUG("\n");

	/* The kernel's context could be created here, but is now created
	   in drm_dma_enqueue.	This is more resource-efficient for
	   hardware that does not do DMA, but may mean that
	   drm_select_queue fails between the time the interrupt is
	   initialized and the time the queues are initialized. */

	return 0;
}
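
/* tdfx_takedown undoes tdfx_setup and any state accumulated by ioctls: it
 * frees the devname/unique strings, the magic (authentication) hash chains,
 * the vma list, and every map in dev->maplist (releasing MTRRs, ioremaps or
 * SHM pages as appropriate), then drops the hardware lock pointer and wakes
 * any waiters.  It runs when the last file handle is released; see
 * tdfx_release(). */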
static int tdfx_takedown(drm_device_t *dev)
{
	int		  i;
	drm_magic_entry_t *pt, *next;
	drm_map_t	  *map;
	drm_vma_entry_t	  *vma, *vma_next;

	DRM_DEBUG("\n");

	down(&dev->struct_sem);
	del_timer(&dev->timer);

	if (dev->devname) {
		drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
		dev->devname = NULL;
	}

	if (dev->unique) {
		drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
	}
				/* Clear pid list */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		for (pt = dev->magiclist[i].head; pt; pt = next) {
			next = pt->next;
			drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
		}
		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
	}

				/* Clear vma list (only built for debugging) */
	if (dev->vmalist) {
		for (vma = dev->vmalist; vma; vma = vma_next) {
			vma_next = vma->next;
			drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
		}
		dev->vmalist = NULL;
	}

				/* Clear map area and mtrr information */
	if (dev->maplist) {
		for (i = 0; i < dev->map_count; i++) {
			map = dev->maplist[i];
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
#ifdef CONFIG_MTRR
				if (map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
#endif
				drm_ioremapfree(map->handle, map->size);
				break;
			case _DRM_SHM:
				drm_free_pages((unsigned long)map->handle,
					       drm_order(map->size)
					       - PAGE_SHIFT,
					       DRM_MEM_SAREA);
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
		drm_free(dev->maplist,
			 dev->map_count * sizeof(*dev->maplist),
			 DRM_MEM_MAPS);
		dev->maplist   = NULL;
		dev->map_count = 0;
	}

	if (dev->lock.hw_lock) {
		dev->lock.hw_lock = NULL; /* SHM removed */
		dev->lock.pid	  = 0;
		wake_up_interruptible(&dev->lock.lock_queue);
	}
	up(&dev->struct_sem);

	return 0;
}

/* tdfx_init is called via init_module at module load time, or via
 * linux/init/main.c (this is not currently supported). */

int tdfx_init(void)
{
	int	     retcode;
	drm_device_t *dev = &tdfx_device;

	DRM_DEBUG("\n");

	memset((void *)dev, 0, sizeof(*dev));
	dev->count_lock = SPIN_LOCK_UNLOCKED;
	sema_init(&dev->struct_sem, 1);

#ifdef MODULE
	drm_parse_options(tdfx);
#endif

	if ((retcode = misc_register(&tdfx_misc))) {
		DRM_ERROR("Cannot register \"%s\"\n", TDFX_NAME);
		return retcode;
	}
	dev->device = MKDEV(MISC_MAJOR, tdfx_misc.minor);
	dev->name   = TDFX_NAME;

	drm_mem_init();
	drm_proc_init(dev);

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
		 TDFX_NAME,
		 TDFX_MAJOR,
		 TDFX_MINOR,
		 TDFX_PATCHLEVEL,
		 TDFX_DATE,
		 tdfx_misc.minor);

	return 0;
}

/* tdfx_cleanup is called via cleanup_module at module unload time. */

void tdfx_cleanup(void)
{
	drm_device_t *dev = &tdfx_device;

	DRM_DEBUG("\n");

	drm_proc_cleanup();
	if (misc_deregister(&tdfx_misc)) {
		DRM_ERROR("Cannot unload module\n");
	} else {
		DRM_INFO("Module unloaded\n");
	}
	tdfx_takedown(dev);
}
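
/* tdfx_version services DRM_IOCTL_VERSION: it fills in the driver's
 * major/minor/patchlevel and copies the name/date/desc strings back to user
 * space, truncating each to the length the caller supplied.  Since DRM_COPY()
 * always writes the full string length back into the *_len field, a caller
 * can pass zero-length buffers first to learn the sizes.  A rough user-space
 * sketch (hypothetical fd obtained by opening the device node):
 *
 *	drm_version_t v;
 *	memset(&v, 0, sizeof(v));
 *	ioctl(fd, DRM_IOCTL_VERSION, &v);  // v.name_len etc. now hold sizes
 *	v.name = malloc(v.name_len + 1);   // allocate, then repeat the ioctl
 */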
int tdfx_version(struct inode *inode, struct file *filp, unsigned int cmd,
		 unsigned long arg)
{
	drm_version_t version;
	int	      len;

	copy_from_user_ret(&version,
			   (drm_version_t *)arg,
			   sizeof(version),
			   -EFAULT);

#define DRM_COPY(name,value)				     \
	len = strlen(value);				     \
	if (len > name##_len) len = name##_len;		     \
	name##_len = strlen(value);			     \
	if (len && name) {				     \
		copy_to_user_ret(name, value, len, -EFAULT); \
	}

	version.version_major	   = TDFX_MAJOR;
	version.version_minor	   = TDFX_MINOR;
	version.version_patchlevel = TDFX_PATCHLEVEL;

	DRM_COPY(version.name, TDFX_NAME);
	DRM_COPY(version.date, TDFX_DATE);
	DRM_COPY(version.desc, TDFX_DESC);

	copy_to_user_ret((drm_version_t *)arg,
			 &version,
			 sizeof(version),
			 -EFAULT);
	return 0;
}

int tdfx_open(struct inode *inode, struct file *filp)
{
	drm_device_t *dev     = &tdfx_device;
	int	      retcode = 0;

	DRM_DEBUG("open_count = %d\n", dev->open_count);
	if (!(retcode = drm_open_helper(inode, filp, dev))) {
		MOD_INC_USE_COUNT;
		atomic_inc(&dev->total_open);
		spin_lock(&dev->count_lock);
		if (!dev->open_count++) {
			spin_unlock(&dev->count_lock);
			return tdfx_setup(dev);
		}
		spin_unlock(&dev->count_lock);
	}
	return retcode;
}
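
/* tdfx_release is the file_operations release hook: it drops the reference
 * taken in tdfx_open(), and when the last handle goes away it tears the
 * device back down via tdfx_takedown(), refusing with -EBUSY if ioctls are
 * still in flight or the device is blocked. */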
int tdfx_release(struct inode *inode, struct file *filp)
{
	drm_file_t   *priv    = filp->private_data;
	drm_device_t *dev     = priv->dev;
	int	      retcode = 0;

	DRM_DEBUG("open_count = %d\n", dev->open_count);
	if (!(retcode = drm_release(inode, filp))) {
		MOD_DEC_USE_COUNT;
		atomic_inc(&dev->total_close);
		spin_lock(&dev->count_lock);
		if (!--dev->open_count) {
			if (atomic_read(&dev->ioctl_count) || dev->blocked) {
				DRM_ERROR("Device busy: %d %d\n",
					  atomic_read(&dev->ioctl_count),
					  dev->blocked);
				spin_unlock(&dev->count_lock);
				return -EBUSY;
			}
			spin_unlock(&dev->count_lock);
			return tdfx_takedown(dev);
		}
		spin_unlock(&dev->count_lock);
	}
	return retcode;
}

/* tdfx_ioctl is called whenever a process performs an ioctl on /dev/drm. */

int tdfx_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
	       unsigned long arg)
{
	int		 nr	 = DRM_IOCTL_NR(cmd);
	drm_file_t	 *priv	 = filp->private_data;
	drm_device_t	 *dev	 = priv->dev;
	int		 retcode = 0;
	drm_ioctl_desc_t *ioctl;
	drm_ioctl_t	 *func;

	atomic_inc(&dev->ioctl_count);
	atomic_inc(&dev->total_ioctl);
	++priv->ioctl_count;

	DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
		  current->pid, cmd, nr, dev->device, priv->authenticated);

	if (nr >= TDFX_IOCTL_COUNT) {
		retcode = -EINVAL;
	} else {
		ioctl = &tdfx_ioctls[nr];
		func  = ioctl->func;

		if (!func) {
			DRM_DEBUG("no function\n");
			retcode = -EINVAL;
		} else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
			   || (ioctl->auth_needed && !priv->authenticated)) {
			retcode = -EACCES;
		} else {
			retcode = (func)(inode, filp, cmd, arg);
		}
	}

	atomic_dec(&dev->ioctl_count);
	return retcode;
}
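
/* tdfx_lock implements DRM_IOCTL_LOCK.  The caller is put on
 * dev->lock.lock_queue and sleeps until drm_lock_take() acquires the
 * hardware lock for its context (or a signal arrives).  The queue and
 * context-switch logic is stubbed out with #if 0 because this hardware does
 * no DMA and dev->queue_count stays 0. */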
int tdfx_lock(struct inode *inode, struct file *filp, unsigned int cmd,
	      unsigned long arg)
{
	drm_file_t   *priv = filp->private_data;
	drm_device_t *dev  = priv->dev;
	DECLARE_WAITQUEUE(entry, current);
	int	      ret  = 0;
	drm_lock_t    lock;
#if DRM_DMA_HISTOGRAM
	cycles_t      start;

	dev->lck_start = start = get_cycles();
#endif

	copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);

	if (lock.context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  current->pid, lock.context);
		return -EINVAL;
	}

	DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
		  lock.context, current->pid, dev->lock.hw_lock->lock,
		  lock.flags);

#if 0
				/* dev->queue_count == 0 right now for
				   tdfx.  FIXME? */
	if (lock.context < 0 || lock.context >= dev->queue_count)
		return -EINVAL;
#endif

	if (!ret) {
#if 0
		if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
		    != lock.context) {
			long j = jiffies - dev->lock.lock_time;

			if (lock.context == tdfx_res_ctx.handle &&
			    j >= 0 && j < DRM_LOCK_SLICE) {
				/* Can't take lock if we just had it and
				   there is contention. */
				DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d jiffies=%d\n",
					  lock.context, current->pid, j,
					  dev->lock.lock_time, jiffies);
				current->state = TASK_INTERRUPTIBLE;
				current->policy |= SCHED_YIELD;
				schedule_timeout(DRM_LOCK_SLICE-j);
				DRM_DEBUG("jiffies=%d\n", jiffies);
			}
		}
#endif
		add_wait_queue(&dev->lock.lock_queue, &entry);
		for (;;) {
			if (!dev->lock.hw_lock) {
				/* Device has been unregistered */
				ret = -EINTR;
				break;
			}
			if (drm_lock_take(&dev->lock.hw_lock->lock,
					  lock.context)) {
				dev->lock.pid	    = current->pid;
				dev->lock.lock_time = jiffies;
				atomic_inc(&dev->total_locks);
				break;	/* Got lock */
			}

				/* Contention */
			atomic_inc(&dev->total_sleeps);
			current->state = TASK_INTERRUPTIBLE;
			current->policy |= SCHED_YIELD;
			schedule();
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
		}
		current->state = TASK_RUNNING;
		remove_wait_queue(&dev->lock.lock_queue, &entry);
	}

#if 0
	if (!ret && dev->last_context != lock.context &&
	    lock.context != tdfx_res_ctx.handle &&
	    dev->last_context != tdfx_res_ctx.handle) {
		add_wait_queue(&dev->context_wait, &entry);
		current->state = TASK_INTERRUPTIBLE;
		/* PRE: dev->last_context != lock.context */
		tdfx_context_switch(dev, dev->last_context, lock.context);
		/* POST: we will wait for the context
		   switch and will dispatch on a later call
		   when dev->last_context == lock.context
		   NOTE WE HOLD THE LOCK THROUGHOUT THIS
		   TIME! */
		current->policy |= SCHED_YIELD;
		schedule();
		current->state = TASK_RUNNING;
		remove_wait_queue(&dev->context_wait, &entry);
		if (signal_pending(current)) {
			ret = -EINTR;
		} else if (dev->last_context != lock.context) {
			DRM_ERROR("Context mismatch: %d %d\n",
				  dev->last_context, lock.context);
		}
	}
#endif

	if (!ret) {
		if (lock.flags & _DRM_LOCK_READY) {
				/* Wait for space in DMA/FIFO */
		}
		if (lock.flags & _DRM_LOCK_QUIESCENT) {
				/* Make hardware quiescent */
#if 0
			tdfx_quiescent(dev);
#endif
		}
	}

	if (lock.context != tdfx_res_ctx.handle) {
		current->counter  = 5;
		current->priority = DEF_PRIORITY/4;
	}

	DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");

#if DRM_DMA_HISTOGRAM
	atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]);
#endif

	return ret;
}
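
/* tdfx_unlock implements DRM_IOCTL_UNLOCK: it hands the hardware lock back
 * to the kernel context via drm_lock_transfer()/drm_lock_free() and restores
 * the scheduling priority that tdfx_lock() lowered for non-privileged
 * contexts. */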
int tdfx_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	drm_file_t   *priv = filp->private_data;
	drm_device_t *dev  = priv->dev;
	drm_lock_t    lock;

	copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);

	if (lock.context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  current->pid, lock.context);
		return -EINVAL;
	}

	DRM_DEBUG("%d frees lock (%d holds)\n",
		  lock.context,
		  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
	atomic_inc(&dev->total_unlocks);
	if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
		atomic_inc(&dev->total_contends);
	drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
				/* FIXME: Try to send data to card here */
	if (!dev->context_flag) {
		if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
				  DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}

	if (lock.context != tdfx_res_ctx.handle) {
		current->counter  = 5;
		current->priority = DEF_PRIORITY;
	}

	return 0;
}

module_init(tdfx_init);
module_exit(tdfx_cleanup);

#ifndef MODULE
/*
 * tdfx_options is called by the kernel to parse command-line options passed
 * via the boot-loader (e.g., LILO).  It calls the insmod option routine,
 * drm_parse_options.
 */
static int __init tdfx_options(char *str)
{
	drm_parse_options(str);
	return 1;
}

__setup("tdfx=", tdfx_options);
#endif