Import 2.3.99pre2-1
[davej-history.git] drivers/char/drm/tdfx_drv.c

/* tdfx.c -- tdfx driver -*- linux-c -*-
 * Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@precisioninsight.com>
 *    Daryll Strauss <daryll@precisioninsight.com>
 */

#include <linux/config.h>
#include "drmP.h"
#include "tdfx_drv.h"

#define TDFX_NAME        "tdfx"
#define TDFX_DESC        "tdfx"
#define TDFX_DATE        "19991009"
#define TDFX_MAJOR       0
#define TDFX_MINOR       0
#define TDFX_PATCHLEVEL  1

static drm_device_t tdfx_device;
drm_ctx_t           tdfx_res_ctx;
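
/* File operations for the /dev entry exported by this driver.  The generic
 * drm_* helpers are shared DRM code; only open, release, and ioctl have
 * tdfx-specific wrappers. */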
static struct file_operations tdfx_fops = {
        open:    tdfx_open,
        flush:   drm_flush,
        release: tdfx_release,
        ioctl:   tdfx_ioctl,
        mmap:    drm_mmap,
        read:    drm_read,
        fasync:  drm_fasync,
        poll:    drm_poll,
};

static struct miscdevice tdfx_misc = {
        minor: MISC_DYNAMIC_MINOR,
        name:  TDFX_NAME,
        fops:  &tdfx_fops,
};
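
/* Ioctl dispatch table, indexed by ioctl number.  Each entry pairs a handler
 * with its auth_needed and root_only flags, which tdfx_ioctl() checks before
 * dispatching. */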
static drm_ioctl_desc_t tdfx_ioctls[] = {
        [DRM_IOCTL_NR(DRM_IOCTL_VERSION)]    = { tdfx_version,   0, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique,  0, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)]  = { drm_getmagic,   0, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)]  = { drm_irq_busid,  0, 1 },

        [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique,  1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)]      = { drm_block,      1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]    = { drm_unblock,    1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic,  1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]    = { drm_addmap,     1, 1 },

        [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)]    = { tdfx_addctx,    1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)]     = { tdfx_rmctx,     1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)]    = { tdfx_modctx,    1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)]    = { tdfx_getctx,    1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { tdfx_switchctx, 1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)]    = { tdfx_newctx,    1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)]    = { tdfx_resctx,    1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)]   = { drm_adddraw,    1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)]    = { drm_rmdraw,     1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_LOCK)]       = { tdfx_lock,      1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)]     = { tdfx_unlock,    1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_FINISH)]     = { drm_finish,     1, 0 },
};

#define TDFX_IOCTL_COUNT DRM_ARRAY_SIZE(tdfx_ioctls)

#ifdef MODULE
static char *tdfx = NULL;
#endif

MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas.");
MODULE_DESCRIPTION("tdfx");
MODULE_PARM(tdfx, "s");
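
/* tdfx_setup initialises the per-device state on first open: statistics
 * counters, the authentication-magic hash, the map and VMA lists, the
 * hardware-lock wait queue, and the message buffer consumed via drm_read(). */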
static int tdfx_setup(drm_device_t *dev)
{
        int i;

        atomic_set(&dev->ioctl_count, 0);
        atomic_set(&dev->vma_count, 0);
        dev->buf_use = 0;
        atomic_set(&dev->buf_alloc, 0);

        atomic_set(&dev->total_open, 0);
        atomic_set(&dev->total_close, 0);
        atomic_set(&dev->total_ioctl, 0);
        atomic_set(&dev->total_irq, 0);
        atomic_set(&dev->total_ctx, 0);
        atomic_set(&dev->total_locks, 0);
        atomic_set(&dev->total_unlocks, 0);
        atomic_set(&dev->total_contends, 0);
        atomic_set(&dev->total_sleeps, 0);

        for (i = 0; i < DRM_HASH_SIZE; i++) {
                dev->magiclist[i].head = NULL;
                dev->magiclist[i].tail = NULL;
        }

        dev->maplist        = NULL;
        dev->map_count      = 0;
        dev->vmalist        = NULL;
        dev->lock.hw_lock   = NULL;
        init_waitqueue_head(&dev->lock.lock_queue);
        dev->queue_count    = 0;
        dev->queue_reserved = 0;
        dev->queue_slots    = 0;
        dev->queuelist      = NULL;
        dev->irq            = 0;
        dev->context_flag   = 0;
        dev->interrupt_flag = 0;
        dev->dma            = 0;
        dev->dma_flag       = 0;
        dev->last_context   = 0;
        dev->last_switch    = 0;
        dev->last_checked   = 0;
        init_timer(&dev->timer);
        init_waitqueue_head(&dev->context_wait);

        dev->ctx_start      = 0;
        dev->lck_start      = 0;

        dev->buf_rp    = dev->buf;
        dev->buf_wp    = dev->buf;
        dev->buf_end   = dev->buf + DRM_BSZ;
        dev->buf_async = NULL;
        init_waitqueue_head(&dev->buf_readers);
        init_waitqueue_head(&dev->buf_writers);

        tdfx_res_ctx.handle = -1;

        DRM_DEBUG("\n");

        /* The kernel's context could be created here, but is now created
           in drm_dma_enqueue.  This is more resource-efficient for
           hardware that does not do DMA, but may mean that
           drm_select_queue fails between the time the interrupt is
           initialized and the time the queues are initialized. */

        return 0;
}
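
/* tdfx_takedown releases everything tdfx_setup() and the ioctls allocated:
 * the device name and unique (busid) strings, the authentication-magic hash,
 * the VMA bookkeeping list, and every register/framebuffer/SHM mapping
 * (including any MTRR set up for it), then wakes anyone still waiting on the
 * hardware lock. */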
static int tdfx_takedown(drm_device_t *dev)
{
        int               i;
        drm_magic_entry_t *pt, *next;
        drm_map_t         *map;
        drm_vma_entry_t   *vma, *vma_next;

        DRM_DEBUG("\n");

        down(&dev->struct_sem);
        del_timer(&dev->timer);

        if (dev->devname) {
                drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
                dev->devname = NULL;
        }

        if (dev->unique) {
                drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
                dev->unique = NULL;
                dev->unique_len = 0;
        }

        /* Clear pid list */
        for (i = 0; i < DRM_HASH_SIZE; i++) {
                for (pt = dev->magiclist[i].head; pt; pt = next) {
                        next = pt->next;
                        drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
                }
                dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
        }

        /* Clear vma list (only built for debugging) */
        if (dev->vmalist) {
                for (vma = dev->vmalist; vma; vma = vma_next) {
                        vma_next = vma->next;
                        drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
                }
                dev->vmalist = NULL;
        }

        /* Clear map area and mtrr information */
        if (dev->maplist) {
                for (i = 0; i < dev->map_count; i++) {
                        map = dev->maplist[i];
                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
#ifdef CONFIG_MTRR
                                if (map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
#endif
                                drm_ioremapfree(map->handle, map->size);
                                break;
                        case _DRM_SHM:
                                drm_free_pages((unsigned long)map->handle,
                                               drm_order(map->size)
                                               - PAGE_SHIFT,
                                               DRM_MEM_SAREA);
                                break;
                        }
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                }
                drm_free(dev->maplist,
                         dev->map_count * sizeof(*dev->maplist),
                         DRM_MEM_MAPS);
                dev->maplist   = NULL;
                dev->map_count = 0;
        }

        if (dev->lock.hw_lock) {
                dev->lock.hw_lock = NULL; /* SHM removed */
                dev->lock.pid     = 0;
                wake_up_interruptible(&dev->lock.lock_queue);
        }
        up(&dev->struct_sem);

        return 0;
}

/* tdfx_init is called via init_module at module load time, or via
 * linux/init/main.c (this is not currently supported). */

int tdfx_init(void)
{
        int          retcode;
        drm_device_t *dev = &tdfx_device;

        DRM_DEBUG("\n");

        memset((void *)dev, 0, sizeof(*dev));
        dev->count_lock = SPIN_LOCK_UNLOCKED;
        sema_init(&dev->struct_sem, 1);

#ifdef MODULE
        drm_parse_options(tdfx);
#endif

        if ((retcode = misc_register(&tdfx_misc))) {
                DRM_ERROR("Cannot register \"%s\"\n", TDFX_NAME);
                return retcode;
        }
        dev->device = MKDEV(MISC_MAJOR, tdfx_misc.minor);
        dev->name   = TDFX_NAME;

        drm_mem_init();
        drm_proc_init(dev);

        DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
                 TDFX_NAME,
                 TDFX_MAJOR,
                 TDFX_MINOR,
                 TDFX_PATCHLEVEL,
                 TDFX_DATE,
                 tdfx_misc.minor);

        return 0;
}

/* tdfx_cleanup is called via cleanup_module at module unload time. */

void tdfx_cleanup(void)
{
        drm_device_t *dev = &tdfx_device;

        DRM_DEBUG("\n");

        drm_proc_cleanup();
        if (misc_deregister(&tdfx_misc)) {
                DRM_ERROR("Cannot unload module\n");
        } else {
                DRM_INFO("Module unloaded\n");
        }
        tdfx_takedown(dev);
}
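
/* tdfx_version services DRM_IOCTL_VERSION.  The DRM_COPY() helper below
 * copies a driver string (name, date, desc) into the caller's buffer,
 * truncating to the buffer length supplied while reporting the full string
 * length back in the corresponding *_len field. */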
int tdfx_version(struct inode *inode, struct file *filp, unsigned int cmd,
                 unsigned long arg)
{
        drm_version_t version;
        int           len;

        copy_from_user_ret(&version,
                           (drm_version_t *)arg,
                           sizeof(version),
                           -EFAULT);

#define DRM_COPY(name,value)                                 \
        len = strlen(value);                                 \
        if (len > name##_len) len = name##_len;              \
        name##_len = strlen(value);                          \
        if (len && name) {                                   \
                copy_to_user_ret(name, value, len, -EFAULT); \
        }

        version.version_major      = TDFX_MAJOR;
        version.version_minor      = TDFX_MINOR;
        version.version_patchlevel = TDFX_PATCHLEVEL;

        DRM_COPY(version.name, TDFX_NAME);
        DRM_COPY(version.date, TDFX_DATE);
        DRM_COPY(version.desc, TDFX_DESC);

        copy_to_user_ret((drm_version_t *)arg,
                         &version,
                         sizeof(version),
                         -EFAULT);
        return 0;
}
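
/* On open, drm_open_helper() sets up the per-file private data; the first
 * successful open also (re)initialises the device via tdfx_setup(). */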
int tdfx_open(struct inode *inode, struct file *filp)
{
        drm_device_t *dev    = &tdfx_device;
        int          retcode = 0;

        DRM_DEBUG("open_count = %d\n", dev->open_count);
        if (!(retcode = drm_open_helper(inode, filp, dev))) {
                MOD_INC_USE_COUNT;
                atomic_inc(&dev->total_open);
                spin_lock(&dev->count_lock);
                if (!dev->open_count++) {
                        spin_unlock(&dev->count_lock);
                        return tdfx_setup(dev);
                }
                spin_unlock(&dev->count_lock);
        }
        return retcode;
}
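
/* On the last release the device is torn down via tdfx_takedown(), unless
 * ioctls are still in flight or a client is blocked, in which case -EBUSY
 * is returned. */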
int tdfx_release(struct inode *inode, struct file *filp)
{
        drm_file_t   *priv   = filp->private_data;
        drm_device_t *dev    = priv->dev;
        int          retcode = 0;

        DRM_DEBUG("open_count = %d\n", dev->open_count);
        if (!(retcode = drm_release(inode, filp))) {
                MOD_DEC_USE_COUNT;
                atomic_inc(&dev->total_close);
                spin_lock(&dev->count_lock);
                if (!--dev->open_count) {
                        if (atomic_read(&dev->ioctl_count) || dev->blocked) {
                                DRM_ERROR("Device busy: %d %d\n",
                                          atomic_read(&dev->ioctl_count),
                                          dev->blocked);
                                spin_unlock(&dev->count_lock);
                                return -EBUSY;
                        }
                        spin_unlock(&dev->count_lock);
                        return tdfx_takedown(dev);
                }
                spin_unlock(&dev->count_lock);
        }
        return retcode;
}

/* tdfx_ioctl is called whenever a process performs an ioctl on /dev/drm. */

int tdfx_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
               unsigned long arg)
{
        int              nr      = DRM_IOCTL_NR(cmd);
        drm_file_t       *priv   = filp->private_data;
        drm_device_t     *dev    = priv->dev;
        int              retcode = 0;
        drm_ioctl_desc_t *ioctl;
        drm_ioctl_t      *func;

        atomic_inc(&dev->ioctl_count);
        atomic_inc(&dev->total_ioctl);
        ++priv->ioctl_count;

        DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
                  current->pid, cmd, nr, dev->device, priv->authenticated);

        if (nr >= TDFX_IOCTL_COUNT) {
                retcode = -EINVAL;
        } else {
                ioctl = &tdfx_ioctls[nr];
                func  = ioctl->func;

                if (!func) {
                        DRM_DEBUG("no function\n");
                        retcode = -EINVAL;
                } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
                           || (ioctl->auth_needed && !priv->authenticated)) {
                        retcode = -EACCES;
                } else {
                        retcode = (func)(inode, filp, cmd, arg);
                }
        }

        atomic_dec(&dev->ioctl_count);
        return retcode;
}
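
/* tdfx_lock acquires the per-device hardware lock for the calling context.
 * It loops on the lock wait queue, trying drm_lock_take() and sleeping
 * interruptibly on contention, until the lock is granted, the device goes
 * away, or a signal arrives.  The #if 0 blocks preserve context-switch and
 * lock-slice logic that is disabled for tdfx. */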
int tdfx_lock(struct inode *inode, struct file *filp, unsigned int cmd,
              unsigned long arg)
{
        drm_file_t   *priv = filp->private_data;
        drm_device_t *dev  = priv->dev;
        DECLARE_WAITQUEUE(entry, current);
        int          ret = 0;
        drm_lock_t   lock;
#if DRM_DMA_HISTOGRAM
        cycles_t     start;

        dev->lck_start = start = get_cycles();
#endif

        copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);

        if (lock.context == DRM_KERNEL_CONTEXT) {
                DRM_ERROR("Process %d using kernel context %d\n",
                          current->pid, lock.context);
                return -EINVAL;
        }

        DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
                  lock.context, current->pid, dev->lock.hw_lock->lock,
                  lock.flags);

#if 0
        /* dev->queue_count == 0 right now for tdfx.  FIXME? */
        if (lock.context < 0 || lock.context >= dev->queue_count)
                return -EINVAL;
#endif

        if (!ret) {
#if 0
                if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
                    != lock.context) {
                        long j = jiffies - dev->lock.lock_time;

                        if (lock.context == tdfx_res_ctx.handle &&
                            j >= 0 && j < DRM_LOCK_SLICE) {
                                /* Can't take lock if we just had it and
                                   there is contention. */
                                DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d jiffies=%d\n",
                                          lock.context, current->pid, j,
                                          dev->lock.lock_time, jiffies);
                                current->state = TASK_INTERRUPTIBLE;
                                current->policy |= SCHED_YIELD;
                                schedule_timeout(DRM_LOCK_SLICE-j);
                                DRM_DEBUG("jiffies=%d\n", jiffies);
                        }
                }
#endif
                add_wait_queue(&dev->lock.lock_queue, &entry);
                for (;;) {
                        if (!dev->lock.hw_lock) {
                                /* Device has been unregistered */
                                ret = -EINTR;
                                break;
                        }
                        if (drm_lock_take(&dev->lock.hw_lock->lock,
                                          lock.context)) {
                                dev->lock.pid       = current->pid;
                                dev->lock.lock_time = jiffies;
                                atomic_inc(&dev->total_locks);
                                break;  /* Got lock */
                        }

                        /* Contention */
                        atomic_inc(&dev->total_sleeps);
                        current->state = TASK_INTERRUPTIBLE;
                        current->policy |= SCHED_YIELD;
                        schedule();
                        if (signal_pending(current)) {
                                ret = -ERESTARTSYS;
                                break;
                        }
                }
                current->state = TASK_RUNNING;
                remove_wait_queue(&dev->lock.lock_queue, &entry);
        }

#if 0
        if (!ret && dev->last_context != lock.context &&
            lock.context != tdfx_res_ctx.handle &&
            dev->last_context != tdfx_res_ctx.handle) {
                add_wait_queue(&dev->context_wait, &entry);
                current->state = TASK_INTERRUPTIBLE;
                /* PRE: dev->last_context != lock.context */
                tdfx_context_switch(dev, dev->last_context, lock.context);
                /* POST: we will wait for the context switch and will
                   dispatch on a later call when
                   dev->last_context == lock.context.
                   NOTE: WE HOLD THE LOCK THROUGHOUT THIS TIME! */
                current->policy |= SCHED_YIELD;
                schedule();
                current->state = TASK_RUNNING;
                remove_wait_queue(&dev->context_wait, &entry);
                if (signal_pending(current)) {
                        ret = -EINTR;
                } else if (dev->last_context != lock.context) {
                        DRM_ERROR("Context mismatch: %d %d\n",
                                  dev->last_context, lock.context);
                }
        }
#endif

        if (!ret) {
                if (lock.flags & _DRM_LOCK_READY) {
                        /* Wait for space in DMA/FIFO */
                }
                if (lock.flags & _DRM_LOCK_QUIESCENT) {
                        /* Make hardware quiescent */
#if 0
                        tdfx_quiescent(dev);
#endif
                }
        }

        if (lock.context != tdfx_res_ctx.handle) {
                current->counter  = 5;
                current->priority = DEF_PRIORITY/4;
        }

        DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");

#if DRM_DMA_HISTOGRAM
        atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]);
#endif

        return ret;
}
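
/* tdfx_unlock drops the hardware lock: the lock is handed to the kernel
 * context and, when no kernel context switch is pending, freed so the next
 * waiter can take it. */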
int tdfx_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
                unsigned long arg)
{
        drm_file_t   *priv = filp->private_data;
        drm_device_t *dev  = priv->dev;
        drm_lock_t   lock;

        copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);

        if (lock.context == DRM_KERNEL_CONTEXT) {
                DRM_ERROR("Process %d using kernel context %d\n",
                          current->pid, lock.context);
                return -EINVAL;
        }

        DRM_DEBUG("%d frees lock (%d holds)\n",
                  lock.context,
                  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
        atomic_inc(&dev->total_unlocks);
        if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
                atomic_inc(&dev->total_contends);
        drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
        /* FIXME: Try to send data to card here */
        if (!dev->context_flag) {
                if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
                                  DRM_KERNEL_CONTEXT)) {
                        DRM_ERROR("\n");
                }
        }

        if (lock.context != tdfx_res_ctx.handle) {
                current->counter  = 5;
                current->priority = DEF_PRIORITY;
        }

        return 0;
}
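
/* When built as a module, module_init()/module_exit() register tdfx_init and
 * tdfx_cleanup as the load/unload entry points.  When built into the kernel,
 * the "tdfx=" boot option below feeds drm_parse_options instead of the
 * insmod parameter. */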
module_init(tdfx_init);
module_exit(tdfx_cleanup);

#ifndef MODULE
/*
 * tdfx_options is called by the kernel to parse command-line options passed
 * via the boot loader (e.g., LILO).  It calls the insmod option routine,
 * drm_parse_options.
 */
static int __init tdfx_options(char *str)
{
        drm_parse_options(str);
        return 1;
}

__setup("tdfx=", tdfx_options);
#endif