/* i810_drv.c -- I810 driver -*- linux-c -*-
 * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
 *          Jeff Hartmann <jhartmann@valinux.com>
 */

#include <linux/config.h>
#include "drmP.h"
#include "i810_drv.h"

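/* Driver identification: these constants are reported to userspace by
 * i810_version() (DRM_IOCTL_VERSION), and I810_NAME is also used as the
 * misc device name registered in i810_init(). */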
#define I810_NAME        "i810"
#define I810_DESC        "Intel I810"
#define I810_DATE        "20000719"
#define I810_MAJOR       1
#define I810_MINOR       1
#define I810_PATCHLEVEL  0

static drm_device_t i810_device;
drm_ctx_t           i810_res_ctx;

static struct file_operations i810_fops = {
#if LINUX_VERSION_CODE >= 0x020400
        /* This started being used during 2.4.0-test */
        owner:   THIS_MODULE,
#endif
        open:    i810_open,
        flush:   drm_flush,
        release: i810_release,
        ioctl:   i810_ioctl,
        mmap:    drm_mmap,
        read:    drm_read,
        fasync:  drm_fasync,
        poll:    drm_poll,
};

static struct miscdevice i810_misc = {
        minor: MISC_DYNAMIC_MINOR,
        name:  I810_NAME,
        fops:  &i810_fops,
};

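/* Ioctl dispatch table, indexed by DRM_IOCTL_NR(cmd).  Each entry is
 * { handler, auth_needed, root_only }; the two flags are checked by
 * i810_ioctl() before the handler is invoked. */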
static drm_ioctl_desc_t i810_ioctls[] = {
        [DRM_IOCTL_NR(DRM_IOCTL_VERSION)]     = { i810_version,     0, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)]  = { drm_getunique,    0, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)]   = { drm_getmagic,     0, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)]   = { drm_irq_busid,    0, 1 },

        [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)]  = { drm_setunique,    1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)]       = { drm_block,        1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]     = { drm_unblock,      1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)]     = { i810_control,     1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)]  = { drm_authmagic,    1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]     = { drm_addmap,       1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)]    = { i810_addbufs,     1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)]   = { i810_markbufs,    1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)]   = { i810_infobufs,    1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)]   = { i810_freebufs,    1, 0 },

        [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)]     = { i810_addctx,      1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)]      = { i810_rmctx,       1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)]     = { i810_modctx,      1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)]     = { i810_getctx,      1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)]  = { i810_switchctx,   1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)]     = { i810_newctx,      1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)]     = { i810_resctx,      1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)]    = { drm_adddraw,      1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)]     = { drm_rmdraw,       1, 1 },

        [DRM_IOCTL_NR(DRM_IOCTL_LOCK)]        = { i810_lock,        1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)]      = { i810_unlock,      1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_FINISH)]      = { drm_finish,       1, 0 },

        [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire,  1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release,  1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)]  = { drm_agp_enable,   1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)]    = { drm_agp_info,     1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)]   = { drm_agp_alloc,    1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)]    = { drm_agp_free,     1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)]    = { drm_agp_bind,     1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)]  = { drm_agp_unbind,   1, 1 },

        [DRM_IOCTL_NR(DRM_IOCTL_I810_INIT)]   = { i810_dma_init,    1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_I810_VERTEX)] = { i810_dma_vertex,  1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_I810_CLEAR)]  = { i810_clear_bufs,  1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_I810_FLUSH)]  = { i810_flush_ioctl, 1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_I810_GETAGE)] = { i810_getage,      1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_I810_GETBUF)] = { i810_getbuf,      1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_I810_SWAP)]   = { i810_swap_bufs,   1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_I810_COPY)]   = { i810_copybuf,     1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_I810_DOCOPY)] = { i810_docopy,      1, 0 },
};

#define I810_IOCTL_COUNT DRM_ARRAY_SIZE(i810_ioctls)

#ifdef MODULE
static char *i810 = NULL;
#endif

MODULE_AUTHOR("VA Linux Systems, Inc.");
MODULE_DESCRIPTION("Intel I810");
MODULE_PARM(i810, "s");

#ifndef MODULE
/* i810_options is called by the kernel to parse command-line options
 * passed via the boot loader (e.g., LILO).  It hands the string to the
 * same option-parsing routine used for the insmod option,
 * drm_parse_options (see the usage note below).
 */

static int __init i810_options(char *str)
{
        drm_parse_options(str);
        return 1;
}

__setup("i810=", i810_options);
#endif

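/* Illustrative usage (not part of the original source): with the driver
 * built into the kernel, the option string is passed at boot as
 * "i810=<options>"; built as a module, the equivalent string can be
 * supplied as the "i810" module parameter (see MODULE_PARM above and the
 * drm_parse_options() call in i810_init()), e.g. insmod i810.o
 * i810=<options>.  The option syntax itself is defined by
 * drm_parse_options() in the DRM core. */
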
/* i810_setup initializes per-device state; it is called from i810_open()
 * when the first file handle on the device is opened. */

static int i810_setup(drm_device_t *dev)
{
        int i;

        atomic_set(&dev->ioctl_count, 0);
        atomic_set(&dev->vma_count, 0);
        dev->buf_use = 0;
        atomic_set(&dev->buf_alloc, 0);

        drm_dma_setup(dev);

        atomic_set(&dev->total_open, 0);
        atomic_set(&dev->total_close, 0);
        atomic_set(&dev->total_ioctl, 0);
        atomic_set(&dev->total_irq, 0);
        atomic_set(&dev->total_ctx, 0);
        atomic_set(&dev->total_locks, 0);
        atomic_set(&dev->total_unlocks, 0);
        atomic_set(&dev->total_contends, 0);
        atomic_set(&dev->total_sleeps, 0);

        for (i = 0; i < DRM_HASH_SIZE; i++) {
                dev->magiclist[i].head = NULL;
                dev->magiclist[i].tail = NULL;
        }
        dev->maplist        = NULL;
        dev->map_count      = 0;
        dev->vmalist        = NULL;
        dev->lock.hw_lock   = NULL;
        init_waitqueue_head(&dev->lock.lock_queue);
        dev->queue_count    = 0;
        dev->queue_reserved = 0;
        dev->queue_slots    = 0;
        dev->queuelist      = NULL;
        dev->irq            = 0;
        dev->context_flag   = 0;
        dev->interrupt_flag = 0;
        dev->dma_flag       = 0;
        dev->last_context   = 0;
        dev->last_switch    = 0;
        dev->last_checked   = 0;
        init_timer(&dev->timer);
        init_waitqueue_head(&dev->context_wait);
#if DRM_DMA_HISTOGRAM
        memset(&dev->histo, 0, sizeof(dev->histo));
#endif
        dev->ctx_start      = 0;
        dev->lck_start      = 0;

        dev->buf_rp    = dev->buf;
        dev->buf_wp    = dev->buf;
        dev->buf_end   = dev->buf + DRM_BSZ;
        dev->buf_async = NULL;
        init_waitqueue_head(&dev->buf_readers);
        init_waitqueue_head(&dev->buf_writers);

        DRM_DEBUG("\n");

        /* The kernel's context could be created here, but is now created
           in drm_dma_enqueue.  This is more resource-efficient for
           hardware that does not do DMA, but may mean that
           drm_select_queue fails between the time the interrupt is
           initialized and the time the queues are initialized. */

        return 0;
}

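/* i810_takedown undoes i810_setup(): it runs when the last file handle on
 * the device is closed (see i810_release()) and again from i810_cleanup()
 * at module unload. */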
static int i810_takedown(drm_device_t *dev)
{
        int               i;
        drm_magic_entry_t *pt, *next;
        drm_map_t         *map;
        drm_vma_entry_t   *vma, *vma_next;

        DRM_DEBUG("\n");

        if (dev->irq) i810_irq_uninstall(dev);

        down(&dev->struct_sem);
        del_timer(&dev->timer);

        if (dev->devname) {
                drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
                dev->devname = NULL;
        }

        if (dev->unique) {
                drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
                dev->unique = NULL;
                dev->unique_len = 0;
        }
                                /* Clear pid list */
        for (i = 0; i < DRM_HASH_SIZE; i++) {
                for (pt = dev->magiclist[i].head; pt; pt = next) {
                        next = pt->next;
                        drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
                }
                dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
        }
                                /* Clear AGP information */
        if (dev->agp) {
                drm_agp_mem_t *entry;
                drm_agp_mem_t *nexte;

                                /* Remove AGP resources, but leave dev->agp
                                   intact until i810_cleanup is called. */
                for (entry = dev->agp->memory; entry; entry = nexte) {
                        nexte = entry->next;
                        if (entry->bound) drm_unbind_agp(entry->memory);
                        drm_free_agp(entry->memory, entry->pages);
                        drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
                }
                dev->agp->memory = NULL;

                if (dev->agp->acquired && drm_agp.release)
                        (*drm_agp.release)();

                dev->agp->acquired = 0;
                dev->agp->enabled  = 0;
        }
                                /* Clear vma list (only built for debugging) */
        if (dev->vmalist) {
                for (vma = dev->vmalist; vma; vma = vma_next) {
                        vma_next = vma->next;
                        drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
                }
                dev->vmalist = NULL;
        }

                                /* Clear map area and mtrr information */
        if (dev->maplist) {
                for (i = 0; i < dev->map_count; i++) {
                        map = dev->maplist[i];
                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
#ifdef CONFIG_MTRR
                                if (map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
#endif
                                drm_ioremapfree(map->handle, map->size);
                                break;
                        case _DRM_SHM:
                                drm_free_pages((unsigned long)map->handle,
                                               drm_order(map->size)
                                               - PAGE_SHIFT,
                                               DRM_MEM_SAREA);
                                break;
                        case _DRM_AGP:
                                break;
                        }
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                }
                drm_free(dev->maplist,
                         dev->map_count * sizeof(*dev->maplist),
                         DRM_MEM_MAPS);
                dev->maplist   = NULL;
                dev->map_count = 0;
        }

        if (dev->queuelist) {
                for (i = 0; i < dev->queue_count; i++) {
                        drm_waitlist_destroy(&dev->queuelist[i]->waitlist);
                        if (dev->queuelist[i]) {
                                drm_free(dev->queuelist[i],
                                         sizeof(*dev->queuelist[0]),
                                         DRM_MEM_QUEUES);
                                dev->queuelist[i] = NULL;
                        }
                }
                drm_free(dev->queuelist,
                         dev->queue_slots * sizeof(*dev->queuelist),
                         DRM_MEM_QUEUES);
                dev->queuelist = NULL;
        }

        drm_dma_takedown(dev);

        dev->queue_count = 0;
        if (dev->lock.hw_lock) {
                dev->lock.hw_lock = NULL; /* SHM removed */
                dev->lock.pid     = 0;
                wake_up_interruptible(&dev->lock.lock_queue);
        }
        up(&dev->struct_sem);

        return 0;
}

/* i810_init is called via init_module at module load time, or via
 * linux/init/main.c (this is not currently supported). */

static int i810_init(void)
{
        int          retcode;
        drm_device_t *dev = &i810_device;

        DRM_DEBUG("\n");

        memset((void *)dev, 0, sizeof(*dev));
        dev->count_lock = SPIN_LOCK_UNLOCKED;
        sema_init(&dev->struct_sem, 1);

#ifdef MODULE
        drm_parse_options(i810);
#endif
        DRM_DEBUG("doing misc_register\n");
        if ((retcode = misc_register(&i810_misc))) {
                DRM_ERROR("Cannot register \"%s\"\n", I810_NAME);
                return retcode;
        }
        dev->device = MKDEV(MISC_MAJOR, i810_misc.minor);
        dev->name   = I810_NAME;

        DRM_DEBUG("doing mem init\n");
        drm_mem_init();
        DRM_DEBUG("doing proc init\n");
        drm_proc_init(dev);
        DRM_DEBUG("doing agp init\n");
        dev->agp = drm_agp_init();
        if (dev->agp == NULL) {
                DRM_INFO("The i810 drm module requires the agpgart module"
                         " to function correctly\nPlease load the agpgart"
                         " module before you load the i810 module\n");
                drm_proc_cleanup();
                misc_deregister(&i810_misc);
                i810_takedown(dev);
                return -ENOMEM;
        }
        DRM_DEBUG("doing ctxbitmap init\n");
        if ((retcode = drm_ctxbitmap_init(dev))) {
                DRM_ERROR("Cannot allocate memory for context bitmap.\n");
                drm_proc_cleanup();
                misc_deregister(&i810_misc);
                i810_takedown(dev);
                return retcode;
        }

        DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
                 I810_NAME,
                 I810_MAJOR,
                 I810_MINOR,
                 I810_PATCHLEVEL,
                 I810_DATE,
                 i810_misc.minor);

        return 0;
}

/* i810_cleanup is called via cleanup_module at module unload time. */

static void i810_cleanup(void)
{
        drm_device_t *dev = &i810_device;

        DRM_DEBUG("\n");

        drm_proc_cleanup();
        if (misc_deregister(&i810_misc)) {
                DRM_ERROR("Cannot unload module\n");
        } else {
                DRM_INFO("Module unloaded\n");
        }
        drm_ctxbitmap_cleanup(dev);
        i810_takedown(dev);
        if (dev->agp) {
                drm_agp_uninit();
                drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
                dev->agp = NULL;
        }
}

module_init(i810_init);
module_exit(i810_cleanup);

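/* Load-order note: i810_init() fails with -ENOMEM when drm_agp_init()
 * returns NULL, so the agpgart module must be loaded (e.g. "modprobe
 * agpgart") before this driver. */
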
int i810_version(struct inode *inode, struct file *filp, unsigned int cmd,
                 unsigned long arg)
{
        drm_version_t version;
        int           len;

        copy_from_user_ret(&version,
                           (drm_version_t *)arg,
                           sizeof(version),
                           -EFAULT);

/* DRM_COPY copies at most the user-supplied buffer length, then reports
 * the full string length back so the caller can re-query with a buffer
 * that is large enough. */
#define DRM_COPY(name,value)                                 \
        len = strlen(value);                                 \
        if (len > name##_len) len = name##_len;              \
        name##_len = strlen(value);                          \
        if (len && name) {                                   \
                copy_to_user_ret(name, value, len, -EFAULT); \
        }

        version.version_major      = I810_MAJOR;
        version.version_minor      = I810_MINOR;
        version.version_patchlevel = I810_PATCHLEVEL;

        DRM_COPY(version.name, I810_NAME);
        DRM_COPY(version.date, I810_DATE);
        DRM_COPY(version.desc, I810_DESC);

        copy_to_user_ret((drm_version_t *)arg,
                         &version,
                         sizeof(version),
                         -EFAULT);
        return 0;
}

int i810_open(struct inode *inode, struct file *filp)
{
        drm_device_t *dev     = &i810_device;
        int          retcode = 0;

        DRM_DEBUG("open_count = %d\n", dev->open_count);
        if (!(retcode = drm_open_helper(inode, filp, dev))) {
#if LINUX_VERSION_CODE < 0x020333
                MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
                atomic_inc(&dev->total_open);
                spin_lock(&dev->count_lock);
                if (!dev->open_count++) {
                        spin_unlock(&dev->count_lock);
                        return i810_setup(dev);
                }
                spin_unlock(&dev->count_lock);
        }
        return retcode;
}

int i810_release(struct inode *inode, struct file *filp)
{
        drm_file_t   *priv   = filp->private_data;
        drm_device_t *dev;
        int          retcode = 0;

        lock_kernel();
        dev = priv->dev;
        DRM_DEBUG("pid = %d, device = 0x%x, open_count = %d\n",
                  current->pid, dev->device, dev->open_count);

        if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
            && dev->lock.pid == current->pid) {
                i810_reclaim_buffers(dev, priv->pid);
                DRM_ERROR("Process %d dead, freeing lock for context %d\n",
                          current->pid,
                          _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
                drm_lock_free(dev,
                              &dev->lock.hw_lock->lock,
                              _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

                                /* FIXME: may require heavy-handed reset of
                                   hardware at this point, possibly
                                   processed via a callback to the X
                                   server. */
        } else if (dev->lock.hw_lock) {
                /* The lock is required to reclaim buffers */
                DECLARE_WAITQUEUE(entry, current);
                add_wait_queue(&dev->lock.lock_queue, &entry);
                for (;;) {
                        if (!dev->lock.hw_lock) {
                                /* Device has been unregistered */
                                retcode = -EINTR;
                                break;
                        }
                        if (drm_lock_take(&dev->lock.hw_lock->lock,
                                          DRM_KERNEL_CONTEXT)) {
                                dev->lock.pid       = priv->pid;
                                dev->lock.lock_time = jiffies;
                                atomic_inc(&dev->total_locks);
                                break;  /* Got lock */
                        }
                                /* Contention */
                        atomic_inc(&dev->total_sleeps);
                        current->state = TASK_INTERRUPTIBLE;
                        schedule();
                        if (signal_pending(current)) {
                                retcode = -ERESTARTSYS;
                                break;
                        }
                }
                current->state = TASK_RUNNING;
                remove_wait_queue(&dev->lock.lock_queue, &entry);
                if (!retcode) {
                        i810_reclaim_buffers(dev, priv->pid);
                        drm_lock_free(dev, &dev->lock.hw_lock->lock,
                                      DRM_KERNEL_CONTEXT);
                }
        }
        drm_fasync(-1, filp, 0);

        down(&dev->struct_sem);
        if (priv->prev) priv->prev->next = priv->next;
        else            dev->file_first  = priv->next;
        if (priv->next) priv->next->prev = priv->prev;
        else            dev->file_last   = priv->prev;
        up(&dev->struct_sem);

        drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
#if LINUX_VERSION_CODE < 0x020333
        MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
        atomic_inc(&dev->total_close);
        spin_lock(&dev->count_lock);
        if (!--dev->open_count) {
                if (atomic_read(&dev->ioctl_count) || dev->blocked) {
                        DRM_ERROR("Device busy: %d %d\n",
                                  atomic_read(&dev->ioctl_count),
                                  dev->blocked);
                        spin_unlock(&dev->count_lock);
                        unlock_kernel();
                        return -EBUSY;
                }
                spin_unlock(&dev->count_lock);
                unlock_kernel();
                return i810_takedown(dev);
        }
        spin_unlock(&dev->count_lock);
        unlock_kernel();
        return retcode;
}

/* i810_ioctl is called whenever a process performs an ioctl on /dev/drm. */

int i810_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
               unsigned long arg)
{
        int              nr      = DRM_IOCTL_NR(cmd);
        drm_file_t       *priv   = filp->private_data;
        drm_device_t     *dev    = priv->dev;
        int              retcode = 0;
        drm_ioctl_desc_t *ioctl;
        drm_ioctl_t      *func;

        atomic_inc(&dev->ioctl_count);
        atomic_inc(&dev->total_ioctl);
        ++priv->ioctl_count;

        DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
                  current->pid, cmd, nr, dev->device, priv->authenticated);

        if (nr >= I810_IOCTL_COUNT) {
                retcode = -EINVAL;
        } else {
                ioctl = &i810_ioctls[nr];
                func  = ioctl->func;

                if (!func) {
                        DRM_DEBUG("no function\n");
                        retcode = -EINVAL;
                } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
                           || (ioctl->auth_needed && !priv->authenticated)) {
                        retcode = -EACCES;
                } else {
                        retcode = (func)(inode, filp, cmd, arg);
                }
        }

        atomic_dec(&dev->ioctl_count);
        return retcode;
}

int i810_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
                unsigned long arg)
{
        drm_file_t   *priv = filp->private_data;
        drm_device_t *dev  = priv->dev;
        drm_lock_t   lock;

        copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);

        if (lock.context == DRM_KERNEL_CONTEXT) {
                DRM_ERROR("Process %d using kernel context %d\n",
                          current->pid, lock.context);
                return -EINVAL;
        }

        DRM_DEBUG("%d frees lock (%d holds)\n",
                  lock.context,
                  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
        atomic_inc(&dev->total_unlocks);
        if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
                atomic_inc(&dev->total_contends);
        drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
        if (!dev->context_flag) {
                if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
                                  DRM_KERNEL_CONTEXT)) {
                        DRM_ERROR("\n");
                }
        }
#if DRM_DMA_HISTOGRAM
        atomic_inc(&dev->histo.lhld[drm_histogram_slot(get_cycles()
                                                       - dev->lck_start)]);
#endif

        return 0;
}