/*
 * Imported from Linux 2.4.0-test2pre7 (davej-history.git)
 * drivers/char/drm/ffb_drv.c
 * blob a1ab1f27953f3f0aad939348ee7f82c8bd42f147
 */
/* $Id: ffb_drv.c,v 1.3 2000/06/01 04:24:39 davem Exp $
 * ffb_drv.c: Creator/Creator3D direct rendering driver.
 *
 * Copyright (C) 2000 David S. Miller (davem@redhat.com)
 */
#include "drmP.h"

#include <asm/oplib.h>
#include <asm/upa.h>

#include "ffb_drv.h"

#define FFB_NAME	"ffb"
#define FFB_DESC	"Creator/Creator3D"
#define FFB_DATE	"20000517"
#define FFB_MAJOR	0
#define FFB_MINOR	0
#define FFB_PATCHLEVEL	1

/* Forward declarations. */
int ffb_init(void);
void ffb_cleanup(void);
static int ffb_version(struct inode *inode, struct file *filp,
		       unsigned int cmd, unsigned long arg);
static int ffb_open(struct inode *inode, struct file *filp);
static int ffb_release(struct inode *inode, struct file *filp);
static int ffb_ioctl(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg);
static int ffb_lock(struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg);
static int ffb_unlock(struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg);
static int ffb_mmap(struct file *filp, struct vm_area_struct *vma);

/* From ffb_context.c */
extern int ffb_resctx(struct inode *, struct file *, unsigned int, unsigned long);
extern int ffb_addctx(struct inode *, struct file *, unsigned int, unsigned long);
extern int ffb_modctx(struct inode *, struct file *, unsigned int, unsigned long);
extern int ffb_getctx(struct inode *, struct file *, unsigned int, unsigned long);
extern int ffb_switchctx(struct inode *, struct file *, unsigned int, unsigned long);
extern int ffb_newctx(struct inode *, struct file *, unsigned int, unsigned long);
extern int ffb_rmctx(struct inode *, struct file *, unsigned int, unsigned long);
extern int ffb_context_switch(drm_device_t *, int, int);
46 static struct file_operations ffb_fops = {
47 open: ffb_open,
48 flush: drm_flush,
49 release: ffb_release,
50 ioctl: ffb_ioctl,
51 mmap: ffb_mmap,
52 read: drm_read,
53 fasync: drm_fasync,
54 poll: drm_poll,
57 /* This is just a template, we make a new copy for each FFB
58 * we discover at init time so that each one gets a unique
59 * misc device minor number.
61 static struct miscdevice ffb_misc = {
62 minor: MISC_DYNAMIC_MINOR,
63 name: FFB_NAME,
64 fops: &ffb_fops,
67 static drm_ioctl_desc_t ffb_ioctls[] = {
68 [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { ffb_version, 0, 0 },
69 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
70 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
71 [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 }, /* XXX */
73 [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
74 [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
75 [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
76 [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
77 [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
79 /* The implementation is currently a nop just like on tdfx.
80 * Later we can do something more clever. -DaveM
82 [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { ffb_addctx, 1, 1 },
83 [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { ffb_rmctx, 1, 1 },
84 [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { ffb_modctx, 1, 1 },
85 [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { ffb_getctx, 1, 0 },
86 [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { ffb_switchctx, 1, 1 },
87 [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { ffb_newctx, 1, 1 },
88 [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { ffb_resctx, 1, 0 },
90 [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
91 [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
93 [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { ffb_lock, 1, 0 },
94 [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { ffb_unlock, 1, 0 },
95 [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
97 #define FFB_IOCTL_COUNT DRM_ARRAY_SIZE(ffb_ioctls)
99 #ifdef MODULE
100 static char *ffb = NULL;
101 #endif
103 MODULE_AUTHOR("David S. Miller (davem@redhat.com)");
104 MODULE_DESCRIPTION("Sun Creator/Creator3D DRI");
106 static int ffb_takedown(drm_device_t *dev)
108 int i;
109 drm_magic_entry_t *pt, *next;
110 drm_map_t *map;
111 drm_vma_entry_t *vma, *vma_next;
113 DRM_DEBUG("\n");
115 down(&dev->struct_sem);
116 del_timer(&dev->timer);
118 if (dev->devname) {
119 drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
120 dev->devname = NULL;
123 if (dev->unique) {
124 drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
125 dev->unique = NULL;
126 dev->unique_len = 0;
129 /* Clear pid list */
130 for (i = 0; i < DRM_HASH_SIZE; i++) {
131 for (pt = dev->magiclist[i].head; pt; pt = next) {
132 next = pt->next;
133 drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
135 dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
138 /* Clear vma list (only built for debugging) */
139 if (dev->vmalist) {
140 for (vma = dev->vmalist; vma; vma = vma_next) {
141 vma_next = vma->next;
142 drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
144 dev->vmalist = NULL;
147 /* Clear map area information */
148 if (dev->maplist) {
149 for (i = 0; i < dev->map_count; i++) {
150 map = dev->maplist[i];
151 switch (map->type) {
152 case _DRM_REGISTERS:
153 case _DRM_FRAME_BUFFER:
154 drm_ioremapfree(map->handle, map->size);
155 break;
157 case _DRM_SHM:
158 drm_free_pages((unsigned long)map->handle,
159 drm_order(map->size)
160 - PAGE_SHIFT,
161 DRM_MEM_SAREA);
162 break;
165 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
168 drm_free(dev->maplist,
169 dev->map_count * sizeof(*dev->maplist),
170 DRM_MEM_MAPS);
171 dev->maplist = NULL;
172 dev->map_count = 0;
175 if (dev->lock.hw_lock) {
176 dev->lock.hw_lock = NULL; /* SHM removed */
177 dev->lock.pid = 0;
178 wake_up_interruptible(&dev->lock.lock_queue);
180 up(&dev->struct_sem);
182 return 0;
185 drm_device_t **ffb_dev_table;
186 static int ffb_dev_table_size;
188 static void get_ffb_type(ffb_dev_priv_t *ffb_priv, int instance)
190 volatile unsigned char *strap_bits;
191 unsigned char val;
193 strap_bits = (volatile unsigned char *)
194 (ffb_priv->card_phys_base + 0x00200000UL);
196 /* Don't ask, you have to read the value twice for whatever
197 * reason to get correct contents.
199 val = upa_readb(strap_bits);
200 val = upa_readb(strap_bits);
201 switch (val & 0x78) {
202 case (0x0 << 5) | (0x0 << 3):
203 ffb_priv->ffb_type = ffb1_prototype;
204 printk("ffb%d: Detected FFB1 pre-FCS prototype\n", instance);
205 break;
206 case (0x0 << 5) | (0x1 << 3):
207 ffb_priv->ffb_type = ffb1_standard;
208 printk("ffb%d: Detected FFB1\n", instance);
209 break;
210 case (0x0 << 5) | (0x3 << 3):
211 ffb_priv->ffb_type = ffb1_speedsort;
212 printk("ffb%d: Detected FFB1-SpeedSort\n", instance);
213 break;
214 case (0x1 << 5) | (0x0 << 3):
215 ffb_priv->ffb_type = ffb2_prototype;
216 printk("ffb%d: Detected FFB2/vertical pre-FCS prototype\n", instance);
217 break;
218 case (0x1 << 5) | (0x1 << 3):
219 ffb_priv->ffb_type = ffb2_vertical;
220 printk("ffb%d: Detected FFB2/vertical\n", instance);
221 break;
222 case (0x1 << 5) | (0x2 << 3):
223 ffb_priv->ffb_type = ffb2_vertical_plus;
224 printk("ffb%d: Detected FFB2+/vertical\n", instance);
225 break;
226 case (0x2 << 5) | (0x0 << 3):
227 ffb_priv->ffb_type = ffb2_horizontal;
228 printk("ffb%d: Detected FFB2/horizontal\n", instance);
229 break;
230 case (0x2 << 5) | (0x2 << 3):
231 ffb_priv->ffb_type = ffb2_horizontal;
232 printk("ffb%d: Detected FFB2+/horizontal\n", instance);
233 break;
234 default:
235 ffb_priv->ffb_type = ffb2_vertical;
236 printk("ffb%d: Unknown boardID[%08x], assuming FFB2\n", instance, val);
237 break;
241 static int ffb_init_one(int prom_node, int instance)
243 struct linux_prom64_registers regs[2*PROMREG_MAX];
244 drm_device_t *dev;
245 ffb_dev_priv_t *ffb_priv;
246 int ret, i;
248 dev = kmalloc(sizeof(drm_device_t) + sizeof(ffb_dev_priv_t), GFP_KERNEL);
249 if (!dev)
250 return -ENOMEM;
252 memset(dev, 0, sizeof(*dev));
253 spin_lock_init(&dev->count_lock);
254 sema_init(&dev->struct_sem, 1);
256 ffb_priv = (ffb_dev_priv_t *) (dev + 1);
257 ffb_priv->prom_node = prom_node;
258 if (prom_getproperty(ffb_priv->prom_node, "reg",
259 (void *)regs, sizeof(regs)) <= 0) {
260 kfree(dev);
261 return -EINVAL;
263 ffb_priv->card_phys_base = regs[0].phys_addr;
264 ffb_priv->regs = (ffb_fbcPtr)
265 (regs[0].phys_addr + 0x00600000UL);
266 get_ffb_type(ffb_priv, instance);
267 for (i = 0; i < FFB_MAX_CTXS; i++)
268 ffb_priv->hw_state[i] = NULL;
270 ffb_dev_table[instance] = dev;
272 #ifdef MODULE
273 drm_parse_options(ffb);
274 #endif
276 memcpy(&ffb_priv->miscdev, &ffb_misc, sizeof(ffb_misc));
277 ret = misc_register(&ffb_priv->miscdev);
278 if (ret) {
279 ffb_dev_table[instance] = NULL;
280 kfree(dev);
281 return ret;
284 dev->device = MKDEV(MISC_MAJOR, ffb_priv->miscdev.minor);
285 dev->name = FFB_NAME;
287 drm_mem_init();
288 drm_proc_init(dev);
290 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d at %016lx\n",
291 FFB_NAME,
292 FFB_MAJOR,
293 FFB_MINOR,
294 FFB_PATCHLEVEL,
295 FFB_DATE,
296 ffb_priv->miscdev.minor,
297 ffb_priv->card_phys_base);
299 return 0;
302 static int ffb_init_dev_table(void)
304 int root, node;
305 int total = 0;
307 root = prom_getchild(prom_root_node);
308 for (node = prom_searchsiblings(root, "SUNW,ffb"); node;
309 node = prom_searchsiblings(prom_getsibling(node), "SUNW,ffb"))
310 total++;
312 ffb_dev_table = kmalloc(sizeof(drm_device_t *) * total, GFP_KERNEL);
313 if (!ffb_dev_table)
314 return -ENOMEM;
316 ffb_dev_table_size = total;
318 return 0;
321 int ffb_init(void)
323 int root, node, instance, ret;
325 ret = ffb_init_dev_table();
326 if (ret)
327 return ret;
329 instance = 0;
330 root = prom_getchild(prom_root_node);
331 for (node = prom_searchsiblings(root, "SUNW,ffb"); node;
332 node = prom_searchsiblings(prom_getsibling(node), "SUNW,ffb")) {
333 ret = ffb_init_one(node, instance);
334 if (ret)
335 return ret;
336 instance++;
339 return 0;
342 void ffb_cleanup(void)
344 int instance;
346 DRM_DEBUG("\n");
348 drm_proc_cleanup();
349 for (instance = 0; instance < ffb_dev_table_size; instance++) {
350 drm_device_t *dev = ffb_dev_table[instance];
351 ffb_dev_priv_t *ffb_priv;
353 if (!dev)
354 continue;
356 ffb_priv = (ffb_dev_priv_t *) (dev + 1);
357 if (misc_deregister(&ffb_priv->miscdev)) {
358 DRM_ERROR("Cannot unload module\n");
359 } else {
360 DRM_INFO("Module unloaded\n");
362 ffb_takedown(dev);
363 kfree(dev);
364 ffb_dev_table[instance] = NULL;
366 kfree(ffb_dev_table);
367 ffb_dev_table = NULL;
368 ffb_dev_table_size = 0;
371 static int ffb_version(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
373 drm_version_t version;
374 int len, ret;
376 ret = copy_from_user(&version, (drm_version_t *)arg, sizeof(version));
377 if (ret)
378 return -EFAULT;
380 version.version_major = FFB_MAJOR;
381 version.version_minor = FFB_MINOR;
382 version.version_patchlevel = FFB_PATCHLEVEL;
384 len = strlen(FFB_NAME);
385 if (len > version.name_len)
386 len = version.name_len;
387 version.name_len = len;
388 if (len && version.name) {
389 ret = copy_to_user(version.name, FFB_NAME, len);
390 if (ret)
391 return -EFAULT;
394 len = strlen(FFB_DATE);
395 if (len > version.date_len)
396 len = version.date_len;
397 version.date_len = len;
398 if (len && version.date) {
399 ret = copy_to_user(version.date, FFB_DATE, len);
400 if (ret)
401 return -EFAULT;
404 len = strlen(FFB_DESC);
405 if (len > version.desc_len)
406 len = version.desc_len;
407 version.desc_len = len;
408 if (len && version.desc) {
409 ret = copy_to_user(version.desc, FFB_DESC, len);
410 if (ret)
411 return -EFAULT;
414 ret = copy_to_user((drm_version_t *) arg, &version, sizeof(version));
415 if (ret)
416 ret = -EFAULT;
418 return ret;
421 static int ffb_setup(drm_device_t *dev)
423 int i;
425 atomic_set(&dev->ioctl_count, 0);
426 atomic_set(&dev->vma_count, 0);
427 dev->buf_use = 0;
428 atomic_set(&dev->buf_alloc, 0);
430 atomic_set(&dev->total_open, 0);
431 atomic_set(&dev->total_close, 0);
432 atomic_set(&dev->total_ioctl, 0);
433 atomic_set(&dev->total_irq, 0);
434 atomic_set(&dev->total_ctx, 0);
435 atomic_set(&dev->total_locks, 0);
436 atomic_set(&dev->total_unlocks, 0);
437 atomic_set(&dev->total_contends, 0);
438 atomic_set(&dev->total_sleeps, 0);
440 for (i = 0; i < DRM_HASH_SIZE; i++) {
441 dev->magiclist[i].head = NULL;
442 dev->magiclist[i].tail = NULL;
445 dev->maplist = NULL;
446 dev->map_count = 0;
447 dev->vmalist = NULL;
448 dev->lock.hw_lock = NULL;
449 init_waitqueue_head(&dev->lock.lock_queue);
450 dev->queue_count = 0;
451 dev->queue_reserved = 0;
452 dev->queue_slots = 0;
453 dev->queuelist = NULL;
454 dev->irq = 0;
455 dev->context_flag = 0;
456 dev->interrupt_flag = 0;
457 dev->dma = 0;
458 dev->dma_flag = 0;
459 dev->last_context = 0;
460 dev->last_switch = 0;
461 dev->last_checked = 0;
462 init_timer(&dev->timer);
463 init_waitqueue_head(&dev->context_wait);
465 dev->ctx_start = 0;
466 dev->lck_start = 0;
468 dev->buf_rp = dev->buf;
469 dev->buf_wp = dev->buf;
470 dev->buf_end = dev->buf + DRM_BSZ;
471 dev->buf_async = NULL;
472 init_waitqueue_head(&dev->buf_readers);
473 init_waitqueue_head(&dev->buf_writers);
475 return 0;
478 static int ffb_open(struct inode *inode, struct file *filp)
480 drm_device_t *dev;
481 int minor, i;
482 int ret = 0;
484 minor = MINOR(inode->i_rdev);
485 for (i = 0; i < ffb_dev_table_size; i++) {
486 ffb_dev_priv_t *ffb_priv;
488 ffb_priv = (ffb_dev_priv_t *) (ffb_dev_table[i] + 1);
490 if (ffb_priv->miscdev.minor == minor)
491 break;
494 if (i >= ffb_dev_table_size)
495 return -EINVAL;
497 dev = ffb_dev_table[i];
498 if (!dev)
499 return -EINVAL;
501 DRM_DEBUG("open_count = %d\n", dev->open_count);
502 ret = drm_open_helper(inode, filp, dev);
503 if (!ret) {
504 MOD_INC_USE_COUNT;
505 atomic_inc(&dev->total_open);
506 spin_lock(&dev->count_lock);
507 if (!dev->open_count++) {
508 spin_unlock(&dev->count_lock);
509 return ffb_setup(dev);
511 spin_unlock(&dev->count_lock);
514 return ret;
517 static int ffb_release(struct inode *inode, struct file *filp)
519 drm_file_t *priv = filp->private_data;
520 drm_device_t *dev = priv->dev;
521 int ret = 0;
523 DRM_DEBUG("open_count = %d\n", dev->open_count);
524 if (dev->lock.hw_lock != NULL
525 && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
526 && dev->lock.pid == current->pid) {
527 ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) (dev + 1);
528 int context = _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock);
529 int idx;
531 /* We have to free up the rogue hw context state
532 * holding error or else we will leak it.
534 idx = context - 1;
535 if (fpriv->hw_state[idx] != NULL) {
536 kfree(fpriv->hw_state[idx]);
537 fpriv->hw_state[idx] = NULL;
541 ret = drm_release(inode, filp);
543 if (!ret) {
544 MOD_DEC_USE_COUNT;
545 atomic_inc(&dev->total_close);
546 spin_lock(&dev->count_lock);
547 if (!--dev->open_count) {
548 if (atomic_read(&dev->ioctl_count) || dev->blocked) {
549 DRM_ERROR("Device busy: %d %d\n",
550 atomic_read(&dev->ioctl_count),
551 dev->blocked);
552 spin_unlock(&dev->count_lock);
553 return -EBUSY;
555 spin_unlock(&dev->count_lock);
556 return ffb_takedown(dev);
558 spin_unlock(&dev->count_lock);
561 return ret;
564 static int ffb_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
566 int nr = DRM_IOCTL_NR(cmd);
567 drm_file_t *priv = filp->private_data;
568 drm_device_t *dev = priv->dev;
569 drm_ioctl_desc_t *ioctl;
570 drm_ioctl_t *func;
571 int ret;
573 atomic_inc(&dev->ioctl_count);
574 atomic_inc(&dev->total_ioctl);
575 ++priv->ioctl_count;
577 DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
578 current->pid, cmd, nr, dev->device, priv->authenticated);
580 if (nr >= FFB_IOCTL_COUNT) {
581 ret = -EINVAL;
582 } else {
583 ioctl = &ffb_ioctls[nr];
584 func = ioctl->func;
586 if (!func) {
587 DRM_DEBUG("no function\n");
588 ret = -EINVAL;
589 } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
590 || (ioctl->auth_needed && !priv->authenticated)) {
591 ret = -EACCES;
592 } else {
593 ret = (func)(inode, filp, cmd, arg);
597 atomic_dec(&dev->ioctl_count);
599 return ret;
602 static int ffb_lock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
604 drm_file_t *priv = filp->private_data;
605 drm_device_t *dev = priv->dev;
606 DECLARE_WAITQUEUE(entry, current);
607 int ret = 0;
608 drm_lock_t lock;
610 ret = copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock));
611 if (ret)
612 return -EFAULT;
614 if (lock.context == DRM_KERNEL_CONTEXT) {
615 DRM_ERROR("Process %d using kernel context %d\n",
616 current->pid, lock.context);
617 return -EINVAL;
620 DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
621 lock.context, current->pid, dev->lock.hw_lock->lock,
622 lock.flags);
624 add_wait_queue(&dev->lock.lock_queue, &entry);
625 for (;;) {
626 if (!dev->lock.hw_lock) {
627 /* Device has been unregistered */
628 ret = -EINTR;
629 break;
631 if (drm_lock_take(&dev->lock.hw_lock->lock,
632 lock.context)) {
633 dev->lock.pid = current->pid;
634 dev->lock.lock_time = jiffies;
635 atomic_inc(&dev->total_locks);
636 break; /* Got lock */
639 /* Contention */
640 atomic_inc(&dev->total_sleeps);
641 current->state = TASK_INTERRUPTIBLE;
642 current->policy |= SCHED_YIELD;
643 schedule();
644 if (signal_pending(current)) {
645 ret = -ERESTARTSYS;
646 break;
649 current->state = TASK_RUNNING;
650 remove_wait_queue(&dev->lock.lock_queue, &entry);
652 if (!ret &&
653 (dev->last_context != lock.context))
654 ffb_context_switch(dev, dev->last_context, lock.context);
656 DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
658 return ret;
661 int ffb_unlock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
663 drm_file_t *priv = filp->private_data;
664 drm_device_t *dev = priv->dev;
665 drm_lock_t lock;
666 unsigned int old, new, prev, ctx;
667 int ret;
669 ret = copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock));
670 if (ret)
671 return -EFAULT;
673 if ((ctx = lock.context) == DRM_KERNEL_CONTEXT) {
674 DRM_ERROR("Process %d using kernel context %d\n",
675 current->pid, lock.context);
676 return -EINVAL;
679 DRM_DEBUG("%d frees lock (%d holds)\n",
680 lock.context,
681 _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
682 atomic_inc(&dev->total_unlocks);
683 if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
684 atomic_inc(&dev->total_contends);
686 /* We no longer really hold it, but if we are the next
687 * agent to request it then we should just be able to
688 * take it immediately and not eat the ioctl.
690 dev->lock.pid = 0;
692 __volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
694 do {
695 old = *plock;
696 new = ctx;
697 prev = cmpxchg(plock, old, new);
698 } while (prev != old);
701 wake_up_interruptible(&dev->lock.lock_queue);
703 return 0;
706 static void align_fb_mapping(struct vm_area_struct *vma)
708 unsigned long j, alignment;
710 j = vma->vm_end - vma->vm_start;
711 for (alignment = (4 * 1024 * 1024); alignment > PAGE_SIZE; alignment >>= 3)
712 if (j >= alignment)
713 break;
714 if (alignment > PAGE_SIZE) {
715 j = alignment;
716 alignment = j - (vma->vm_start & (j - 1));
717 if (alignment != j) {
718 struct vm_area_struct *vmm = find_vma(current->mm,vma->vm_start);
720 if (!vmm || vmm->vm_start >= vma->vm_end + alignment) {
721 vma->vm_start += alignment;
722 vma->vm_end += alignment;
728 /* The problem here is, due to virtual cache aliasing,
729 * we must make sure the shared memory area lands in the
730 * same dcache line for both the kernel and all drm clients.
732 static void align_shm_mapping(struct vm_area_struct *vma, unsigned long kvirt)
734 kvirt &= PAGE_SIZE;
735 if ((vma->vm_start & PAGE_SIZE) != kvirt) {
736 struct vm_area_struct *vmm = find_vma(current->mm, vma->vm_start);
738 if (!vmm || vmm->vm_start >= vma->vm_end + PAGE_SIZE) {
739 vma->vm_start += PAGE_SIZE;
740 vma->vm_end += PAGE_SIZE;
745 extern struct vm_operations_struct drm_vm_ops;
746 extern struct vm_operations_struct drm_vm_shm_ops;
748 static int ffb_mmap(struct file *filp, struct vm_area_struct *vma)
750 drm_file_t *priv = filp->private_data;
751 drm_device_t *dev = priv->dev;
752 drm_map_t *map = NULL;
753 ffb_dev_priv_t *ffb_priv;
754 int i, minor;
756 DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
757 vma->vm_start, vma->vm_end, VM_OFFSET(vma));
759 minor = MINOR(filp->f_dentry->d_inode->i_rdev);
760 ffb_priv = NULL;
761 for (i = 0; i < ffb_dev_table_size; i++) {
762 ffb_priv = (ffb_dev_priv_t *) (ffb_dev_table[i] + 1);
763 if (ffb_priv->miscdev.minor == minor)
764 break;
766 if (i >= ffb_dev_table_size)
767 return -EINVAL;
769 /* We don't support/need dma mappings, so... */
770 if (!VM_OFFSET(vma))
771 return -EINVAL;
773 for (i = 0; i < dev->map_count; i++) {
774 unsigned long off;
776 map = dev->maplist[i];
778 /* Ok, a little hack to make 32-bit apps work. */
779 off = (map->offset & 0xffffffff);
780 if (off == VM_OFFSET(vma))
781 break;
784 if (i >= dev->map_count)
785 return -EINVAL;
787 if (!map ||
788 ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
789 return -EPERM;
791 if (map->size != (vma->vm_end - vma->vm_start))
792 return -EINVAL;
794 /* Set read-only attribute before mappings are created
795 * so it works for fb/reg maps too.
797 if (map->flags & _DRM_READ_ONLY)
798 vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
799 __pte(pgprot_val(vma->vm_page_prot)))));
801 switch (map->type) {
802 case _DRM_FRAME_BUFFER:
803 align_fb_mapping(vma);
804 /* FALLTHROUGH */
806 case _DRM_REGISTERS:
807 /* In order to handle 32-bit drm apps/xserver we
808 * play a trick. The mappings only really specify
809 * the 32-bit offset from the cards 64-bit base
810 * address, and we just add in the base here.
812 vma->vm_flags |= VM_IO;
813 if (io_remap_page_range(vma->vm_start,
814 ffb_priv->card_phys_base + VM_OFFSET(vma),
815 vma->vm_end - vma->vm_start,
816 vma->vm_page_prot, 0))
817 return -EAGAIN;
819 vma->vm_ops = &drm_vm_ops;
820 break;
821 case _DRM_SHM:
822 align_shm_mapping(vma, (unsigned long)dev->lock.hw_lock);
823 vma->vm_ops = &drm_vm_shm_ops;
825 /* Don't let this area swap. Change when
826 * DRM_KERNEL advisory is supported.
828 vma->vm_flags |= VM_LOCKED;
829 break;
830 default:
831 return -EINVAL; /* This should never happen. */
834 vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
836 vma->vm_file = filp; /* Needed for drm_vm_open() */
837 drm_vm_open(vma);
838 return 0;
module_init(ffb_init);
module_exit(ffb_cleanup);