drivers/char/drm/ffb_drv.c
/* $Id: ffb_drv.c,v 1.7 2000/11/12 10:01:41 davem Exp $
 * ffb_drv.c: Creator/Creator3D direct rendering driver.
 *
 * Copyright (C) 2000 David S. Miller (davem@redhat.com)
 */

#include "drmP.h"

#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <asm/oplib.h>
#include <asm/upa.h>

#include "ffb_drv.h"

#define FFB_NAME	"ffb"
#define FFB_DESC	"Creator/Creator3D"
#define FFB_DATE	"20000517"
#define FFB_MAJOR	0
#define FFB_MINOR	0
#define FFB_PATCHLEVEL	1

/* Forward declarations. */
int ffb_init(void);
void ffb_cleanup(void);
static int ffb_version(struct inode *inode, struct file *filp,
		       unsigned int cmd, unsigned long arg);
static int ffb_open(struct inode *inode, struct file *filp);
static int ffb_release(struct inode *inode, struct file *filp);
static int ffb_ioctl(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg);
static int ffb_lock(struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg);
static int ffb_unlock(struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg);
static int ffb_mmap(struct file *filp, struct vm_area_struct *vma);

/* From ffb_context.c */
extern int ffb_resctx(struct inode *, struct file *, unsigned int, unsigned long);
extern int ffb_addctx(struct inode *, struct file *, unsigned int, unsigned long);
extern int ffb_modctx(struct inode *, struct file *, unsigned int, unsigned long);
extern int ffb_getctx(struct inode *, struct file *, unsigned int, unsigned long);
extern int ffb_switchctx(struct inode *, struct file *, unsigned int, unsigned long);
extern int ffb_newctx(struct inode *, struct file *, unsigned int, unsigned long);
extern int ffb_rmctx(struct inode *, struct file *, unsigned int, unsigned long);
extern int ffb_context_switch(drm_device_t *, int, int);

static struct file_operations ffb_fops = {
	owner:		THIS_MODULE,
	open:		ffb_open,
	flush:		drm_flush,
	release:	ffb_release,
	ioctl:		ffb_ioctl,
	mmap:		ffb_mmap,
	read:		drm_read,
	fasync:		drm_fasync,
	poll:		drm_poll,
};

/* This is just a template, we make a new copy for each FFB
 * we discover at init time so that each one gets a unique
 * misc device minor number.
 */
static struct miscdevice ffb_misc = {
	minor:	MISC_DYNAMIC_MINOR,
	name:	FFB_NAME,
	fops:	&ffb_fops,
};

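/* Each entry is { handler, auth_needed, root_only } per the 2.4
 * drm_ioctl_desc_t layout; ffb_ioctl() below enforces both flags,
 * requiring an authenticated client and CAP_SYS_ADMIN respectively.
 */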
static drm_ioctl_desc_t ffb_ioctls[] = {
	[DRM_IOCTL_NR(DRM_IOCTL_VERSION)]    = { ffb_version,	 0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)]  = { drm_getmagic,	 0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)]  = { drm_irq_busid, 0, 1 }, /* XXX */

	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)]	     = { drm_block,	 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]    = { drm_unblock,	 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]    = { drm_addmap,	 1, 1 },

	/* The implementation is currently a nop just like on tdfx.
	 * Later we can do something more clever. -DaveM
	 */
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)]    = { ffb_addctx,	 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)]     = { ffb_rmctx,	 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)]    = { ffb_modctx,	 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)]    = { ffb_getctx,	 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { ffb_switchctx, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)]    = { ffb_newctx,	 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)]    = { ffb_resctx,	 1, 0 },

	[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)]   = { drm_adddraw,	 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)]    = { drm_rmdraw,	 1, 1 },

	[DRM_IOCTL_NR(DRM_IOCTL_LOCK)]	     = { ffb_lock,	 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)]     = { ffb_unlock,	 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_FINISH)]     = { drm_finish,	 1, 0 },
};
#define FFB_IOCTL_COUNT		DRM_ARRAY_SIZE(ffb_ioctls)

#ifdef MODULE
static char *ffb = NULL;
#endif

MODULE_AUTHOR("David S. Miller (davem@redhat.com)");
MODULE_DESCRIPTION("Sun Creator/Creator3D DRI");

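/* Undo everything ffb_setup() and the ioctls built up: the name
 * strings, the magic hash, the debug vma list, and every
 * register/framebuffer/SHM map.  Runs under dev->struct_sem; called
 * on last close and from ffb_cleanup().
 */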
static int ffb_takedown(drm_device_t *dev)
{
	int i;
	drm_magic_entry_t *pt, *next;
	drm_map_t *map;
	drm_vma_entry_t *vma, *vma_next;

	DRM_DEBUG("\n");

	down(&dev->struct_sem);
	del_timer(&dev->timer);

	if (dev->devname) {
		drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
		dev->devname = NULL;
	}

	if (dev->unique) {
		drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
	}

	/* Clear pid list */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		for (pt = dev->magiclist[i].head; pt; pt = next) {
			next = pt->next;
			drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
		}
		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
	}

	/* Clear vma list (only built for debugging) */
	if (dev->vmalist) {
		for (vma = dev->vmalist; vma; vma = vma_next) {
			vma_next = vma->next;
			drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
		}
		dev->vmalist = NULL;
	}

	/* Clear map area information */
	if (dev->maplist) {
		for (i = 0; i < dev->map_count; i++) {
			map = dev->maplist[i];
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				drm_ioremapfree(map->handle, map->size);
				break;

			case _DRM_SHM:
				drm_free_pages((unsigned long)map->handle,
					       drm_order(map->size)
					       - PAGE_SHIFT,
					       DRM_MEM_SAREA);
				break;

			default:
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}

		drm_free(dev->maplist,
			 dev->map_count * sizeof(*dev->maplist),
			 DRM_MEM_MAPS);
		dev->maplist = NULL;
		dev->map_count = 0;
	}

	if (dev->lock.hw_lock) {
		dev->lock.hw_lock = NULL; /* SHM removed */
		dev->lock.pid = 0;
		wake_up_interruptible(&dev->lock.lock_queue);
	}
	up(&dev->struct_sem);

	return 0;
}

drm_device_t **ffb_dev_table;
static int ffb_dev_table_size;

static void get_ffb_type(ffb_dev_priv_t *ffb_priv, int instance)
{
	volatile unsigned char *strap_bits;
	unsigned char val;

	strap_bits = (volatile unsigned char *)
		(ffb_priv->card_phys_base + 0x00200000UL);

	/* Don't ask, you have to read the value twice for whatever
	 * reason to get correct contents.
	 */
	val = upa_readb(strap_bits);
	val = upa_readb(strap_bits);
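	/* Bits 6:5 of the strap value select the board family (FFB1,
	 * FFB2/vertical, FFB2/horizontal) and bits 4:3 the variant
	 * within it, hence the 0x78 mask and the
	 * (family << 5) | (variant << 3) case labels below.
	 */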
	switch (val & 0x78) {
	case (0x0 << 5) | (0x0 << 3):
		ffb_priv->ffb_type = ffb1_prototype;
		printk("ffb%d: Detected FFB1 pre-FCS prototype\n", instance);
		break;
	case (0x0 << 5) | (0x1 << 3):
		ffb_priv->ffb_type = ffb1_standard;
		printk("ffb%d: Detected FFB1\n", instance);
		break;
	case (0x0 << 5) | (0x3 << 3):
		ffb_priv->ffb_type = ffb1_speedsort;
		printk("ffb%d: Detected FFB1-SpeedSort\n", instance);
		break;
	case (0x1 << 5) | (0x0 << 3):
		ffb_priv->ffb_type = ffb2_prototype;
		printk("ffb%d: Detected FFB2/vertical pre-FCS prototype\n", instance);
		break;
	case (0x1 << 5) | (0x1 << 3):
		ffb_priv->ffb_type = ffb2_vertical;
		printk("ffb%d: Detected FFB2/vertical\n", instance);
		break;
	case (0x1 << 5) | (0x2 << 3):
		ffb_priv->ffb_type = ffb2_vertical_plus;
		printk("ffb%d: Detected FFB2+/vertical\n", instance);
		break;
	case (0x2 << 5) | (0x0 << 3):
		ffb_priv->ffb_type = ffb2_horizontal;
		printk("ffb%d: Detected FFB2/horizontal\n", instance);
		break;
	case (0x2 << 5) | (0x2 << 3):
		ffb_priv->ffb_type = ffb2_horizontal;
		printk("ffb%d: Detected FFB2+/horizontal\n", instance);
		break;
	default:
		ffb_priv->ffb_type = ffb2_vertical;
		printk("ffb%d: Unknown boardID[%08x], assuming FFB2\n", instance, val);
		break;
	}
}

static int __init ffb_init_one(int prom_node, int instance)
{
	struct linux_prom64_registers regs[2*PROMREG_MAX];
	drm_device_t *dev;
	ffb_dev_priv_t *ffb_priv;
	int ret, i;

	dev = kmalloc(sizeof(drm_device_t) + sizeof(ffb_dev_priv_t), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	memset(dev, 0, sizeof(*dev));
	spin_lock_init(&dev->count_lock);
	sema_init(&dev->struct_sem, 1);

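	/* The per-card private data lives immediately after the
	 * drm_device_t in the same allocation, which is why the
	 * kmalloc() above sums the two sizes; everyone else recovers
	 * it with the same (dev + 1) arithmetic.
	 */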
	ffb_priv = (ffb_dev_priv_t *) (dev + 1);
	ffb_priv->prom_node = prom_node;
	if (prom_getproperty(ffb_priv->prom_node, "reg",
			     (void *)regs, sizeof(regs)) <= 0) {
		kfree(dev);
		return -EINVAL;
	}
	ffb_priv->card_phys_base = regs[0].phys_addr;
	ffb_priv->regs = (ffb_fbcPtr)
		(regs[0].phys_addr + 0x00600000UL);
	get_ffb_type(ffb_priv, instance);
	for (i = 0; i < FFB_MAX_CTXS; i++)
		ffb_priv->hw_state[i] = NULL;

	ffb_dev_table[instance] = dev;

#ifdef MODULE
	drm_parse_options(ffb);
#endif

	memcpy(&ffb_priv->miscdev, &ffb_misc, sizeof(ffb_misc));
	ret = misc_register(&ffb_priv->miscdev);
	if (ret) {
		ffb_dev_table[instance] = NULL;
		kfree(dev);
		return ret;
	}

	dev->device = MKDEV(MISC_MAJOR, ffb_priv->miscdev.minor);
	dev->name = FFB_NAME;

	drm_mem_init();
	drm_proc_init(dev);

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d at %016lx\n",
		 FFB_NAME,
		 FFB_MAJOR,
		 FFB_MINOR,
		 FFB_PATCHLEVEL,
		 FFB_DATE,
		 ffb_priv->miscdev.minor,
		 ffb_priv->card_phys_base);

	return 0;
}

static int __init ffb_init_dev_table(void)
{
	int root, node;
	int total = 0;

	root = prom_getchild(prom_root_node);
	for (node = prom_searchsiblings(root, "SUNW,ffb"); node;
	     node = prom_searchsiblings(prom_getsibling(node), "SUNW,ffb"))
		total++;

	ffb_dev_table = kmalloc(sizeof(drm_device_t *) * total, GFP_KERNEL);
	if (!ffb_dev_table)
		return -ENOMEM;

	ffb_dev_table_size = total;

	return 0;
}

int __init ffb_init(void)
{
	int root, node, instance, ret;

	ret = ffb_init_dev_table();
	if (ret)
		return ret;

	instance = 0;
	root = prom_getchild(prom_root_node);
	for (node = prom_searchsiblings(root, "SUNW,ffb"); node;
	     node = prom_searchsiblings(prom_getsibling(node), "SUNW,ffb")) {
		ret = ffb_init_one(node, instance);
		if (ret)
			return ret;
		instance++;
	}

	return 0;
}

void __exit ffb_cleanup(void)
{
	int instance;

	DRM_DEBUG("\n");

	drm_proc_cleanup();
	for (instance = 0; instance < ffb_dev_table_size; instance++) {
		drm_device_t *dev = ffb_dev_table[instance];
		ffb_dev_priv_t *ffb_priv;

		if (!dev)
			continue;

		ffb_priv = (ffb_dev_priv_t *) (dev + 1);
		if (misc_deregister(&ffb_priv->miscdev)) {
			DRM_ERROR("Cannot unload module\n");
		} else {
			DRM_INFO("Module unloaded\n");
		}
		ffb_takedown(dev);
		kfree(dev);
		ffb_dev_table[instance] = NULL;
	}
	kfree(ffb_dev_table);
	ffb_dev_table = NULL;
	ffb_dev_table_size = 0;
}

static int ffb_version(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
{
	drm_version_t version;
	int len, ret;

	ret = copy_from_user(&version, (drm_version_t *)arg, sizeof(version));
	if (ret)
		return -EFAULT;

	version.version_major = FFB_MAJOR;
	version.version_minor = FFB_MINOR;
	version.version_patchlevel = FFB_PATCHLEVEL;

	len = strlen(FFB_NAME);
	if (len > version.name_len)
		len = version.name_len;
	version.name_len = len;
	if (len && version.name) {
		ret = copy_to_user(version.name, FFB_NAME, len);
		if (ret)
			return -EFAULT;
	}

	len = strlen(FFB_DATE);
	if (len > version.date_len)
		len = version.date_len;
	version.date_len = len;
	if (len && version.date) {
		ret = copy_to_user(version.date, FFB_DATE, len);
		if (ret)
			return -EFAULT;
	}

	len = strlen(FFB_DESC);
	if (len > version.desc_len)
		len = version.desc_len;
	version.desc_len = len;
	if (len && version.desc) {
		ret = copy_to_user(version.desc, FFB_DESC, len);
		if (ret)
			return -EFAULT;
	}

	ret = copy_to_user((drm_version_t *) arg, &version, sizeof(version));
	if (ret)
		ret = -EFAULT;

	return ret;
}

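/* Reset all per-open device state and statistics counters.
 * ffb_open() runs this on the first open of each minor.
 */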
static int ffb_setup(drm_device_t *dev)
{
	int i;

	atomic_set(&dev->ioctl_count, 0);
	atomic_set(&dev->vma_count, 0);
	dev->buf_use = 0;
	atomic_set(&dev->buf_alloc, 0);

	atomic_set(&dev->total_open, 0);
	atomic_set(&dev->total_close, 0);
	atomic_set(&dev->total_ioctl, 0);
	atomic_set(&dev->total_irq, 0);
	atomic_set(&dev->total_ctx, 0);
	atomic_set(&dev->total_locks, 0);
	atomic_set(&dev->total_unlocks, 0);
	atomic_set(&dev->total_contends, 0);
	atomic_set(&dev->total_sleeps, 0);

	for (i = 0; i < DRM_HASH_SIZE; i++) {
		dev->magiclist[i].head = NULL;
		dev->magiclist[i].tail = NULL;
	}

	dev->maplist = NULL;
	dev->map_count = 0;
	dev->vmalist = NULL;
	dev->lock.hw_lock = NULL;
	init_waitqueue_head(&dev->lock.lock_queue);
	dev->queue_count = 0;
	dev->queue_reserved = 0;
	dev->queue_slots = 0;
	dev->queuelist = NULL;
	dev->irq = 0;
	dev->context_flag = 0;
	dev->interrupt_flag = 0;
	dev->dma = 0;
	dev->dma_flag = 0;
	dev->last_context = 0;
	dev->last_switch = 0;
	dev->last_checked = 0;
	init_timer(&dev->timer);
	init_waitqueue_head(&dev->context_wait);

	dev->ctx_start = 0;
	dev->lck_start = 0;

	dev->buf_rp = dev->buf;
	dev->buf_wp = dev->buf;
	dev->buf_end = dev->buf + DRM_BSZ;
	dev->buf_async = NULL;
	init_waitqueue_head(&dev->buf_readers);
	init_waitqueue_head(&dev->buf_writers);

	return 0;
}

static int ffb_open(struct inode *inode, struct file *filp)
{
	drm_device_t *dev;
	int minor, i;
	int ret = 0;

	minor = MINOR(inode->i_rdev);
	for (i = 0; i < ffb_dev_table_size; i++) {
		ffb_dev_priv_t *ffb_priv;

		ffb_priv = (ffb_dev_priv_t *) (ffb_dev_table[i] + 1);

		if (ffb_priv->miscdev.minor == minor)
			break;
	}

	if (i >= ffb_dev_table_size)
		return -EINVAL;

	dev = ffb_dev_table[i];
	if (!dev)
		return -EINVAL;

	DRM_DEBUG("open_count = %d\n", dev->open_count);
	ret = drm_open_helper(inode, filp, dev);
	if (!ret) {
		atomic_inc(&dev->total_open);
		spin_lock(&dev->count_lock);
		if (!dev->open_count++) {
			spin_unlock(&dev->count_lock);
			return ffb_setup(dev);
		}
		spin_unlock(&dev->count_lock);
	}

	return ret;
}

static int ffb_release(struct inode *inode, struct file *filp)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev;
	int ret = 0;

	lock_kernel();
	dev = priv->dev;
	DRM_DEBUG("open_count = %d\n", dev->open_count);
	if (dev->lock.hw_lock != NULL
	    && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
	    && dev->lock.pid == current->pid) {
		ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) (dev + 1);
		int context = _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock);
		int idx;

		/* We have to free up the rogue hw context state
		 * holding error or else we will leak it.
		 */
		idx = context - 1;
		if (fpriv->hw_state[idx] != NULL) {
			kfree(fpriv->hw_state[idx]);
			fpriv->hw_state[idx] = NULL;
		}
	}

	ret = drm_release(inode, filp);

	if (!ret) {
		atomic_inc(&dev->total_close);
		spin_lock(&dev->count_lock);
		if (!--dev->open_count) {
			if (atomic_read(&dev->ioctl_count) || dev->blocked) {
				DRM_ERROR("Device busy: %d %d\n",
					  atomic_read(&dev->ioctl_count),
					  dev->blocked);
				spin_unlock(&dev->count_lock);
				unlock_kernel();
				return -EBUSY;
			}
			spin_unlock(&dev->count_lock);
			ret = ffb_takedown(dev);
			unlock_kernel();
			return ret;
		}
		spin_unlock(&dev->count_lock);
	}

	unlock_kernel();
	return ret;
}

static int ffb_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
{
	int nr = DRM_IOCTL_NR(cmd);
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_ioctl_desc_t *ioctl;
	drm_ioctl_t *func;
	int ret;

	atomic_inc(&dev->ioctl_count);
	atomic_inc(&dev->total_ioctl);
	++priv->ioctl_count;

	DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
		  current->pid, cmd, nr, dev->device, priv->authenticated);

	if (nr >= FFB_IOCTL_COUNT) {
		ret = -EINVAL;
	} else {
		ioctl = &ffb_ioctls[nr];
		func = ioctl->func;

		if (!func) {
			DRM_DEBUG("no function\n");
			ret = -EINVAL;
		} else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
			   || (ioctl->auth_needed && !priv->authenticated)) {
			ret = -EACCES;
		} else {
			ret = (func)(inode, filp, cmd, arg);
		}
	}

	atomic_dec(&dev->ioctl_count);

	return ret;
}

static int ffb_lock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	DECLARE_WAITQUEUE(entry, current);
	int ret = 0;
	drm_lock_t lock;

	ret = copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock));
	if (ret)
		return -EFAULT;

	if (lock.context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  current->pid, lock.context);
		return -EINVAL;
	}

	DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
		  lock.context, current->pid, dev->lock.hw_lock->lock,
		  lock.flags);

	add_wait_queue(&dev->lock.lock_queue, &entry);
	for (;;) {
		if (!dev->lock.hw_lock) {
			/* Device has been unregistered */
			ret = -EINTR;
			break;
		}
		if (drm_lock_take(&dev->lock.hw_lock->lock,
				  lock.context)) {
			dev->lock.pid = current->pid;
			dev->lock.lock_time = jiffies;
			atomic_inc(&dev->total_locks);
			break;	/* Got lock */
		}

		/* Contention */
		atomic_inc(&dev->total_sleeps);
		current->state = TASK_INTERRUPTIBLE;
		current->policy |= SCHED_YIELD;
		schedule();
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&dev->lock.lock_queue, &entry);

	if (!ret &&
	    (dev->last_context != lock.context))
		ffb_context_switch(dev, dev->last_context, lock.context);

	DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");

	return ret;
}

int ffb_unlock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_lock_t lock;
	unsigned int old, new, prev, ctx;
	int ret;

	ret = copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock));
	if (ret)
		return -EFAULT;

	if ((ctx = lock.context) == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  current->pid, lock.context);
		return -EINVAL;
	}

	DRM_DEBUG("%d frees lock (%d holds)\n",
		  lock.context,
		  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
	atomic_inc(&dev->total_unlocks);
	if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
		atomic_inc(&dev->total_contends);

	/* We no longer really hold it, but if we are the next
	 * agent to request it then we should just be able to
	 * take it immediately and not eat the ioctl.
	 */
	dev->lock.pid = 0;
	{
		__volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;

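		/* Atomically replace the lock word with the bare
		 * context id: the held/contended bits are dropped, so
		 * the next drm_lock_take() can succeed immediately.
		 */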
		do {
			old  = *plock;
			new  = ctx;
			prev = cmpxchg(plock, old, new);
		} while (prev != old);
	}

	wake_up_interruptible(&dev->lock.lock_queue);

	return 0;
}

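/* Pick the largest of the 4M/512K/64K alignments that fits the
 * request and, when the address space above the VMA is still free,
 * slide the mapping up to that boundary.  Presumably this lets the
 * sparc64 TLB back the framebuffer with larger page sizes.
 */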
static void align_fb_mapping(struct vm_area_struct *vma)
{
	unsigned long j, alignment;

	j = vma->vm_end - vma->vm_start;
	for (alignment = (4 * 1024 * 1024); alignment > PAGE_SIZE; alignment >>= 3)
		if (j >= alignment)
			break;
	if (alignment > PAGE_SIZE) {
		j = alignment;
		alignment = j - (vma->vm_start & (j - 1));
		if (alignment != j) {
			struct vm_area_struct *vmm = find_vma(current->mm, vma->vm_start);

			if (!vmm || vmm->vm_start >= vma->vm_end + alignment) {
				vma->vm_start += alignment;
				vma->vm_end += alignment;
			}
		}
	}
}

/* The problem here is, due to virtual cache aliasing,
 * we must make sure the shared memory area lands in the
 * same dcache line for both the kernel and all drm clients.
 */
static void align_shm_mapping(struct vm_area_struct *vma, unsigned long kvirt)
{
	kvirt &= PAGE_SIZE;
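	/* PAGE_SIZE is 8K on sparc64, so "& PAGE_SIZE" isolates bit 13,
	 * the dcache colour bit; when the user mapping's colour differs
	 * from the kernel virtual address's, nudge it by one page.
	 */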
	if ((vma->vm_start & PAGE_SIZE) != kvirt) {
		struct vm_area_struct *vmm = find_vma(current->mm, vma->vm_start);

		if (!vmm || vmm->vm_start >= vma->vm_end + PAGE_SIZE) {
			vma->vm_start += PAGE_SIZE;
			vma->vm_end += PAGE_SIZE;
		}
	}
}

extern struct vm_operations_struct drm_vm_ops;
extern struct vm_operations_struct drm_vm_shm_ops;
extern struct vm_operations_struct drm_vm_shm_lock_ops;

static int ffb_mmap(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_map_t *map = NULL;
	ffb_dev_priv_t *ffb_priv;
	int i, minor;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

	minor = MINOR(filp->f_dentry->d_inode->i_rdev);
	ffb_priv = NULL;
	for (i = 0; i < ffb_dev_table_size; i++) {
		ffb_priv = (ffb_dev_priv_t *) (ffb_dev_table[i] + 1);
		if (ffb_priv->miscdev.minor == minor)
			break;
	}
	if (i >= ffb_dev_table_size)
		return -EINVAL;

	/* We don't support/need dma mappings, so... */
	if (!VM_OFFSET(vma))
		return -EINVAL;

	for (i = 0; i < dev->map_count; i++) {
		unsigned long off;

		map = dev->maplist[i];

		/* Ok, a little hack to make 32-bit apps work. */
		off = (map->offset & 0xffffffff);
		if (off == VM_OFFSET(vma))
			break;
	}

	if (i >= dev->map_count)
		return -EINVAL;

	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	if (map->size != (vma->vm_end - vma->vm_start))
		return -EINVAL;

	/* Set read-only attribute before mappings are created
	 * so it works for fb/reg maps too.
	 */
	if (map->flags & _DRM_READ_ONLY)
		vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
			__pte(pgprot_val(vma->vm_page_prot)))));

	switch (map->type) {
	case _DRM_FRAME_BUFFER:
		align_fb_mapping(vma);
		/* FALLTHROUGH */

	case _DRM_REGISTERS:
		/* In order to handle 32-bit drm apps/xserver we
		 * play a trick.  The mappings only really specify
		 * the 32-bit offset from the card's 64-bit base
		 * address, and we just add in the base here.
		 */
		vma->vm_flags |= VM_IO;
		if (io_remap_page_range(vma->vm_start,
					ffb_priv->card_phys_base + VM_OFFSET(vma),
					vma->vm_end - vma->vm_start,
					vma->vm_page_prot, 0))
			return -EAGAIN;

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_SHM:
		align_shm_mapping(vma, (unsigned long)dev->lock.hw_lock);
		if (map->flags & _DRM_CONTAINS_LOCK)
			vma->vm_ops = &drm_vm_shm_lock_ops;
		else {
			vma->vm_ops = &drm_vm_shm_ops;
			vma->vm_private_data = (void *) map;
		}

		/* Don't let this area swap.  Change when
		 * DRM_KERNEL advisory is supported.
		 */
		vma->vm_flags |= VM_LOCKED;
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}

	vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open(vma);
	return 0;
}

module_init(ffb_init);
module_exit(ffb_cleanup);