/* $Id: ffb_drv.c,v 1.7 2000/11/12 10:01:41 davem Exp $
 * ffb_drv.c: Creator/Creator3D direct rendering driver.
 *
 * Copyright (C) 2000 David S. Miller (davem@redhat.com)
 */

#include "drmP.h"

#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <asm/oplib.h>
#include <asm/upa.h>

#include "ffb_drv.h"

#define FFB_NAME	"ffb"
#define FFB_DESC	"Creator/Creator3D"
#define FFB_DATE	"20000517"
#define FFB_MAJOR	0	/* Version triple reported by ffb_version();  */
#define FFB_MINOR	0	/* 0.0.1 is assumed here, the original major/ */
#define FFB_PATCHLEVEL	1	/* minor values were elided.                  */

/* Forward declarations. */
void ffb_cleanup(void);
static int ffb_version(struct inode *inode, struct file *filp,
		       unsigned int cmd, unsigned long arg);
static int ffb_open(struct inode *inode, struct file *filp);
static int ffb_release(struct inode *inode, struct file *filp);
static int ffb_ioctl(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg);
static int ffb_lock(struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg);
static int ffb_unlock(struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg);
static int ffb_mmap(struct file *filp, struct vm_area_struct *vma);

/* From ffb_context.c */
extern int ffb_resctx(struct inode *, struct file *, unsigned int, unsigned long);
extern int ffb_addctx(struct inode *, struct file *, unsigned int, unsigned long);
extern int ffb_modctx(struct inode *, struct file *, unsigned int, unsigned long);
extern int ffb_getctx(struct inode *, struct file *, unsigned int, unsigned long);
extern int ffb_switchctx(struct inode *, struct file *, unsigned int, unsigned long);
extern int ffb_newctx(struct inode *, struct file *, unsigned int, unsigned long);
extern int ffb_rmctx(struct inode *, struct file *, unsigned int, unsigned long);
extern int ffb_context_switch(drm_device_t *, int, int);

static struct file_operations ffb_fops = {
	/* The original initializer body was elided; the handler set
	 * below is reconstructed from the forward declarations above.
	 */
	open:		ffb_open,
	release:	ffb_release,
	ioctl:		ffb_ioctl,
	mmap:		ffb_mmap,
};

/* This is just a template, we make a new copy for each FFB
 * we discover at init time so that each one gets a unique
 * misc device minor number.
 */
static struct miscdevice ffb_misc = {
	minor:	MISC_DYNAMIC_MINOR,
	name:	FFB_NAME,
	fops:	&ffb_fops,
};

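/* See ffb_init_one() below: each probed board memcpy()s this template
 * into its ffb_dev_priv_t and registers the copy, so every instance is
 * assigned its own dynamic minor.
 */
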
static drm_ioctl_desc_t ffb_ioctls[] = {
	/* Entry format: { handler, auth_needed, root_only } */
	[DRM_IOCTL_NR(DRM_IOCTL_VERSION)]    = { ffb_version,	  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique,  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)]  = { drm_getmagic,	  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)]  = { drm_irq_busid,  0, 1 }, /* XXX */

	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique,  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)]	     = { drm_block,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]    = { drm_unblock,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic,  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]    = { drm_addmap,	  1, 1 },

	/* The implementation is currently a nop just like on tdfx.
	 * Later we can do something more clever. -DaveM
	 */
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)]    = { ffb_addctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)]     = { ffb_rmctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)]    = { ffb_modctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)]    = { ffb_getctx,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { ffb_switchctx,  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)]    = { ffb_newctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)]    = { ffb_resctx,	  1, 0 },

	[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)]   = { drm_adddraw,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)]    = { drm_rmdraw,	  1, 1 },

	[DRM_IOCTL_NR(DRM_IOCTL_LOCK)]	     = { ffb_lock,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)]     = { ffb_unlock,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_FINISH)]     = { drm_finish,	  1, 0 },
};

#define FFB_IOCTL_COUNT	DRM_ARRAY_SIZE(ffb_ioctls)

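/* Usage sketch, not part of the driver: ffb_ioctl() below indexes this
 * table with DRM_IOCTL_NR(cmd), so a client that has not yet
 * authenticated may still issue the entries flagged auth_needed == 0,
 * e.g. DRM_IOCTL_VERSION (the device node name here is an assumption):
 *
 *	int fd = open("/dev/ffb0", O_RDWR);
 *	drm_version_t v;
 *	memset(&v, 0, sizeof(v));
 *	ioctl(fd, DRM_IOCTL_VERSION, &v);
 */
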
/* Option string, handed to drm_parse_options() during init. */
static char *ffb = NULL;

MODULE_AUTHOR("David S. Miller (davem@redhat.com)");
MODULE_DESCRIPTION("Sun Creator/Creator3D DRI");

static int ffb_takedown(drm_device_t *dev)
{
	int i;
	drm_magic_entry_t *pt, *next;
	drm_map_t *map;
	drm_vma_entry_t *vma, *vma_next;

	down(&dev->struct_sem);
	del_timer(&dev->timer);

	if (dev->devname) {
		drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
		dev->devname = NULL;
	}

	if (dev->unique) {
		drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
	}

	/* Clear magic hash chains. */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		for (pt = dev->magiclist[i].head; pt; pt = next) {
			next = pt->next;
			drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
		}
		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
	}

	/* Clear vma list (only built for debugging) */
	for (vma = dev->vmalist; vma; vma = vma_next) {
		vma_next = vma->next;
		drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
	}
	dev->vmalist = NULL;

	/* Clear map area information */
	if (dev->maplist) {
		for (i = 0; i < dev->map_count; i++) {
			map = dev->maplist[i];
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				drm_ioremapfree(map->handle, map->size);
				break;

			case _DRM_SHM:
				drm_free_pages((unsigned long)map->handle,
					       drm_order(map->size) - PAGE_SHIFT,
					       DRM_MEM_SAREA);
				break;
			};
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
		drm_free(dev->maplist,
			 dev->map_count * sizeof(*dev->maplist),
			 DRM_MEM_MAPS);
		dev->maplist = NULL;
		dev->map_count = 0;
	}

	if (dev->lock.hw_lock) {
		dev->lock.hw_lock = NULL; /* SHM removed */
		dev->lock.pid = 0;
		wake_up_interruptible(&dev->lock.lock_queue);
	}
	up(&dev->struct_sem);

	return 0;
}

drm_device_t **ffb_dev_table;
static int ffb_dev_table_size;

static void get_ffb_type(ffb_dev_priv_t *ffb_priv, int instance)
{
	volatile unsigned char *strap_bits;
	unsigned char val;

	strap_bits = (volatile unsigned char *)
		(ffb_priv->card_phys_base + 0x00200000UL);

	/* Don't ask, you have to read the value twice for whatever
	 * reason to get correct contents.
	 */
	val = upa_readb(strap_bits);
	val = upa_readb(strap_bits);
	switch (val & 0x78) {
	case (0x0 << 5) | (0x0 << 3):
		ffb_priv->ffb_type = ffb1_prototype;
		printk("ffb%d: Detected FFB1 pre-FCS prototype\n", instance);
		break;
	case (0x0 << 5) | (0x1 << 3):
		ffb_priv->ffb_type = ffb1_standard;
		printk("ffb%d: Detected FFB1\n", instance);
		break;
	case (0x0 << 5) | (0x3 << 3):
		ffb_priv->ffb_type = ffb1_speedsort;
		printk("ffb%d: Detected FFB1-SpeedSort\n", instance);
		break;
	case (0x1 << 5) | (0x0 << 3):
		ffb_priv->ffb_type = ffb2_prototype;
		printk("ffb%d: Detected FFB2/vertical pre-FCS prototype\n", instance);
		break;
	case (0x1 << 5) | (0x1 << 3):
		ffb_priv->ffb_type = ffb2_vertical;
		printk("ffb%d: Detected FFB2/vertical\n", instance);
		break;
	case (0x1 << 5) | (0x2 << 3):
		ffb_priv->ffb_type = ffb2_vertical_plus;
		printk("ffb%d: Detected FFB2+/vertical\n", instance);
		break;
	case (0x2 << 5) | (0x0 << 3):
		ffb_priv->ffb_type = ffb2_horizontal;
		printk("ffb%d: Detected FFB2/horizontal\n", instance);
		break;
	case (0x2 << 5) | (0x2 << 3):
		ffb_priv->ffb_type = ffb2_horizontal;
		printk("ffb%d: Detected FFB2+/horizontal\n", instance);
		break;
	default:
		ffb_priv->ffb_type = ffb2_vertical;
		printk("ffb%d: Unknown boardID[%08x], assuming FFB2\n", instance, val);
		break;
	};
}

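/* Reading the case labels above (an inference from the switch itself,
 * not a datasheet decode): masking with 0x78 keeps bits [6:5] (the
 * "<< 5" field, board family) and bits [4:3] (the "<< 3" field, board
 * variant); any unknown combination falls back to plain FFB2.
 */
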
static int __init ffb_init_one(int prom_node, int instance)
{
	struct linux_prom64_registers regs[2*PROMREG_MAX];
	drm_device_t *dev;
	ffb_dev_priv_t *ffb_priv;
	int ret, i;

	dev = kmalloc(sizeof(drm_device_t) + sizeof(ffb_dev_priv_t), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	memset(dev, 0, sizeof(*dev));
	spin_lock_init(&dev->count_lock);
	sema_init(&dev->struct_sem, 1);

	ffb_priv = (ffb_dev_priv_t *) (dev + 1);
	ffb_priv->prom_node = prom_node;
	if (prom_getproperty(ffb_priv->prom_node, "reg",
			     (void *)regs, sizeof(regs)) <= 0) {
		kfree(dev);
		return -EINVAL;
	}
	ffb_priv->card_phys_base = regs[0].phys_addr;
	ffb_priv->regs = (ffb_fbcPtr)
		(regs[0].phys_addr + 0x00600000UL);
	get_ffb_type(ffb_priv, instance);
	for (i = 0; i < FFB_MAX_CTXS; i++)
		ffb_priv->hw_state[i] = NULL;

	ffb_dev_table[instance] = dev;

	drm_parse_options(ffb);

	memcpy(&ffb_priv->miscdev, &ffb_misc, sizeof(ffb_misc));
	ret = misc_register(&ffb_priv->miscdev);
	if (ret) {
		ffb_dev_table[instance] = NULL;
		kfree(dev);
		return ret;
	}

	dev->device = MKDEV(MISC_MAJOR, ffb_priv->miscdev.minor);
	dev->name = FFB_NAME;

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d at %016lx\n",
		 FFB_NAME, FFB_MAJOR, FFB_MINOR, FFB_PATCHLEVEL, FFB_DATE,
		 ffb_priv->miscdev.minor,
		 ffb_priv->card_phys_base);

	return 0;
}

static int __init ffb_init_dev_table(void)
{
	int root, node;
	int total = 0;

	root = prom_getchild(prom_root_node);
	for (node = prom_searchsiblings(root, "SUNW,ffb"); node;
	     node = prom_searchsiblings(prom_getsibling(node), "SUNW,ffb"))
		total++;

	ffb_dev_table = kmalloc(sizeof(drm_device_t *) * total, GFP_KERNEL);
	if (!ffb_dev_table)
		return -ENOMEM;

	ffb_dev_table_size = total;

	return 0;
}

int __init ffb_init(void)
{
	int root, node, instance, ret;

	ret = ffb_init_dev_table();
	if (ret)
		return ret;

	instance = 0;
	root = prom_getchild(prom_root_node);
	for (node = prom_searchsiblings(root, "SUNW,ffb"); node;
	     node = prom_searchsiblings(prom_getsibling(node), "SUNW,ffb")) {
		ret = ffb_init_one(node, instance);
		if (ret)
			return ret;
		instance++;
	}

	return 0;
}

void __exit ffb_cleanup(void)
{
	int instance;

	for (instance = 0; instance < ffb_dev_table_size; instance++) {
		drm_device_t *dev = ffb_dev_table[instance];
		ffb_dev_priv_t *ffb_priv;

		if (!dev)
			continue;

		ffb_priv = (ffb_dev_priv_t *) (dev + 1);
		if (misc_deregister(&ffb_priv->miscdev)) {
			DRM_ERROR("Cannot unload module\n");
		} else {
			DRM_INFO("Module unloaded\n");
		}
		ffb_takedown(dev);
		kfree(dev);
		ffb_dev_table[instance] = NULL;
	}
	kfree(ffb_dev_table);
	ffb_dev_table = NULL;
	ffb_dev_table_size = 0;
}

static int ffb_version(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
{
	drm_version_t version;
	int len, ret;

	ret = copy_from_user(&version, (drm_version_t *)arg, sizeof(version));
	if (ret)
		return -EFAULT;

	version.version_major = FFB_MAJOR;
	version.version_minor = FFB_MINOR;
	version.version_patchlevel = FFB_PATCHLEVEL;

	len = strlen(FFB_NAME);
	if (len > version.name_len)
		len = version.name_len;
	version.name_len = len;
	if (len && version.name) {
		ret = copy_to_user(version.name, FFB_NAME, len);
		if (ret)
			return -EFAULT;
	}

	len = strlen(FFB_DATE);
	if (len > version.date_len)
		len = version.date_len;
	version.date_len = len;
	if (len && version.date) {
		ret = copy_to_user(version.date, FFB_DATE, len);
		if (ret)
			return -EFAULT;
	}

	len = strlen(FFB_DESC);
	if (len > version.desc_len)
		len = version.desc_len;
	version.desc_len = len;
	if (len && version.desc) {
		ret = copy_to_user(version.desc, FFB_DESC, len);
		if (ret)
			return -EFAULT;
	}

	ret = copy_to_user((drm_version_t *) arg, &version, sizeof(version));
	if (ret)
		return -EFAULT;
	return 0;
}

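/* Calling convention of ffb_version() above: the caller supplies buffer
 * pointers plus capacities in name_len/date_len/desc_len; the driver
 * truncates each string to the capacity given, writes the copied length
 * back, and does not NUL-terminate.  A hypothetical caller (buffer
 * sizes arbitrary):
 *
 *	char name[32], date[32], desc[64];
 *	drm_version_t v;
 *	memset(&v, 0, sizeof(v));
 *	v.name = name;  v.name_len = sizeof(name);
 *	v.date = date;  v.date_len = sizeof(date);
 *	v.desc = desc;  v.desc_len = sizeof(desc);
 *	ioctl(fd, DRM_IOCTL_VERSION, &v);
 */
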
static int ffb_setup(drm_device_t *dev)
{
	int i;

	atomic_set(&dev->ioctl_count, 0);
	atomic_set(&dev->vma_count, 0);
	dev->buf_use = 0;
	atomic_set(&dev->buf_alloc, 0);

	atomic_set(&dev->total_open, 0);
	atomic_set(&dev->total_close, 0);
	atomic_set(&dev->total_ioctl, 0);
	atomic_set(&dev->total_irq, 0);
	atomic_set(&dev->total_ctx, 0);
	atomic_set(&dev->total_locks, 0);
	atomic_set(&dev->total_unlocks, 0);
	atomic_set(&dev->total_contends, 0);
	atomic_set(&dev->total_sleeps, 0);

	for (i = 0; i < DRM_HASH_SIZE; i++) {
		dev->magiclist[i].head = NULL;
		dev->magiclist[i].tail = NULL;
	}

	dev->maplist	    = NULL;
	dev->map_count	    = 0;
	dev->vmalist	    = NULL;
	dev->lock.hw_lock   = NULL;
	init_waitqueue_head(&dev->lock.lock_queue);
	dev->queue_count    = 0;
	dev->queue_reserved = 0;
	dev->queue_slots    = 0;
	dev->queuelist	    = NULL;
	dev->irq	    = 0;
	dev->context_flag   = 0;
	dev->interrupt_flag = 0;
	dev->dma_flag	    = 0;
	dev->last_context   = 0;
	dev->last_switch    = 0;
	dev->last_checked   = 0;
	init_timer(&dev->timer);
	init_waitqueue_head(&dev->context_wait);

	dev->buf_rp	= dev->buf;
	dev->buf_wp	= dev->buf;
	dev->buf_end	= dev->buf + DRM_BSZ;
	dev->buf_async	= NULL;
	init_waitqueue_head(&dev->buf_readers);
	init_waitqueue_head(&dev->buf_writers);

	return 0;
}

static int ffb_open(struct inode *inode, struct file *filp)
{
	drm_device_t *dev;
	int minor, i;
	int ret = 0;

	minor = MINOR(inode->i_rdev);
	for (i = 0; i < ffb_dev_table_size; i++) {
		ffb_dev_priv_t *ffb_priv;

		ffb_priv = (ffb_dev_priv_t *) (ffb_dev_table[i] + 1);

		if (ffb_priv->miscdev.minor == minor)
			break;
	}

	if (i >= ffb_dev_table_size)
		return -EINVAL;

	dev = ffb_dev_table[i];
	if (!dev)
		return -EINVAL;

	DRM_DEBUG("open_count = %d\n", dev->open_count);
	ret = drm_open_helper(inode, filp, dev);
	if (!ret) {
		atomic_inc(&dev->total_open);
		spin_lock(&dev->count_lock);
		if (!dev->open_count++) {
			spin_unlock(&dev->count_lock);
			return ffb_setup(dev);
		}
		spin_unlock(&dev->count_lock);
	}

	return ret;
}

static int ffb_release(struct inode *inode, struct file *filp)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev;
	int ret = 0;

	lock_kernel();
	dev = priv->dev;
	DRM_DEBUG("open_count = %d\n", dev->open_count);
	if (dev->lock.hw_lock != NULL
	    && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
	    && dev->lock.pid == current->pid) {
		ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) (dev + 1);
		int context = _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock);
		int idx;

		/* We have to free up the rogue hw context state
		 * holding error or else we will leak it.
		 */
		idx = context - 1; /* hw_state[] indexed by context id minus
				    * one is an assumption; the original
				    * computation was elided. */
		if (fpriv->hw_state[idx] != NULL) {
			kfree(fpriv->hw_state[idx]);
			fpriv->hw_state[idx] = NULL;
		}
	}

	ret = drm_release(inode, filp);

	if (!ret) {
		atomic_inc(&dev->total_close);
		spin_lock(&dev->count_lock);
		if (!--dev->open_count) {
			if (atomic_read(&dev->ioctl_count) || dev->blocked) {
				DRM_ERROR("Device busy: %d %d\n",
					  atomic_read(&dev->ioctl_count),
					  dev->blocked);
				spin_unlock(&dev->count_lock);
				unlock_kernel();
				return -EBUSY;
			}
			spin_unlock(&dev->count_lock);
			ret = ffb_takedown(dev);
			unlock_kernel();
			return ret;
		}
		spin_unlock(&dev->count_lock);
	}

	unlock_kernel();
	return ret;
}

static int ffb_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
{
	int nr = DRM_IOCTL_NR(cmd);
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_ioctl_desc_t *ioctl;
	drm_ioctl_t *func;
	int ret;

	atomic_inc(&dev->ioctl_count);
	atomic_inc(&dev->total_ioctl);

	DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
		  current->pid, cmd, nr, dev->device, priv->authenticated);

	if (nr >= FFB_IOCTL_COUNT) {
		ret = -EINVAL;
	} else {
		ioctl = &ffb_ioctls[nr];
		func = ioctl->func;

		if (!func) {
			DRM_DEBUG("no function\n");
			ret = -EINVAL;
		} else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
			   || (ioctl->auth_needed && !priv->authenticated)) {
			ret = -EACCES;
		} else {
			ret = (func)(inode, filp, cmd, arg);
		}
	}

	atomic_dec(&dev->ioctl_count);
	return ret;
}

static int ffb_lock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	DECLARE_WAITQUEUE(entry, current);
	drm_lock_t lock;
	int ret = 0;

	ret = copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock));
	if (ret)
		return -EFAULT;

	if (lock.context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  current->pid, lock.context);
		return -EINVAL;
	}

	DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
		  lock.context, current->pid, dev->lock.hw_lock->lock,
		  lock.flags);

	add_wait_queue(&dev->lock.lock_queue, &entry);
	for (;;) {
		if (!dev->lock.hw_lock) {
			/* Device has been unregistered */
			ret = -EINTR;
			break;
		}
		if (drm_lock_take(&dev->lock.hw_lock->lock,
				  lock.context)) {
			dev->lock.pid	    = current->pid;
			dev->lock.lock_time = jiffies;
			atomic_inc(&dev->total_locks);
			break;	/* Got lock */
		}

		/* Contention */
		atomic_inc(&dev->total_sleeps);
		current->state = TASK_INTERRUPTIBLE;
		current->policy |= SCHED_YIELD;
		schedule();
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&dev->lock.lock_queue, &entry);

	if (!ret &&
	    (dev->last_context != lock.context))
		ffb_context_switch(dev, dev->last_context, lock.context);

	DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");

	return ret;
}

int ffb_unlock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_lock_t lock;
	unsigned int old, new, prev, ctx;
	int ret = 0;

	ret = copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock));
	if (ret)
		return -EFAULT;

	if ((ctx = lock.context) == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  current->pid, lock.context);
		return -EINVAL;
	}

	DRM_DEBUG("%d frees lock (%d holds)\n",
		  lock.context,
		  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
	atomic_inc(&dev->total_unlocks);
	if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
		atomic_inc(&dev->total_contends);

	/* We no longer really hold it, but if we are the next
	 * agent to request it then we should just be able to
	 * take it immediately and not eat the ioctl.
	 */
	dev->lock.pid = 0;
	{
		__volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;

		do {
			old  = *plock;
			new  = ctx;
			prev = cmpxchg(plock, old, new);
		} while (prev != old);
	}

	wake_up_interruptible(&dev->lock.lock_queue);

	return 0;
}

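/* The helper below nudges framebuffer mappings upward so the user
 * virtual address is naturally aligned to the largest of 4MB, 512KB or
 * 64KB that fits the mapping (the >>= 3 search).  The motivation is
 * presumably virtual-address/cache friendliness on sparc64; the shift
 * is only applied when the neighbouring VMA leaves room for it.
 */
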
static void align_fb_mapping(struct vm_area_struct *vma)
{
	unsigned long j, alignment;

	j = vma->vm_end - vma->vm_start;
	for (alignment = (4 * 1024 * 1024); alignment > PAGE_SIZE; alignment >>= 3)
		if (j >= alignment)
			break;
	if (alignment > PAGE_SIZE) {
		j = alignment;
		alignment = j - (vma->vm_start & (j - 1));
		if (alignment != j) {
			struct vm_area_struct *vmm = find_vma(current->mm, vma->vm_start);

			if (!vmm || vmm->vm_start >= vma->vm_end + alignment) {
				vma->vm_start += alignment;
				vma->vm_end += alignment;
			}
		}
	}
}

/* The problem here is, due to virtual cache aliasing,
 * we must make sure the shared memory area lands in the
 * same dcache line for both the kernel and all drm clients.
 */
static void align_shm_mapping(struct vm_area_struct *vma, unsigned long kvirt)
{
	kvirt &= PAGE_SIZE;
	if ((vma->vm_start & PAGE_SIZE) != kvirt) {
		struct vm_area_struct *vmm = find_vma(current->mm, vma->vm_start);

		if (!vmm || vmm->vm_start >= vma->vm_end + PAGE_SIZE) {
			vma->vm_start += PAGE_SIZE;
			vma->vm_end += PAGE_SIZE;
		}
	}
}

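/* Worked example of the check above, assuming 8KB pages: the
 * expression (address & PAGE_SIZE) extracts a single page-color bit, so
 * a hw_lock page at kernel virtual 0x...2000 (color 1) and a client
 * mapping at 0x...c000 (color 0) would alias in a virtually-indexed
 * D-cache spanning two pages; bumping the client VMA by one page makes
 * the colors match.
 */
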
extern struct vm_operations_struct drm_vm_ops;
extern struct vm_operations_struct drm_vm_shm_ops;
extern struct vm_operations_struct drm_vm_shm_lock_ops;

static int ffb_mmap(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_map_t *map = NULL;
	ffb_dev_priv_t *ffb_priv;
	int i, minor;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

	minor = MINOR(filp->f_dentry->d_inode->i_rdev);
	ffb_priv = NULL;
	for (i = 0; i < ffb_dev_table_size; i++) {
		ffb_priv = (ffb_dev_priv_t *) (ffb_dev_table[i] + 1);
		if (ffb_priv->miscdev.minor == minor)
			break;
	}
	if (i >= ffb_dev_table_size)
		return -EINVAL;

	/* We don't support/need dma mappings, so... */
	if (!VM_OFFSET(vma))
		return -EINVAL;

	for (i = 0; i < dev->map_count; i++) {
		unsigned long off;

		map = dev->maplist[i];

		/* Ok, a little hack to make 32-bit apps work. */
		off = (map->offset & 0xffffffff);
		if (off == VM_OFFSET(vma))
			break;
	}

	if (i >= dev->map_count)
		return -EINVAL;

	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	if (map->size != (vma->vm_end - vma->vm_start))
		return -EINVAL;

	/* Set read-only attribute before mappings are created
	 * so it works for fb/reg maps too.
	 */
	if (map->flags & _DRM_READ_ONLY)
		vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
			__pte(pgprot_val(vma->vm_page_prot)))));

	switch (map->type) {
	case _DRM_FRAME_BUFFER:
		align_fb_mapping(vma);
		/* FALLTHROUGH */

	case _DRM_REGISTERS:
		/* In order to handle 32-bit drm apps/xserver we
		 * play a trick. The mappings only really specify
		 * the 32-bit offset from the cards 64-bit base
		 * address, and we just add in the base here.
		 */
		vma->vm_flags |= VM_IO;
		if (io_remap_page_range(vma->vm_start,
					ffb_priv->card_phys_base + VM_OFFSET(vma),
					vma->vm_end - vma->vm_start,
					vma->vm_page_prot, 0))
			return -EAGAIN;

		vma->vm_ops = &drm_vm_ops;
		break;

	case _DRM_SHM:
		align_shm_mapping(vma, (unsigned long)dev->lock.hw_lock);
		if (map->flags & _DRM_CONTAINS_LOCK)
			vma->vm_ops = &drm_vm_shm_lock_ops;
		else {
			vma->vm_ops = &drm_vm_shm_ops;
			vma->vm_private_data = (void *) map;
		}

		/* Don't let this area swap. Change when
		 * DRM_KERNEL advisory is supported.
		 */
		vma->vm_flags |= VM_LOCKED;
		break;

	default:
		return -EINVAL; /* This should never happen. */
	};

	vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */

	vma->vm_file = filp; /* Needed for drm_vm_open() */
	drm_vm_open(vma);
	return 0;
}

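/* Example of the 32-bit offset trick in ffb_mmap() above (addresses
 * hypothetical): with card_phys_base at 0x1ff00000000, a map whose real
 * register offset is 0x1ff00600000 is advertised to clients as just
 * 0x00600000, small enough for a 32-bit Xserver to hand to mmap();
 * io_remap_page_range() then adds the 64-bit base back in.
 */
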
module_init(ffb_init);
module_exit(ffb_cleanup);
);