/*
 *  linux/drivers/video/fb_defio.c
 *
 *  Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
{
	void *screen_base = (void __force *) info->screen_base;
	struct page *page;

	if (is_vmalloc_addr(screen_base + offs))
		page = vmalloc_to_page(screen_base + offs);
	else
		page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

	return page;
}

/* this is to find and return the vmalloc-ed fb pages */
static int fb_deferred_io_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vma->vm_private_data;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len)
		return VM_FAULT_SIGBUS;

	page = fb_deferred_io_page(info, offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);

	if (vma->vm_file)
		page->mapping = vma->vm_file->f_mapping;
	else
		printk(KERN_ERR "no mapping available\n");

	BUG_ON(!page->mapping);
	page->index = vmf->pgoff;

	vmf->page = page;
	return 0;
}

int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct fb_info *info = file->private_data;

	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;

	/* Kill off the delayed work */
	cancel_rearming_delayed_work(&info->deferred_work);

	/* Run it immediately */
	return schedule_delayed_work(&info->deferred_work, 0);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);

/* vm_ops->page_mkwrite handler */
static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
				  struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct fb_info *info = vma->vm_private_data;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *cur;

	/* this is a callback we get when userspace first tries to
	write to the page. we schedule a workqueue. that workqueue
	will eventually mkclean the touched pages and execute the
	deferred framebuffer IO. then if userspace touches a page
	again, we repeat the same scheme */

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);

	/* we loop through the pagelist before adding in order
	to keep the pagelist sorted */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		/* this check is to catch the case where a new
		process could start writing to the same page
		through a new pte. this new access can cause the
		mkwrite even when the original ps's pte is marked
		writable */
		if (unlikely(cur == page))
			goto page_already_added;
		else if (cur->index > page->index)
			break;
	}

	list_add_tail(&page->lru, &cur->lru);

page_already_added:
	mutex_unlock(&fbdefio->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return 0;
}

static struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};

static int fb_deferred_io_set_page_dirty(struct page *page)
{
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}

static const struct address_space_operations fb_deferred_io_aops = {
	.set_page_dirty = fb_deferred_io_set_page_dirty,
};

static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_ops = &fb_deferred_io_vm_ops;
	vma->vm_flags |= ( VM_IO | VM_RESERVED | VM_DONTEXPAND );
	vma->vm_private_data = info;
	return 0;
}

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info,
						deferred_work.work);
	struct list_head *node, *next;
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		lock_page(cur);
		page_mkclean(cur);
		unlock_page(cur);
	}

	/* driver's callback with pagelist */
	fbdefio->deferred_io(info, &fbdefio->pagelist);

	/* clear the list */
	list_for_each_safe(node, next, &fbdefio->pagelist) {
		list_del(node);
	}
	mutex_unlock(&fbdefio->lock);
}

void fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	BUG_ON(!fbdefio);
	mutex_init(&fbdefio->lock);
	info->fbops->fb_mmap = fb_deferred_io_mmap;
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagelist);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);

void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

void fb_deferred_io_cleanup(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *page;
	int i;

	BUG_ON(!fbdefio);
	cancel_delayed_work(&info->deferred_work);
	flush_scheduled_work();

	/* clear out the mapping that we setup */
	for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
		page = fb_deferred_io_page(info, i);
		page->mapping = NULL;
	}

	info->fbops->fb_mmap = NULL;
	mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);

MODULE_LICENSE("GPL");