/*
 *  linux/drivers/video/fb_defio.c
 *
 *  Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>
/* this is to find and return the vmalloc-ed fb pages */
static int fb_deferred_io_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vma->vm_private_data;
	/* info->screen_base is virtual memory */
	void *screen_base = (void __force *) info->screen_base;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len)
		return VM_FAULT_SIGBUS;

	page = vmalloc_to_page(screen_base + offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);

	if (vma->vm_file)
		page->mapping = vma->vm_file->f_mapping;
	else
		printk(KERN_ERR "no mapping available\n");

	BUG_ON(!page->mapping);
	page->index = vmf->pgoff;

	vmf->page = page;
	return 0;
}
int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct fb_info *info = file->private_data;

	/* Kill off the delayed work */
	cancel_rearming_delayed_work(&info->deferred_work);

	/* Run it immediately */
	return schedule_delayed_work(&info->deferred_work, 0);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
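
/*
 * The export above lets the framebuffer core install
 * fb_deferred_io_fsync() as the ->fsync handler for the fbdev device
 * files, so an fsync() from userspace cancels the pending delayed work
 * and runs the deferred IO immediately instead of waiting for the delay.
 */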
/* vm_ops->page_mkwrite handler */
static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
				  struct page *page)
{
	struct fb_info *info = vma->vm_private_data;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *cur;

	/* this is a callback we get when userspace first tries to
	write to the page. we schedule a workqueue. that workqueue
	will eventually mkclean the touched pages and execute the
	deferred framebuffer IO. then if userspace touches a page
	again, we repeat the same scheme */

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);

	/* we loop through the pagelist before adding in order
	to keep the pagelist sorted */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		/* this check is to catch the case where a new
		process could start writing to the same page
		through a new pte. this new access can cause the
		mkwrite even when the original process's pte is
		marked writable */
		if (unlikely(cur == page))
			goto page_already_added;
		else if (cur->index > page->index)
			break;
	}

	list_add_tail(&page->lru, &cur->lru);

page_already_added:
	mutex_unlock(&fbdefio->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return 0;
}

static struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};
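
/*
 * Life cycle of a mapped framebuffer page: the first access faults in the
 * vmalloc'ed page through ->fault above; the first write triggers
 * ->page_mkwrite, which queues the page on fbdefio->pagelist and schedules
 * the delayed work; the work handler later write-protects the page again
 * via page_mkclean() and hands the list to the driver's deferred_io()
 * callback, so the next userspace write restarts the cycle.
 */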
static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_ops = &fb_deferred_io_vm_ops;
	/* keep the area out of core dumps and swap, and don't let mremap
	   expand it */
	vma->vm_flags |= ( VM_IO | VM_RESERVED | VM_DONTEXPAND );
	vma->vm_private_data = info;
	return 0;
}
/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info,
						deferred_work.work);
	struct list_head *node, *next;
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		lock_page(cur);
		page_mkclean(cur);
		unlock_page(cur);
	}

	/* driver's callback with pagelist */
	fbdefio->deferred_io(info, &fbdefio->pagelist);

	/* clear the list */
	list_for_each_safe(node, next, &fbdefio->pagelist) {
		list_del(node);
	}
	mutex_unlock(&fbdefio->lock);
}
void fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	BUG_ON(!fbdefio);
	mutex_init(&fbdefio->lock);
	info->fbops->fb_mmap = fb_deferred_io_mmap;
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagelist);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
void fb_deferred_io_cleanup(struct fb_info *info)
{
	void *screen_base = (void __force *) info->screen_base;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *page;
	int i;

	BUG_ON(!fbdefio);
	cancel_delayed_work(&info->deferred_work);
	flush_scheduled_work();

	/* clear out the mapping that we setup */
	for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
		page = vmalloc_to_page(screen_base + i);
		page->mapping = NULL;
	}
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);

MODULE_LICENSE("GPL");
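
/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * that wants deferred IO provides a struct fb_deferred_io with a flush
 * callback and a delay, points info->fbdefio at it and calls
 * fb_deferred_io_init() before registering the framebuffer. The names
 * example_defio_flush, example_defio, example_enable_defio and
 * example_disable_defio below are hypothetical.
 */
#if 0	/* example only, not built */
static void example_defio_flush(struct fb_info *info,
				struct list_head *pagelist)
{
	struct page *page;
	unsigned long offset;

	/* push each page touched since the last flush out to the device */
	list_for_each_entry(page, pagelist, lru) {
		offset = page->index << PAGE_SHIFT;
		/* write PAGE_SIZE bytes from info->screen_base + offset
		 * to the hardware here
		 */
	}
}

static struct fb_deferred_io example_defio = {
	.delay		= HZ / 4,	/* collect writes for ~250 ms */
	.deferred_io	= example_defio_flush,
};

static void example_enable_defio(struct fb_info *info)
{
	/* in the driver's probe, before register_framebuffer() */
	info->fbdefio = &example_defio;
	fb_deferred_io_init(info);
}

static void example_disable_defio(struct fb_info *info)
{
	/* in the driver's remove path */
	fb_deferred_io_cleanup(info);
	info->fbdefio = NULL;
}
#endif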