/* file-nommu.c: no-MMU version of ramfs
 *
 * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/ramfs.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/mman.h>

#include <asm/uaccess.h>
#include "internal.h"
static int ramfs_nommu_setattr(struct dentry *, struct iattr *);

const struct address_space_operations ramfs_aops = {
	.readpage		= simple_readpage,
	.prepare_write		= simple_prepare_write,
	.commit_write		= simple_commit_write,
	.set_page_dirty		= __set_page_dirty_no_writeback,
};

const struct file_operations ramfs_file_operations = {
	.mmap			= ramfs_nommu_mmap,
	.get_unmapped_area	= ramfs_nommu_get_unmapped_area,
	.read			= do_sync_read,
	.aio_read		= generic_file_aio_read,
	.write			= do_sync_write,
	.aio_write		= generic_file_aio_write,
	.fsync			= simple_sync_file,
	.sendfile		= generic_file_sendfile,
	.llseek			= generic_file_llseek,
};

const struct inode_operations ramfs_file_inode_operations = {
	.setattr		= ramfs_nommu_setattr,
	.getattr		= simple_getattr,
};
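
/* The read/write paths above reuse the generic simple_* and do_sync_*
 * helpers; the no-MMU-specific work lives in the truncate path (which must
 * build a physically contiguous backing for the file) and in the two mmap
 * hooks at the bottom of this file. */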

/*****************************************************************************/
/*
 * add a contiguous set of pages into a ramfs inode when it's truncated from
 * size 0 on the assumption that it's going to be used for an mmap of shared
 * memory
 */
static int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
{
	struct pagevec lru_pvec;
	unsigned long npages, xpages, loop, limit;
	struct page *pages;
	unsigned order;
	void *data;
	int ret;
	/* make various checks */
	order = get_order(newsize);
	if (unlikely(order >= MAX_ORDER))
		goto too_big;

	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && newsize > limit)
		goto fsize_exceeded;

	if (newsize > inode->i_sb->s_maxbytes)
		goto too_big;

	i_size_write(inode, newsize);

	/* allocate enough contiguous pages to be able to satisfy the
	 * request */
	pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order);
	if (!pages)
		return -ENOMEM;
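
	/* the single high-order allocation above is what guarantees physical
	 * contiguity for a later shared mmap; everything from here on just
	 * tidies that block up into order-0 pages */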
	/* split the high-order page into an array of single pages */
	xpages = 1UL << order;
	npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT;

	split_page(pages, order);

	/* trim off any pages we don't actually require */
	for (loop = npages; loop < xpages; loop++)
		__free_page(pages + loop);
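
	/* split_page() made each of the xpages constituent pages individually
	 * refcounted, so the surplus tail (npages..xpages-1) can be freed one
	 * by one while the first npages stay physically contiguous */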
	/* clear the memory we allocated */
	newsize = PAGE_SIZE * npages;
	data = page_address(pages);
	memset(data, 0, newsize);

	/* attach all the pages to the inode's address space */
	pagevec_init(&lru_pvec, 0);
	for (loop = 0; loop < npages; loop++) {
		struct page *page = pages + loop;

		ret = add_to_page_cache(page, inode->i_mapping, loop, GFP_KERNEL);
		if (ret < 0)
			goto add_error;

		if (!pagevec_add(&lru_pvec, page))
			__pagevec_lru_add(&lru_pvec);

		unlock_page(page);
	}

	pagevec_lru_add(&lru_pvec);
	return 0;

 fsize_exceeded:
	send_sig(SIGXFSZ, current, 0);
 too_big:
	return -EFBIG;

 add_error:
	page_cache_release(pages + loop);
	for (loop++; loop < npages; loop++)
		__free_page(pages + loop);
	return ret;
}
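
/* On success the new pages are resident, zeroed and held in the page cache
 * as one physically contiguous run, which is exactly what a subsequent
 * MAP_SHARED mmap of the file needs on no-MMU (see the usage sketch at the
 * end of this file). */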

/*****************************************************************************/
/*
 * check that file shrinkage doesn't leave any VMAs dangling in midair
 */
static int ramfs_nommu_check_mappings(struct inode *inode,
				      size_t newsize, size_t size)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;

	/* search for VMAs that fall within the dead zone */
	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
			      newsize >> PAGE_SHIFT,
			      (size + PAGE_SIZE - 1) >> PAGE_SHIFT) {
		/* found one - only interested if it's shared out of the page
		 * cache */
		if (vma->vm_flags & VM_SHARED)
			return -ETXTBSY; /* not quite true, but near enough */
	}

	return 0;
}
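
/* Private mappings are deliberately ignored here: on no-MMU they carry their
 * own copy of the data rather than pointing into the page cache, so cutting
 * the file out from under them is harmless. */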

/*****************************************************************************/
/*
 * handle a file size change, expanding or shrinking as appropriate
 */
static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
{
	int ret;

	/* assume a truncate from zero size is going to be for the purposes of
	 * shared mmap */
	if (size == 0) {
		if (unlikely(newsize >> 32))
			return -EFBIG;

		return ramfs_nommu_expand_for_mapping(inode, newsize);
	}

	/* check that a decrease in size doesn't cut off any shared mappings */
	if (newsize < size) {
		ret = ramfs_nommu_check_mappings(inode, newsize, size);
		if (ret < 0)
			return ret;
	}

	ret = vmtruncate(inode, newsize);	/* must be the NEW size, not "size",
						 * or the truncate is a no-op */

	return ret;
}
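
/* Note that only the truncate-up-from-zero path allocates backing pages; any
 * other size change just adjusts i_size and, when shrinking, discards the
 * pages beyond the new EOF via vmtruncate(). */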

/*****************************************************************************/
/*
 * handle a change of attributes
 * - we're specifically interested in a change of size
 */
static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia)
{
	struct inode *inode = dentry->d_inode;
	unsigned int old_ia_valid = ia->ia_valid;
	int ret = 0;

	/* by providing our own setattr() method, we skip this quotaism */
	if ((old_ia_valid & ATTR_UID && ia->ia_uid != inode->i_uid) ||
	    (old_ia_valid & ATTR_GID && ia->ia_gid != inode->i_gid))
		ret = DQUOT_TRANSFER(inode, ia) ? -EDQUOT : 0;

	/* pick out size-changing events */
	if (ia->ia_valid & ATTR_SIZE) {
		loff_t size = i_size_read(inode);
		if (ia->ia_size != size) {
			ret = ramfs_nommu_resize(inode, ia->ia_size, size);
			if (ret < 0 || ia->ia_valid == ATTR_SIZE)
				goto out;
		} else {
			/* we skipped the truncate but must still update
			 * timestamps
			 */
			ia->ia_valid |= ATTR_MTIME|ATTR_CTIME;
		}
	}

	ret = inode_setattr(inode, ia);
 out:
	ia->ia_valid = old_ia_valid;
	return ret;
}
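
/* inode_setattr() is skipped when the resize failed or when ATTR_SIZE was the
 * only requested change (the resize already did all the work); ia_valid is
 * restored to the caller's original set before returning. */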

/*****************************************************************************/
/*
 * try to determine where a shared mapping can be made
 * - we require that:
 *   - the pages to be mapped must exist
 *   - the pages be physically contiguous in sequence
 */
unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
					    unsigned long addr, unsigned long len,
					    unsigned long pgoff, unsigned long flags)
{
	unsigned long maxpages, lpages, nr, loop, ret;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct page **pages = NULL, **ptr, *page;
	loff_t isize;

	if (!(flags & MAP_SHARED))
		return addr;

	/* the mapping mustn't extend beyond the EOF */
	lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	isize = i_size_read(inode);

	ret = -EINVAL;
	maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= maxpages)
		goto out;

	if (maxpages - pgoff < lpages)
		goto out;

	/* gang-find the pages */
	ret = -ENOMEM;
	pages = kzalloc(lpages * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto out;

	nr = find_get_pages(inode->i_mapping, pgoff, lpages, pages);
	if (nr != lpages)
		goto out; /* leave if some pages were missing */

	/* check the pages for physical adjacency */
	ptr = pages;
	page = *ptr++;
	page++;
	for (loop = lpages; loop > 1; loop--)
		if (*ptr++ != page++)
			goto out;
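
	/* the adjacency test leans on consecutive page frames having
	 * consecutive struct page entries in a flat memory map: *ptr is the
	 * page actually found at each file offset, while "page" walks the
	 * struct pages that would follow pages[0] physically */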

	/* okay - all conditions fulfilled */
	ret = (unsigned long) page_address(pages[0]);

 out:
	if (pages) {
		/* drop the references find_get_pages() took; only "nr" slots
		 * were filled, so don't put_page() the NULL tail left by a
		 * short gang lookup */
		ptr = pages;
		for (loop = nr; loop > 0; loop--)
			put_page(*ptr++);
		kfree(pages);
	}

	return ret;
}
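
/* The page references are always dropped before returning: the caller only
 * needs the address, and the pages themselves remain held by the inode's
 * page cache, from which ramfs never evicts them. */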

/*****************************************************************************/
/*
 * set up a mapping for shared memory segments
 */
int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
}
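
/* A minimal userspace sketch of the intended usage (not part of this file;
 * the mount point and size are illustrative assumptions).  Truncating a
 * fresh file up from size 0 runs ramfs_nommu_expand_for_mapping(), and the
 * MAP_SHARED mmap then maps the page cache directly via the two hooks above:
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/mnt/ramfs/shm", O_RDWR | O_CREAT | O_TRUNC, 0600);
 *	ftruncate(fd, 16 * 4096);		// 0 -> 64KB: contiguous pages
 *	void *p = mmap(NULL, 16 * 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);	// direct map, no copy
 */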