/* file-nommu.c: no-MMU version of ramfs
 *
 * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/ramfs.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/mman.h>

#include <asm/uaccess.h>
#include "internal.h"

static int ramfs_nommu_setattr(struct dentry *, struct iattr *);

const struct address_space_operations ramfs_aops = {
        .readpage               = simple_readpage,
        .prepare_write          = simple_prepare_write,
        .commit_write           = simple_commit_write,
        .set_page_dirty         = __set_page_dirty_nobuffers,
};

const struct file_operations ramfs_file_operations = {
        .mmap                   = ramfs_nommu_mmap,
        .get_unmapped_area      = ramfs_nommu_get_unmapped_area,
        .read                   = do_sync_read,
        .aio_read               = generic_file_aio_read,
        .write                  = do_sync_write,
        .aio_write              = generic_file_aio_write,
        .fsync                  = simple_sync_file,
        .sendfile               = generic_file_sendfile,
        .llseek                 = generic_file_llseek,
};

struct inode_operations ramfs_file_inode_operations = {
        .setattr                = ramfs_nommu_setattr,
        .getattr                = simple_getattr,
};

/*****************************************************************************/
/*
 * add a contiguous set of pages into a ramfs inode when it's truncated from
 * size 0 on the assumption that it's going to be used for an mmap of shared
 * memory
 */
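/* Illustrative userspace sketch (not part of the original file; the mount
 * point /mnt/ramfs and the file name are assumptions) showing the sequence
 * that reaches this function on a no-MMU system:
 *
 *      int fd = open("/mnt/ramfs/buf", O_RDWR | O_CREAT, 0600);
 *      ftruncate(fd, 65536);           // setattr -> resize -> expand
 *      char *p = mmap(NULL, 65536, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, fd, 0);
 */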
static int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
{
        struct pagevec lru_pvec;
        unsigned long npages, xpages, loop, limit;
        struct page *pages;
        unsigned order;
        void *data;
        int ret;

        /* make various checks */
        order = get_order(newsize);
        if (unlikely(order >= MAX_ORDER))
                goto too_big;

        limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
        if (limit != RLIM_INFINITY && newsize > limit)
                goto fsize_exceeded;
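        /* Illustrative (comment not in the original source): with
         * RLIMIT_FSIZE's soft limit set to 4096 bytes, expanding to 8192
         * bytes takes the fsize_exceeded path, raising SIGXFSZ in the caller
         * and returning -EFBIG. */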

        if (newsize > inode->i_sb->s_maxbytes)
                goto too_big;

        i_size_write(inode, newsize);

        /* allocate enough contiguous pages to be able to satisfy the
         * request */
        pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order);
        if (!pages)
                return -ENOMEM;

        /* split the high-order page into an array of single pages */
        xpages = 1UL << order;
        npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT;

        split_page(pages, order);

        /* trim off any pages we don't actually require */
        for (loop = npages; loop < xpages; loop++)
                __free_page(pages + loop);

        /* clear the memory we allocated */
        newsize = PAGE_SIZE * npages;
        data = page_address(pages);
        memset(data, 0, newsize);
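        /* Worked example (comment not in the original source; assumes
         * PAGE_SIZE == 4096): for newsize == 10000, get_order() gives 2, so
         * xpages == 4 and npages == 3; split_page() produces four order-0
         * pages, the unused fourth page is freed again, and 3 * 4096 ==
         * 12288 bytes are zeroed. */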

        /* attach all the pages to the inode's address space */
        pagevec_init(&lru_pvec, 0);
        for (loop = 0; loop < npages; loop++) {
                struct page *page = pages + loop;

                ret = add_to_page_cache(page, inode->i_mapping, loop, GFP_KERNEL);
                if (ret < 0)
                        goto add_error;

                if (!pagevec_add(&lru_pvec, page))
                        __pagevec_lru_add(&lru_pvec);

                unlock_page(page);
        }

        pagevec_lru_add(&lru_pvec);
        return 0;

 fsize_exceeded:
        send_sig(SIGXFSZ, current, 0);
 too_big:
        return -EFBIG;

 add_error:
        /* drop the reference on the page that failed to be added; pages
         * already attached stay in the page cache, the rest were never
         * attached and are simply freed */
        page_cache_release(pages + loop);
        for (loop++; loop < npages; loop++)
                __free_page(pages + loop);
        return ret;
}

/*****************************************************************************/
/*
 * check that file shrinkage doesn't leave any VMAs dangling in midair
 */
static int ramfs_nommu_check_mappings(struct inode *inode,
                                      size_t newsize, size_t size)
{
        struct vm_area_struct *vma;
        struct prio_tree_iter iter;
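        /* Illustrative example (comment not in the original source; assumes
         * PAGE_SIZE == 4096): shrinking a 16KiB file to 4KiB leaves a dead
         * zone starting at page index 1, so the tree below is walked for
         * VMAs whose pgoff range overlaps 1..4. */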

        /* search for VMAs that fall within the dead zone */
        vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
                              newsize >> PAGE_SHIFT,
                              (size + PAGE_SIZE - 1) >> PAGE_SHIFT
                              ) {
                /* found one - only interested if it's shared out of the page
                 * cache */
                if (vma->vm_flags & VM_SHARED)
                        return -ETXTBSY; /* not quite true, but near enough */
        }

        return 0;
}

/*****************************************************************************/
/*
 * handle a request to resize the file: truncate from zero populates the
 * file, anything else is a plain size change
 */
static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
{
        int ret;
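        /* Summary of the cases handled below (comment added for clarity):
         *  - size == 0:      populate the pages now for a future shared mmap
         *  - newsize < size: refuse if a shared mapping covers the dead zone
         *  - otherwise:      fall through to an ordinary vmtruncate()
         */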

        /* assume a truncate from zero size is going to be for the purposes of
         * shared mmap */
        if (size == 0) {
                /* the expansion path takes a size_t, which may be only 32
                 * bits wide on no-MMU targets, so refuse sizes that would
                 * not fit */
                if (unlikely(newsize >> 32))
                        return -EFBIG;

                return ramfs_nommu_expand_for_mapping(inode, newsize);
        }

        /* check that a decrease in size doesn't cut off any shared mappings */
        if (newsize < size) {
                ret = ramfs_nommu_check_mappings(inode, newsize, size);
                if (ret < 0)
                        return ret;
        }

        /* truncate to the new size, not the old one */
        ret = vmtruncate(inode, newsize);

        return ret;
}

/*****************************************************************************/
/*
 * handle a change of attributes
 * - we're specifically interested in a change of size
 */
static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia)
{
        struct inode *inode = dentry->d_inode;
        unsigned int old_ia_valid = ia->ia_valid;
        int ret = 0;

        /* by providing our own setattr() method, we skip this quotaism */
        if ((old_ia_valid & ATTR_UID && ia->ia_uid != inode->i_uid) ||
            (old_ia_valid & ATTR_GID && ia->ia_gid != inode->i_gid))
                ret = DQUOT_TRANSFER(inode, ia) ? -EDQUOT : 0;

        /* pick out size-changing events */
        if (ia->ia_valid & ATTR_SIZE) {
                loff_t size = i_size_read(inode);
                if (ia->ia_size != size) {
                        ret = ramfs_nommu_resize(inode, ia->ia_size, size);
                        if (ret < 0 || ia->ia_valid == ATTR_SIZE)
                                goto out;
                } else {
                        /* we skipped the truncate but must still update
                         * timestamps
                         */
                        ia->ia_valid |= ATTR_MTIME|ATTR_CTIME;
                }
        }

        ret = inode_setattr(inode, ia);
 out:
        /* hand the flags back to the caller as they were supplied */
        ia->ia_valid = old_ia_valid;
        return ret;
}

/*****************************************************************************/
/*
 * try to determine where a shared mapping can be made
 * - we require that:
 *   - the pages to be mapped must exist
 *   - the pages be physically contiguous in sequence
 */
unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
                                            unsigned long addr, unsigned long len,
                                            unsigned long pgoff, unsigned long flags)
{
        unsigned long maxpages, lpages, nr, loop, ret;
        struct inode *inode = file->f_path.dentry->d_inode;
        struct page **pages = NULL, **ptr, *page;
        loff_t isize;

        if (!(flags & MAP_SHARED))
                return addr;

        /* the mapping mustn't extend beyond the EOF */
        lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        isize = i_size_read(inode);

        ret = -EINVAL;
        maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (pgoff >= maxpages)
                goto out;

        if (maxpages - pgoff < lpages)
                goto out;
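        /* Worked example (comment not in the original source; assumes
         * PAGE_SIZE == 4096): mapping len == 3 pages at pgoff == 2 into a
         * 4-page file is rejected above, since maxpages - pgoff == 2 is
         * less than lpages == 3. */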

        /* gang-find the pages */
        ret = -ENOMEM;
        pages = kzalloc(lpages * sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                goto out;

        nr = find_get_pages(inode->i_mapping, pgoff, lpages, pages);
        if (nr != lpages)
                goto out; /* leave if some pages were missing */

        /* check the pages for physical adjacency */
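        /* (Added note: the loop below demands that each struct page pointer
         * in pages[] be exactly one greater than its predecessor, e.g. if
         * pages[0] == P then pages[1] must be P + 1, so that the region
         * starting at page_address(pages[0]) covers the whole run.) */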
        ptr = pages;
        page = *ptr++;
        page++;
        for (loop = lpages; loop > 1; loop--)
                if (*ptr++ != page++)
                        goto out;

        /* okay - all conditions fulfilled */
        ret = (unsigned long) page_address(pages[0]);

 out:
        /* drop the references taken by find_get_pages(); it returned only
         * nr pages, so only that many references are held */
        if (pages) {
                ptr = pages;
                for (loop = nr; loop > 0; loop--)
                        put_page(*ptr++);
                kfree(pages);
        }

        return ret;
}

/*****************************************************************************/
/*
 * set up a mapping for shared memory segments
 */
int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* only shared mappings are supported directly; -ENOSYS leaves the
         * no-MMU mm layer to handle private mappings by other means (such
         * as making a private copy of the data) */
        return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
}