/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This file is released under the GPL.
 */

/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

/* This magic number is used in glibc for posix shared memory */
#define TMPFS_MAGIC	0x01021994

#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
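
/*
 * Worked example (editor's sketch, assuming a 32-bit box with 4 KiB
 * pages, sizeof(unsigned long) == 4, and SHMEM_NR_DIRECT == 16 from
 * this era's <linux/shmem_fs.h>):
 *
 *	ENTRIES_PER_PAGE     = 4096/4          = 1024
 *	ENTRIES_PER_PAGEPAGE = 1024*1024       = 1048576
 *	SHMEM_MAX_INDEX      = 16 + 524288*1025 = 537395216 pages
 *	SHMEM_MAX_BYTES      = 537395216 << 12  which is roughly 2 TiB
 *
 * On 64-bit, unsigned long doubles in size, halving ENTRIES_PER_PAGE.
 */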
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	 VM_READ
#define SHMEM_TRUNCATE	 VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT	 64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_QUICK,	/* don't try more than file page cache lookup */
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};

static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 */
	return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_ACCOUNT)?
		security_vm_enough_memory(VM_ACCT(size)): 0;
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (flags & VM_ACCOUNT)
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_ACCOUNT)?
		0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (!(flags & VM_ACCOUNT))
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
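
/*
 * Accounting arithmetic, for illustration (editor's sketch): VM_ACCT
 * rounds a byte count up to whole pages, so with 4 KiB pages
 * VM_ACCT(10000) == 3.  A VM_ACCOUNT object of 10000 bytes is thus
 * charged 3 pages up front by shmem_acct_size, while a plain tmpfs
 * file of the same size is charged one page at a time by
 * shmem_acct_block, only as pages are actually allocated.
 */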
static struct super_operations shmem_ops;
static struct address_space_operations shmem_aops;
static struct file_operations shmem_file_operations;
static struct inode_operations shmem_inode_operations;
static struct inode_operations shmem_dir_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
	.unplug_io_fn	= default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_SPINLOCK(shmem_swaplist_lock);

static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_blocks += pages;
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/*
 * shmem_recalc_inode - recalculate the size of an inode
 *
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}

/*
 * shmem_swp_entry - find the swap vector position in the info structure
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 *	      |	     +-> 20-23
 *
 *	      +-->dir2 --> 24-27
 *	      |	       +-> 28-31
 *	      |	       +-> 32-35
 *	      |	       +-> 36-39
 *
 *	      +-->dir3 --> 40-43
 *		       +-> 44-47
 *		       +-> 48-51
 *		       +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}
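
/*
 * Worked lookup, for illustration (editor's sketch, using the
 * artificial ENTRIES_PER_PAGE = 4 and SHMEM_NR_DIRECT = 16 from the
 * diagram above): finding the entry for page index 30.
 *
 *	index  = 30 - 16 = 14
 *	offset = 14 % 4  = 2
 *	index  = 14 / 4  = 3	(>= ENTRIES_PER_PAGE/2: triple indirect)
 *	index -= 2  ->  1;  dir += 2 + 1/4  ->  topdir slot 2, i.e. dir2
 *	index %= 4  ->  1;  dir2 slot 1 is the leaf holding 28-31
 *
 * offset 2 within that leaf is the swp_entry_t for page 30.
 */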
static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
		struct page *page = kmap_atomic_to_page(entry);
		set_page_private(page, page_private(page) + incdec);
	}
}

/*
 * shmem_swp_alloc - get the position of the swap entry for the page.
 *                   If it does not exist allocate the entry.
 *
 * @info:	info structure for the inode
 * @index:	index of the page to find
 * @sgp:	check and recheck i_size? skip allocation?
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test free_blocks against 1 not 0, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
		 */
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks <= 1) {
				spin_unlock(&sbinfo->stat_lock);
				return ERR_PTR(-ENOSPC);
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
		if (page)
			set_page_private(page, 0);
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}

/*
 * shmem_free_swp - free some swap entries in a directory
 *
 * @dir:   pointer to the directory
 * @edir:  pointer after last entry of the directory
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	return freed;
}

static int shmem_map_and_free_swp(struct page *subdir,
		int offset, int limit, struct page ***dir)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
			ptr = shmem_swp_map(subdir);
		}
	}
	shmem_swp_unmap(ptr);
	return freed;
}

static void shmem_free_pages(struct list_head *next)
{
	struct page *page;
	int freed = 0;

	do {
		page = container_of(next, struct page, lru);
		next = next->next;
		shmem_dir_free(page);
		freed++;
		if (freed >= LATENCY_LIMIT) {
			cond_resched();
			freed = 0;
		}
	} while (next);
}

static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	unsigned long diroff;
	struct page **dir;
	struct page *topdir;
	struct page *middir;
	struct page *subdir;
	swp_entry_t *ptr;
	LIST_HEAD(pages_to_free);
	long nr_pages_to_free = 0;
	long nr_swaps_freed = 0;
	int offset;
	int freed;
	int punch_hole = 0;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	if (likely(end == (loff_t) -1)) {
		limit = info->next_index;
		info->next_index = idx;
	} else {
		limit = (end + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		if (limit > info->next_index)
			limit = info->next_index;
		punch_hole = 1;
	}

	topdir = info->i_indirect;
	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
		info->i_indirect = NULL;
		nr_pages_to_free++;
		list_add(&topdir->lru, &pages_to_free);
	}
	spin_unlock(&info->lock);

	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size);
	}

	/*
	 * If there are no indirect blocks or we are punching a hole
	 * below indirect blocks, nothing to be done.
	 */
	if (!topdir || (punch_hole && (limit <= SHMEM_NR_DIRECT)))
		goto done2;

	BUG_ON(limit <= SHMEM_NR_DIRECT);
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;
	idx -= offset;

	dir = shmem_dir_map(topdir);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2) {
		middir = topdir;
		diroff = idx/ENTRIES_PER_PAGE;
	} else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		middir = *dir;
		if (*dir) {
			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!diroff && !offset) {
				*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(middir);
		} else {
			diroff = 0;
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(topdir) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			middir = *dir;
			*dir = NULL;
			nr_pages_to_free++;
			list_add(&middir->lru, &pages_to_free);
			shmem_dir_unmap(dir);
			cond_resched();
			dir = shmem_dir_map(middir);
			diroff = 0;
		}
		subdir = dir[diroff];
		if (subdir && page_private(subdir)) {
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_map_and_free_swp(subdir,
						offset, size, &dir);
			if (!dir)
				dir = shmem_dir_map(middir);
			nr_swaps_freed += freed;
			if (offset)
				spin_lock(&info->lock);
			set_page_private(subdir, page_private(subdir) - freed);
			if (offset)
				spin_unlock(&info->lock);
			if (!punch_hole)
				BUG_ON(page_private(subdir) > offset);
		}
		if (offset)
			offset = 0;
		else if (subdir && !page_private(subdir)) {
			dir[diroff] = NULL;
			nr_pages_to_free++;
			list_add(&subdir->lru, &pages_to_free);
		}
	}
done1:
	shmem_dir_unmap(dir);
done2:
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since vmtruncate or
		 * generic_delete_inode did it, before we lowered next_index.
		 * Also, though shmem_getpage checks i_size before adding to
		 * cache, no recheck after: so fix the narrow window there too.
		 */
		truncate_inode_pages_range(inode->i_mapping, start, end);
	}

	spin_lock(&info->lock);
	info->flags &= ~SHMEM_TRUNCATE;
	info->swapped -= nr_swaps_freed;
	if (nr_pages_to_free)
		shmem_free_blocks(inode, nr_pages_to_free);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	/*
	 * Empty swap vector directory pages to be freed?
	 */
	if (!list_empty(&pages_to_free)) {
		pages_to_free.prev->next = NULL;
		shmem_free_pages(pages_to_free.next);
	}
}

static void shmem_truncate(struct inode *inode)
{
	shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
}
static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct page *page = NULL;
	int error;

	if (attr->ia_valid & ATTR_SIZE) {
		if (attr->ia_size < inode->i_size) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					attr->ia_size>>PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (attr->ia_size) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}
	}

	error = inode_change_ok(inode, attr);
	if (!error)
		error = inode_setattr(inode, attr);
	if (page)
		page_cache_release(page);
	return error;
}
static void shmem_delete_inode(struct inode *inode)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_op->truncate == shmem_truncate) {
		truncate_inode_pages(inode->i_mapping, 0);
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate(inode);
		if (!list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			list_del_init(&info->swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
	}
	BUG_ON(inode->i_blocks);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	clear_inode(inode);
}

static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}

static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct inode *inode;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;

	idx = 0;
	ptr = info->i_direct;
	spin_lock(&info->lock);
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0) {
		shmem_swp_balance_unmap();
		goto found;
	}
	if (!info->i_indirect)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && page_private(subdir)) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				goto found;
			}
			shmem_swp_unmap(ptr);
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	inode = &info->vfs_inode;
	if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr + offset, 0);
	}
	shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	/*
	 * Decrement swap count even when the entry is left behind:
	 * try_to_unuse will skip over mms, then reincrement count.
	 */
	swap_free(entry);
	return 1;
}
/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	struct list_head *p, *next;
	struct shmem_inode_info *info;
	int found = 0;

	spin_lock(&shmem_swaplist_lock);
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		if (!info->swapped)
			list_del_init(&info->swaplist);
		else if (shmem_unuse_inode(info, entry, page)) {
			/* move head to start search for next from here */
			list_move_tail(&shmem_swaplist, &info->swaplist);
			found = 1;
			break;
		}
	}
	spin_unlock(&shmem_swaplist_lock);
	return found;
}
/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	BUG_ON(page_mapped(page));

	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	BUG_ON(!entry);
	BUG_ON(entry->val);

	if (move_to_swap_cache(page, swap) == 0) {
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		spin_unlock(&info->lock);
		if (list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			/* move instead of add in case we're racing */
			list_move_tail(&info->swaplist, &shmem_swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
		unlock_page(page);
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	swap_free(swap);
redirty:
	set_page_dirty(page);
	return AOP_WRITEPAGE_ACTIVATE;	/* Return with the page locked */
}
#ifdef CONFIG_NUMA
static int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
	char *nodelist = strchr(value, ':');
	int err = 1;

	if (nodelist) {
		/* NUL-terminate policy string */
		*nodelist++ = '\0';
		if (nodelist_parse(nodelist, *policy_nodes))
			goto out;
	}
	if (!strcmp(value, "default")) {
		*policy = MPOL_DEFAULT;
		/* Don't allow a nodelist */
		if (!nodelist)
			err = 0;
	} else if (!strcmp(value, "prefer")) {
		*policy = MPOL_PREFERRED;
		/* Insist on a nodelist of one node only */
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (!*rest)
				err = 0;
		}
	} else if (!strcmp(value, "bind")) {
		*policy = MPOL_BIND;
		/* Insist on a nodelist */
		if (nodelist)
			err = 0;
	} else if (!strcmp(value, "interleave")) {
		*policy = MPOL_INTERLEAVE;
		/* Default to nodes online if no nodelist */
		if (!nodelist)
			*policy_nodes = node_online_map;
		err = 0;
	}
out:
	/* Restore string for error message */
	if (nodelist)
		*--nodelist = ':';
	return err;
}
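
/*
 * Accepted forms, for illustration (editor's note): "mpol=default",
 * "mpol=prefer:2" (exactly one node), "mpol=bind:0-3" (nodelist
 * required), and "mpol=interleave" or "mpol=interleave:0,2" (nodelist
 * optional, defaulting to all online nodes).  The nodelist syntax is
 * whatever nodelist_parse accepts: comma-separated numbers and ranges.
 */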
static struct page *shmem_swapin_async(struct shared_policy *p,
				       swp_entry_t entry, unsigned long idx)
{
	struct page *page;
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	memset(&pvma, 0, sizeof(struct vm_area_struct));
	pvma.vm_end = PAGE_SIZE;
	pvma.vm_pgoff = idx;
	pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
	page = read_swap_cache_async(entry, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}

struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
			  unsigned long idx)
{
	struct shared_policy *p = &info->policy;
	int i, num;
	struct page *page;
	unsigned long offset;

	num = valid_swaphandles(entry, &offset);
	for (i = 0; i < num; offset++, i++) {
		page = shmem_swapin_async(p,
				swp_entry(swp_type(entry), offset), idx);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return shmem_swapin_async(p, entry, idx);
}

static struct page *
shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
		 unsigned long idx)
{
	struct vm_area_struct pvma;
	struct page *page;

	memset(&pvma, 0, sizeof(struct vm_area_struct));
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
	pvma.vm_pgoff = idx;
	pvma.vm_end = PAGE_SIZE;
	page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}
#else
static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
	return 1;
}

static inline struct page *
shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
{
	swapin_readahead(entry, 0, NULL);
	return read_swap_cache_async(entry, NULL, 0);
}

static inline struct page *
shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
{
	return alloc_page(gfp | __GFP_ZERO);
}
#endif
/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
			struct page **pagep, enum sgp_type sgp, int *type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *filepage = *pagep;
	struct page *swappage;
	swp_entry_t *entry;
	swp_entry_t swap;
	int error;

	if (idx >= SHMEM_MAX_INDEX)
		return -EFBIG;
	/*
	 * Normally, filepage is NULL on entry, and either found
	 * uptodate immediately, or allocated and zeroed, or read
	 * in under swappage, which is then assigned to filepage.
	 * But shmem_prepare_write passes in a locked filepage,
	 * which may be found not uptodate by other callers too,
	 * and may need to be copied from the swappage read in.
	 */
repeat:
	if (!filepage)
		filepage = find_lock_page(mapping, idx);
	if (filepage && PageUptodate(filepage))
		goto done;
	error = 0;
	if (sgp == SGP_QUICK)
		goto failed;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	entry = shmem_swp_alloc(info, idx, sgp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		error = PTR_ERR(entry);
		goto failed;
	}
	swap = *entry;

	if (swap.val) {
		/* Look it up and read it in.. */
		swappage = lookup_swap_cache(swap);
		if (!swappage) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			/* here we actually do the io */
			if (type && *type == VM_FAULT_MINOR) {
				inc_page_state(pgmajfault);
				*type = VM_FAULT_MAJOR;
			}
			swappage = shmem_swapin(info, swap, idx);
			if (!swappage) {
				spin_lock(&info->lock);
				entry = shmem_swp_alloc(info, idx, sgp);
				if (IS_ERR(entry))
					error = PTR_ERR(entry);
				else {
					if (entry->val == swap.val)
						error = -ENOMEM;
					shmem_swp_unmap(entry);
				}
				spin_unlock(&info->lock);
				if (error)
					goto failed;
				goto repeat;
			}
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (TestSetPageLocked(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (!PageSwapCache(swappage)) {
			/* Page migration has occurred */
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (PageWriteback(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_writeback(swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (!PageUptodate(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			error = -EIO;
			goto failed;
		}

		if (filepage) {
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			copy_highpage(filepage, swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			flush_dcache_page(filepage);
			SetPageUptodate(filepage);
			set_page_dirty(filepage);
			swap_free(swap);
		} else if (!(error = move_from_swap_cache(
				swappage, idx, mapping))) {
			info->flags |= SHMEM_PAGEIN;
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			filepage = swappage;
			swap_free(swap);
		} else {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			if (error == -ENOMEM) {
				/* let kswapd refresh zone for GFP_ATOMICs */
				blk_congestion_wait(WRITE, HZ/50);
			}
			goto repeat;
		}
	} else if (sgp == SGP_READ && !filepage) {
		shmem_swp_unmap(entry);
		filepage = find_get_page(mapping, idx);
		if (filepage &&
		    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
			spin_unlock(&info->lock);
			wait_on_page_locked(filepage);
			page_cache_release(filepage);
			filepage = NULL;
			goto repeat;
		}
		spin_unlock(&info->lock);
	} else {
		shmem_swp_unmap(entry);
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks == 0 ||
			    shmem_acct_block(info->flags)) {
				spin_unlock(&sbinfo->stat_lock);
				spin_unlock(&info->lock);
				error = -ENOSPC;
				goto failed;
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		} else if (shmem_acct_block(info->flags)) {
			spin_unlock(&info->lock);
			error = -ENOSPC;
			goto failed;
		}

		if (!filepage) {
			spin_unlock(&info->lock);
			filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
						    info,
						    idx);
			if (!filepage) {
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				error = -ENOMEM;
				goto failed;
			}

			spin_lock(&info->lock);
			entry = shmem_swp_alloc(info, idx, sgp);
			if (IS_ERR(entry))
				error = PTR_ERR(entry);
			else {
				swap = *entry;
				shmem_swp_unmap(entry);
			}
			if (error || swap.val || 0 != add_to_page_cache_lru(
					filepage, mapping, idx, GFP_ATOMIC)) {
				spin_unlock(&info->lock);
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				if (error)
					goto failed;
				goto repeat;
			}
			info->flags |= SHMEM_PAGEIN;
		}

		info->alloced++;
		spin_unlock(&info->lock);
		flush_dcache_page(filepage);
		SetPageUptodate(filepage);
	}
done:
	if (*pagep != filepage) {
		unlock_page(filepage);
		*pagep = filepage;
	}
	return 0;

failed:
	if (*pagep != filepage) {
		unlock_page(filepage);
		page_cache_release(filepage);
	}
	return error;
}
struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct page *page = NULL;
	unsigned long idx;
	int error;

	idx = (address - vma->vm_start) >> PAGE_SHIFT;
	idx += vma->vm_pgoff;
	idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
	if (((loff_t) idx << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return NOPAGE_SIGBUS;

	error = shmem_getpage(inode, idx, &page, SGP_CACHE, type);
	if (error)
		return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;

	mark_page_accessed(page);
	return page;
}

static int shmem_populate(struct vm_area_struct *vma,
	unsigned long addr, unsigned long len,
	pgprot_t prot, unsigned long pgoff, int nonblock)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct mm_struct *mm = vma->vm_mm;
	enum sgp_type sgp = nonblock? SGP_QUICK: SGP_CACHE;
	unsigned long size;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size || pgoff + (len >> PAGE_SHIFT) > size)
		return -EINVAL;

	while ((long) len > 0) {
		struct page *page = NULL;
		int err;
		/*
		 * Will need changing if PAGE_CACHE_SIZE != PAGE_SIZE
		 */
		err = shmem_getpage(inode, pgoff, &page, sgp, NULL);
		if (err)
			return err;
		/* Page may still be null, but only if nonblock was set. */
		if (page) {
			mark_page_accessed(page);
			err = install_page(mm, vma, addr, page, prot);
			if (err) {
				page_cache_release(page);
				return err;
			}
		} else if (vma->vm_flags & VM_NONLINEAR) {
			/* No page was found just because we can't read it in
			 * now (being here implies nonblock != 0), but the page
			 * may exist, so set the PTE to fault it in later. */
			err = install_file_pte(mm, vma, addr, pgoff, prot);
			if (err)
				return err;
		}

		len -= PAGE_SIZE;
		addr += PAGE_SIZE;
		pgoff++;
	}
	return 0;
}

#ifdef CONFIG_NUMA
int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct inode *i = vma->vm_file->f_dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}

struct mempolicy *
shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
{
	struct inode *i = vma->vm_file->f_dentry->d_inode;
	unsigned long idx;

	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
	}
	retval = 0;
out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}
static struct inode *
shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return NULL;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blksize = PAGE_CACHE_SIZE;
		inode->i_blocks = 0;
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		INIT_LIST_HEAD(&info->swaplist);

		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy, sbinfo->policy,
							&sbinfo->policy_nodes);
			break;
		case S_IFDIR:
			inode->i_nlink++;
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, MPOL_DEFAULT,
						NULL);
			break;
		}
	} else if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	return inode;
}
#ifdef CONFIG_TMPFS
static struct inode_operations shmem_symlink_inode_operations;
static struct inode_operations shmem_symlink_inline_operations;

/*
 * Normally tmpfs makes no use of shmem_prepare_write, but it
 * lets a tmpfs file be used read-write below the loop driver.
 */
static int
shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{
	struct inode *inode = page->mapping->host;
	return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL);
}

static ssize_t
shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct inode	*inode = file->f_dentry->d_inode;
	loff_t		pos;
	unsigned long	written;
	ssize_t		err;

	if ((ssize_t) count < 0)
		return -EINVAL;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	mutex_lock(&inode->i_mutex);

	pos = *ppos;
	written = 0;

	err = generic_write_checks(file, &pos, &count, 0);
	if (err || !count)
		goto out;

	err = remove_suid(file->f_dentry);
	if (err)
		goto out;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;

	do {
		struct page *page = NULL;
		unsigned long bytes, index, offset;
		char *kaddr;
		int left;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		/*
		 * We don't hold page lock across copy from user -
		 * what would it guard against? - so no deadlock here.
		 * But it still may be a good idea to prefault below.
		 */

		err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
		if (err)
			break;

		left = bytes;
		if (PageHighMem(page)) {
			volatile unsigned char dummy;
			__get_user(dummy, buf);
			__get_user(dummy, buf + bytes - 1);

			kaddr = kmap_atomic(page, KM_USER0);
			left = __copy_from_user_inatomic(kaddr + offset,
							buf, bytes);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if (left) {
			kaddr = kmap(page);
			left = __copy_from_user(kaddr + offset, buf, bytes);
			kunmap(page);
		}

		written += bytes;
		count -= bytes;
		pos += bytes;
		buf += bytes;
		if (pos > inode->i_size)
			i_size_write(inode, pos);

		flush_dcache_page(page);
		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		if (left) {
			pos -= left;
			written -= left;
			err = -EFAULT;
			break;
		}

		/*
		 * Our dirty pages are not counted in nr_dirty,
		 * and we do not attempt to balance dirty pages.
		 */

		cond_resched();
	} while (count);

	*ppos = pos;
	if (written)
		err = written;
out:
	mutex_unlock(&inode->i_mutex);
	return err;
}
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index, offset;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		unsigned long end_index, nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}
static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	read_descriptor_t desc;

	if ((ssize_t) count < 0)
		return -EINVAL;
	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.buf = buf;
	desc.error = 0;

	do_shmem_file_read(filp, ppos, &desc, file_read_actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}

static ssize_t shmem_file_sendfile(struct file *in_file, loff_t *ppos,
			 size_t count, read_actor_t actor, void *target)
{
	read_descriptor_t desc;

	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.data = target;
	desc.error = 0;

	do_shmem_file_read(in_file, ppos, &desc, actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}

static int shmem_statfs(struct super_block *sb, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	spin_lock(&sbinfo->stat_lock);
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	spin_unlock(&sbinfo->stat_lock);
	return 0;
}
/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
	int error = -ENOSPC;

	if (inode) {
		error = security_inode_init_security(inode, dir, NULL, NULL,
						     NULL);
		if (error) {
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
			error = 0;
		}
		if (dir->i_mode & S_ISGID) {
			inode->i_gid = dir->i_gid;
			if (S_ISDIR(mode))
				inode->i_mode |= S_ISGID;
		}
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	dir->i_nlink++;
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inode->i_nlink++;
	atomic_inc(&inode->i_count);	/* New dentry reference */
	dget(dentry);		/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
	return 0;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_inodes) {
			spin_lock(&sbinfo->stat_lock);
			sbinfo->free_inodes++;
			spin_unlock(&sbinfo->stat_lock);
		}
	}

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inode->i_nlink--;
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	dir->i_nlink--;
	return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			old_dir->i_nlink--;
	} else if (they_are_dirs) {
		old_dir->i_nlink--;
		new_dir->i_nlink++;
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}
static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page = NULL;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, NULL, NULL,
					     NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= (char *)inode - (char *)info) {
		/* do it inline */
		memcpy(info, symname, len);
		inode->i_op = &shmem_symlink_inline_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		page_cache_release(page);
	}
	if (dir->i_mode & S_ISGID)
		inode->i_gid = dir->i_gid;
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}

static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
	return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
	return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}

static struct inode_operations shmem_symlink_inline_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link_inline,
};

static struct inode_operations shmem_symlink_inode_operations = {
	.truncate	= shmem_truncate,
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
};
static int shmem_parse_options(char *options, int *mode, uid_t *uid,
	gid_t *gid, unsigned long *blocks, unsigned long *inodes,
	int *policy, nodemask_t *policy_nodes)
{
	char *this_char, *value, *rest;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			return 1;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			*blocks = size >> PAGE_CACHE_SHIFT;
		} else if (!strcmp(this_char,"nr_blocks")) {
			*blocks = memparse(value,&rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			*inodes = memparse(value,&rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (!mode)
				continue;
			*mode = simple_strtoul(value,&rest,8);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (!uid)
				continue;
			*uid = simple_strtoul(value,&rest,0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (!gid)
				continue;
			*gid = simple_strtoul(value,&rest,0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mpol")) {
			if (shmem_parse_mpol(value,policy,policy_nodes))
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			return 1;
		}
	}
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
	return 1;
}
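
/*
 * Example mount option string, for illustration (editor's note):
 *
 *	mount -t tmpfs -o size=50%,nr_inodes=10240,mode=1777 tmpfs /mnt
 *
 * "size" goes through memparse, so plain bytes and k/m/g suffixes work,
 * and a trailing '%' means a percentage of totalram_pages; "mode" is
 * parsed as octal, "uid" and "gid" as decimal.
 */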
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	unsigned long max_blocks = sbinfo->max_blocks;
	unsigned long max_inodes = sbinfo->max_inodes;
	int policy = sbinfo->policy;
	nodemask_t policy_nodes = sbinfo->policy_nodes;
	unsigned long blocks;
	unsigned long inodes;
	int error = -EINVAL;

	if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks,
				&max_inodes, &policy, &policy_nodes))
		return error;

	spin_lock(&sbinfo->stat_lock);
	blocks = sbinfo->max_blocks - sbinfo->free_blocks;
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (max_blocks < blocks)
		goto out;
	if (max_inodes < inodes)
		goto out;
	/*
	 * Those tests also disallow limited->unlimited while any are in
	 * use, so i_blocks will always be zero when max_blocks is zero;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (max_blocks && !sbinfo->max_blocks)
		goto out;
	if (max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks  = max_blocks;
	sbinfo->free_blocks = max_blocks - blocks;
	sbinfo->max_inodes  = max_inodes;
	sbinfo->free_inodes = max_inodes - inodes;
	sbinfo->policy = policy;
	sbinfo->policy_nodes = policy_nodes;
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}
#endif

static void shmem_put_super(struct super_block *sb)
{
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
}
static int shmem_fill_super(struct super_block *sb,
			    void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;
	int mode   = S_IRWXUGO | S_ISVTX;
	uid_t uid = current->fsuid;
	gid_t gid = current->fsgid;
	int err = -ENOMEM;
	struct shmem_sb_info *sbinfo;
	unsigned long blocks = 0;
	unsigned long inodes = 0;
	int policy = MPOL_DEFAULT;
	nodemask_t policy_nodes = node_online_map;

#ifdef CONFIG_TMPFS
	/*
	 * By default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & MS_NOUSER)) {
		blocks = totalram_pages / 2;
		inodes = totalram_pages - totalhigh_pages;
		if (inodes > blocks)
			inodes = blocks;
		if (shmem_parse_options(data, &mode, &uid, &gid, &blocks,
					&inodes, &policy, &policy_nodes))
			return -EINVAL;
	}
#else
	sb->s_flags |= MS_NOUSER;
#endif

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_blocks = blocks;
	sbinfo->free_blocks = blocks;
	sbinfo->max_inodes = inodes;
	sbinfo->free_inodes = inodes;
	sbinfo->policy = policy;
	sbinfo->policy_nodes = policy_nodes;

	sb->s_fs_info = sbinfo;
	sb->s_maxbytes = SHMEM_MAX_BYTES;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;

	inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
	if (!inode)
		goto failed;
	inode->i_uid = uid;
	inode->i_gid = gid;
	root = d_alloc_root(inode);
	if (!root)
		goto failed_iput;
	sb->s_root = root;
	return 0;

failed_iput:
	iput(inode);
failed:
	shmem_put_super(sb);
	return err;
}
static kmem_cache_t *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *p;
	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, SLAB_KERNEL);
	if (!p)
		return NULL;
	return &p->vfs_inode;
}

static void shmem_destroy_inode(struct inode *inode)
{
	if ((inode->i_mode & S_IFMT) == S_IFREG) {
		/* only struct inode is valid if it's an inline symlink */
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	}
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
{
	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		inode_init_once(&p->vfs_inode);
	}
}

static int init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, 0, init_once, NULL);
	if (shmem_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	if (kmem_cache_destroy(shmem_inode_cachep))
		printk(KERN_INFO "shmem_inode_cache: not all structures were freed\n");
}

static struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
#ifdef CONFIG_TMPFS
	.prepare_write	= shmem_prepare_write,
	.commit_write	= simple_commit_write,
#endif
	.migratepage	= migrate_page,
};

static struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
#ifdef CONFIG_TMPFS
	.llseek		= generic_file_llseek,
	.read		= shmem_file_read,
	.write		= shmem_file_write,
	.fsync		= simple_sync_file,
	.sendfile	= shmem_file_sendfile,
#endif
};

static struct inode_operations shmem_inode_operations = {
	.truncate	= shmem_truncate,
	.setattr	= shmem_notify_change,
	.truncate_range	= shmem_truncate_range,
};

static struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#endif
};

static struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
#endif
	.delete_inode	= shmem_delete_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};

static struct vm_operations_struct shmem_vm_ops = {
	.nopage		= shmem_nopage,
	.populate	= shmem_populate,
#ifdef CONFIG_NUMA
	.set_policy     = shmem_set_policy,
	.get_policy     = shmem_get_policy,
#endif
};

static struct super_block *shmem_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return get_sb_nodev(fs_type, flags, data, shmem_fill_super);
}

static struct file_system_type tmpfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.get_sb		= shmem_get_sb,
	.kill_sb	= kill_litter_super,
};

static struct vfsmount *shm_mnt;

static int __init init_tmpfs(void)
{
	int error;

	error = init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&tmpfs_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}
#ifdef CONFIG_TMPFS
	devfs_mk_dir("shm");
#endif
	shm_mnt = do_kern_mount(tmpfs_fs_type.name, MS_NOUSER,
				tmpfs_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&tmpfs_fs_type);
out2:
	destroy_inodecache();
out3:
	shm_mnt = ERR_PTR(error);
	return error;
}
module_init(init_tmpfs)
/*
 * shmem_file_setup - get an unlinked file living in tmpfs
 *
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 */
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
{
	int error;
	struct file *file;
	struct inode *inode;
	struct dentry *dentry, *root;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return (void *)shm_mnt;

	if (size < 0 || size > SHMEM_MAX_BYTES)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	error = -ENOMEM;
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	root = shm_mnt->mnt_root;
	dentry = d_alloc(root, &this);
	if (!dentry)
		goto put_memory;

	error = -ENFILE;
	file = get_empty_filp();
	if (!file)
		goto put_dentry;

	error = -ENOSPC;
	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto close_file;

	SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
	d_instantiate(dentry, inode);
	inode->i_size = size;
	inode->i_nlink = 0;	/* It is unlinked */
	file->f_vfsmnt = mntget(shm_mnt);
	file->f_dentry = dentry;
	file->f_mapping = inode->i_mapping;
	file->f_op = &shmem_file_operations;
	file->f_mode = FMODE_WRITE | FMODE_READ;
	return file;

close_file:
	put_filp(file);
put_dentry:
	dput(dentry);
put_memory:
	shmem_unacct_size(flags, size);
	return ERR_PTR(error);
}
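
/*
 * Typical use, for illustration (editor's sketch, mirroring what
 * shmem_zero_setup below does): an in-kernel caller creates an
 * unlinked tmpfs file and holds it wherever a struct file is needed:
 *
 *	struct file *file = shmem_file_setup("dev/zero", size, VM_ACCOUNT);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	... use file->f_mapping, or install file in a vma ...
 *	fput(file);	(drop the reference when done)
 *
 * Passing VM_ACCOUNT makes shmem_acct_size charge the full size up
 * front; without it, pages are accounted one by one as allocated.
 */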
/*
 * shmem_zero_setup - setup a shared anonymous mapping
 *
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}