/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000-2001 Christoph Rohland
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/ima.h>
static struct vfsmount *shm_mnt;
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */
#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>
#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
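/*
 * Worked example (illustrative, not part of the original source),
 * assuming PAGE_CACHE_SIZE == 4096, a 32-bit unsigned long and
 * SHMEM_NR_DIRECT == 16:
 *
 *	ENTRIES_PER_PAGE     = 4096 / 4         = 1024
 *	ENTRIES_PER_PAGEPAGE = 1024 * 1024      = 1048576
 *	SHMEM_MAX_INDEX      = 16 + 524288*1025 = 537395216 pages
 *	SHMEM_MAX_BYTES      = 537395216 << 12  ~= 2 TiB
 */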
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	 VM_READ
#define SHMEM_TRUNCATE	 VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT	 64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20
/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
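/*
 * Illustrative numbers (not part of the original source): with 4K pages
 * on a 4GB machine, totalram_pages is about 1048576, so an unconfigured
 * mount defaults to roughly 524288 blocks (2GB of tmpfs data) and, when
 * all of that RAM is lowmem, 524288 inodes.
 */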
static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type);
static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 *
	 * Mobility flags are masked out as swap vectors cannot move
	 */
	return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
				PAGE_CACHE_SHIFT-PAGE_SHIFT);
}
static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}
/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_kern(VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static struct vm_operations_struct shmem_vm_ops;
static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
	.unplug_io_fn	= default_unplug_io_fn,
};
static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);
static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_blocks += pages;
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);
	}
}
static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}
static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}
/**
 * shmem_recalc_inode - recalculate the size of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}
/**
 * shmem_swp_entry - find the swap vector position in the info structure
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         NULL.
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 */
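/*
 * Minimal sketch (added for illustration, not part of the original
 * source) of the decomposition performed below, for the artificial
 * ENTRIES_PER_PAGE = 4 layout above and page index 26:
 *
 *	index -= SHMEM_NR_DIRECT;		// 26 - 16 = 10
 *	offset = index % ENTRIES_PER_PAGE;	// 10 % 4  = 2
 *	index /= ENTRIES_PER_PAGE;		// 10 / 4  = 2
 *
 * index 2 falls in the triple indirect half of the top directory, so
 * page 26 is found at offset 2 of the first subdir reached through the
 * first triple indirect pointer (the subdir covering pages 24-27).
 */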
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}
static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
		struct page *page = kmap_atomic_to_page(entry);
		set_page_private(page, page_private(page) + incdec);
	}
}
/**
 * shmem_swp_alloc - get the position of the swap entry for the page.
 * @info:	info structure for the inode
 * @index:	index of the page to find
 * @sgp:	check and recheck i_size? skip allocation?
 *
 * If the entry does not exist, allocate it.
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test free_blocks against 1 not 0, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
		 */
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks <= 1) {
				spin_unlock(&sbinfo->stat_lock);
				return ERR_PTR(-ENOSPC);
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
		if (page)
			set_page_private(page, 0);
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}
/**
 * shmem_free_swp - free some swap entries in a directory
 * @dir:        pointer to the directory
 * @edir:       pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
						spinlock_t *punch_lock)
{
	spinlock_t *punch_unlock = NULL;
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			if (unlikely(punch_lock)) {
				punch_unlock = punch_lock;
				punch_lock = NULL;
				spin_lock(punch_unlock);
				if (!ptr->val)
					continue;
			}
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	if (punch_unlock)
		spin_unlock(punch_unlock);
	return freed;
}
static int shmem_map_and_free_swp(struct page *subdir, int offset,
		int limit, struct page ***dir, spinlock_t *punch_lock)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size,
							punch_lock);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
			ptr = shmem_swp_map(subdir);
		}
	}
	shmem_swp_unmap(ptr);
	return freed;
}
static void shmem_free_pages(struct list_head *next)
{
	struct page *page;
	int freed = 0;

	do {
		page = container_of(next, struct page, lru);
		next = next->next;
		shmem_dir_free(page);
		freed++;
		if (freed >= LATENCY_LIMIT) {
			cond_resched();
			freed = 0;
		}
	} while (next);
}
static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	unsigned long diroff;
	struct page **dir;
	struct page *topdir;
	struct page *middir;
	struct page *subdir;
	swp_entry_t *ptr;
	LIST_HEAD(pages_to_free);
	long nr_pages_to_free = 0;
	long nr_swaps_freed = 0;
	int offset;
	int freed;
	int punch_hole;
	spinlock_t *needs_lock;
	spinlock_t *punch_lock;
	unsigned long upper_limit;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	if (likely(end == (loff_t) -1)) {
		limit = info->next_index;
		upper_limit = SHMEM_MAX_INDEX;
		info->next_index = idx;
		needs_lock = NULL;
		punch_hole = 0;
	} else {
		if (end + 1 >= inode->i_size) {	/* we may free a little more */
			limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
							PAGE_CACHE_SHIFT;
			upper_limit = SHMEM_MAX_INDEX;
		} else {
			limit = (end + 1) >> PAGE_CACHE_SHIFT;
			upper_limit = limit;
		}
		needs_lock = &info->lock;
		punch_hole = 1;
	}

	topdir = info->i_indirect;
	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
		info->i_indirect = NULL;
		nr_pages_to_free++;
		list_add(&topdir->lru, &pages_to_free);
	}
	spin_unlock(&info->lock);

	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
	}

	/*
	 * If there are no indirect blocks or we are punching a hole
	 * below indirect blocks, nothing to be done.
	 */
	if (!topdir || limit <= SHMEM_NR_DIRECT)
		goto done2;

	/*
	 * The truncation case has already dropped info->lock, and we're safe
	 * because i_size and next_index have already been lowered, preventing
	 * access beyond.  But in the punch_hole case, we still need to take
	 * the lock when updating the swap directory, because there might be
	 * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
	 * shmem_writepage.  However, whenever we find we can remove a whole
	 * directory page (not at the misaligned start or end of the range),
	 * we first NULLify its pointer in the level above, and then have no
	 * need to take the lock when updating its contents: needs_lock and
	 * punch_lock (either pointing to info->lock or NULL) manage this.
	 */

	upper_limit -= SHMEM_NR_DIRECT;
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;
	idx -= offset;

	dir = shmem_dir_map(topdir);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2) {
		middir = topdir;
		diroff = idx/ENTRIES_PER_PAGE;
	} else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		middir = *dir;
		if (*dir) {
			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!diroff && !offset && upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(middir);
		} else {
			diroff = 0;
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(topdir) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			middir = *dir;
			if (punch_hole)
				needs_lock = &info->lock;
			if (upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			cond_resched();
			dir = shmem_dir_map(middir);
			diroff = 0;
		}
		punch_lock = needs_lock;
		subdir = dir[diroff];
		if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
			if (needs_lock) {
				spin_lock(needs_lock);
				dir[diroff] = NULL;
				spin_unlock(needs_lock);
				punch_lock = NULL;
			} else
				dir[diroff] = NULL;
			nr_pages_to_free++;
			list_add(&subdir->lru, &pages_to_free);
		}
		if (subdir && page_private(subdir) /* has swap entries */) {
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_map_and_free_swp(subdir,
					offset, size, &dir, punch_lock);
			if (!dir)
				dir = shmem_dir_map(middir);
			nr_swaps_freed += freed;
			if (offset || punch_lock) {
				spin_lock(&info->lock);
				set_page_private(subdir,
					page_private(subdir) - freed);
				spin_unlock(&info->lock);
			} else
				BUG_ON(page_private(subdir) != freed);
		}
		offset = 0;
	}
done1:
	shmem_dir_unmap(dir);
done2:
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since vmtruncate or
		 * generic_delete_inode did it, before we lowered next_index.
		 * Also, though shmem_getpage checks i_size before adding to
		 * cache, no recheck after: so fix the narrow window there too.
		 *
		 * Recalling truncate_inode_pages_range and unmap_mapping_range
		 * every time for punch_hole (which never got a chance to clear
		 * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
		 * yet hardly ever necessary: try to optimize them out later.
		 */
		truncate_inode_pages_range(inode->i_mapping, start, end);
		if (punch_hole)
			unmap_mapping_range(inode->i_mapping, start,
							end - start, 1);
	}

	spin_lock(&info->lock);
	info->flags &= ~SHMEM_TRUNCATE;
	info->swapped -= nr_swaps_freed;
	if (nr_pages_to_free)
		shmem_free_blocks(inode, nr_pages_to_free);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	/*
	 * Empty swap vector directory pages to be freed?
	 */
	if (!list_empty(&pages_to_free)) {
		pages_to_free.prev->next = NULL;
		shmem_free_pages(pages_to_free.next);
	}
}
static void shmem_truncate(struct inode *inode)
{
	shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
}
static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct page *page = NULL;
	int error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		if (attr->ia_size < inode->i_size) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					attr->ia_size>>PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (attr->ia_size) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}
	}

	error = inode_change_ok(inode, attr);
	if (!error)
		error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (!error && (attr->ia_valid & ATTR_MODE))
		error = generic_acl_chmod(inode, &shmem_acl_ops);
#endif
	if (page)
		page_cache_release(page);
	return error;
}
static void shmem_delete_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_op->truncate == shmem_truncate) {
		truncate_inode_pages(inode->i_mapping, 0);
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate(inode);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}
	BUG_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}
static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}
static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct inode *inode;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;
	int error;

	idx = 0;
	ptr = info->i_direct;
	spin_lock(&info->lock);
	if (!info->swapped) {
		list_del_init(&info->swaplist);
		goto lost2;
	}
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0)
		goto found;
	if (!info->i_indirect)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			if (cond_resched_lock(&info->lock)) {
				/* check it has not been truncated */
				if (limit > info->next_index) {
					limit = info->next_index;
					if (idx >= limit)
						goto lost2;
				}
			}
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && page_private(subdir)) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			shmem_swp_unmap(ptr);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				goto found;
			}
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	inode = igrab(&info->vfs_inode);
	spin_unlock(&info->lock);

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_delete_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.  We
	 * could avoid doing it if inode NULL; or use this minor optimization.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);
	mutex_unlock(&shmem_swaplist_mutex);

	error = 1;
	if (!inode)
		goto out;
	/*
	 * Charge page using GFP_KERNEL while we can wait.
	 * Charged back to the user(not to caller) when swap account is used.
	 * add_to_page_cache() will be called with GFP_NOWAIT.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	error = radix_tree_preload(GFP_KERNEL);
	if (error) {
		mem_cgroup_uncharge_cache_page(page);
		goto out;
	}
	error = 1;

	spin_lock(&info->lock);
	ptr = shmem_swp_entry(info, idx, NULL);
	if (ptr && ptr->val == entry.val) {
		error = add_to_page_cache_locked(page, inode->i_mapping,
						idx, GFP_NOWAIT);
		/* does mem_cgroup_uncharge_cache_page on error */
	} else	/* we must compensate for our precharge above */
		mem_cgroup_uncharge_cache_page(page);

	if (error == -EEXIST) {
		struct page *filepage = find_get_page(inode->i_mapping, idx);
		error = 1;
		if (filepage) {
			/*
			 * There might be a more uptodate page coming down
			 * from a stacked writepage: forget our swappage if so.
			 */
			if (PageUptodate(filepage))
				error = 0;
			page_cache_release(filepage);
		}
	}
	if (!error) {
		delete_from_swap_cache(page);
		set_page_dirty(page);
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr, 0);
		swap_free(entry);
		error = 1;	/* not an error, but entry was found */
	}
	if (ptr)
		shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	radix_tree_preload_end();
out:
	unlock_page(page);
	page_cache_release(page);
	iput(inode);		/* allows for NULL */
	return error;
}
/*
 * shmem_unuse() searches for a swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	struct list_head *p, *next;
	struct shmem_inode_info *info;
	int found = 0;

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		found = shmem_unuse_inode(info, entry, page);
		if (found)
			goto out;
	}
	mutex_unlock(&shmem_swaplist_mutex);
out:	return found;	/* 0 or 1 or -ENOMEM */
}
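/*
 * Caller context, sketched for illustration (not part of the original
 * source): shmem_unuse() runs on behalf of swapoff(2), pulling tmpfs
 * pages back in from the swap area being disabled, e.g.:
 *
 *	#include <sys/swap.h>
 *	if (swapoff("/dev/sdXN") != 0)	// hypothetical device name
 *		perror("swapoff");
 */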
/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * may use the ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for pdflush or sync.  However, in those cases, we do still
	 * want to check if there's a redundant swappage to be discarded.
	 */
	if (wbc->for_reclaim)
		swap = get_swap_page();
	else
		swap.val = 0;

	spin_lock(&info->lock);
	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	if (entry->val) {
		/*
		 * The more uptodate page coming down from a stacked
		 * writepage should replace our old swappage.
		 */
		free_swap_and_cache(*entry);
		shmem_swp_set(info, entry, 0);
	}
	shmem_recalc_inode(inode);

	if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		remove_from_page_cache(page);
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		if (list_empty(&info->swaplist))
			inode = igrab(inode);
		else
			inode = NULL;
		spin_unlock(&info->lock);
		swap_duplicate(swap);
		BUG_ON(page_mapped(page));
		page_cache_release(page);	/* pagecache ref */
		set_page_dirty(page);
		unlock_page(page);
		if (inode) {
			mutex_lock(&shmem_swaplist_mutex);
			/* move instead of add in case we're racing */
			list_move_tail(&info->swaplist, &shmem_swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
			iput(inode);
		}
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	swap_free(swap);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}
#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol, 1);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */
static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct mempolicy mpol, *spol;
	struct vm_area_struct pvma;
	struct page *page;

	spol = mpol_cond_copy(&mpol,
				mpol_shared_policy_lookup(&info->policy, idx));

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = spol;
	page = swapin_readahead(entry, gfp, &pvma, 0);
	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);

	/*
	 * alloc_page_vma() will drop the shared policy reference
	 */
	return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return swapin_readahead(entry, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */
#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif
/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
			struct page **pagep, enum sgp_type sgp, int *type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *filepage = *pagep;
	struct page *swappage;
	swp_entry_t *entry;
	swp_entry_t swap;
	gfp_t gfp;
	int error;

	if (idx >= SHMEM_MAX_INDEX)
		return -EFBIG;

	if (type)
		*type = 0;

	/*
	 * Normally, filepage is NULL on entry, and either found
	 * uptodate immediately, or allocated and zeroed, or read
	 * in under swappage, which is then assigned to filepage.
	 * But shmem_readpage (required for splice) passes in a locked
	 * filepage, which may be found not uptodate by other callers
	 * too, and may need to be copied from the swappage read in.
	 */
repeat:
	if (!filepage)
		filepage = find_lock_page(mapping, idx);
	if (filepage && PageUptodate(filepage))
		goto done;
	error = 0;
	gfp = mapping_gfp_mask(mapping);
	if (!filepage) {
		/*
		 * Try to preload while we can wait, to not make a habit of
		 * draining atomic reserves; but don't latch on to this cpu.
		 */
		error = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
		if (error)
			goto failed;
		radix_tree_preload_end();
	}

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	entry = shmem_swp_alloc(info, idx, sgp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		error = PTR_ERR(entry);
		goto failed;
	}
	swap = *entry;

	if (swap.val) {
		/* Look it up and read it in.. */
		swappage = lookup_swap_cache(swap);
		if (!swappage) {
			shmem_swp_unmap(entry);
			/* here we actually do the io */
			if (type && !(*type & VM_FAULT_MAJOR)) {
				__count_vm_event(PGMAJFAULT);
				*type |= VM_FAULT_MAJOR;
			}
			spin_unlock(&info->lock);
			swappage = shmem_swapin(swap, gfp, info, idx);
			if (!swappage) {
				spin_lock(&info->lock);
				entry = shmem_swp_alloc(info, idx, sgp);
				if (IS_ERR(entry))
					error = PTR_ERR(entry);
				else {
					if (entry->val == swap.val)
						error = -ENOMEM;
					shmem_swp_unmap(entry);
				}
				spin_unlock(&info->lock);
				if (error)
					goto failed;
				goto repeat;
			}
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (!trylock_page(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (PageWriteback(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_writeback(swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (!PageUptodate(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			error = -EIO;
			goto failed;
		}

		if (filepage) {
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			copy_highpage(filepage, swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			flush_dcache_page(filepage);
			SetPageUptodate(filepage);
			set_page_dirty(filepage);
			swap_free(swap);
		} else if (!(error = add_to_page_cache_locked(swappage, mapping,
					idx, GFP_NOWAIT))) {
			info->flags |= SHMEM_PAGEIN;
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			filepage = swappage;
			set_page_dirty(filepage);
			swap_free(swap);
		} else {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			if (error == -ENOMEM) {
				/* allow reclaim from this memory cgroup */
				error = mem_cgroup_shrink_usage(swappage,
								current->mm,
								gfp);
				if (error) {
					unlock_page(swappage);
					page_cache_release(swappage);
					goto failed;
				}
			}
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
	} else if (sgp == SGP_READ && !filepage) {
		shmem_swp_unmap(entry);
		filepage = find_get_page(mapping, idx);
		if (filepage &&
		    (!PageUptodate(filepage) || !trylock_page(filepage))) {
			spin_unlock(&info->lock);
			wait_on_page_locked(filepage);
			page_cache_release(filepage);
			filepage = NULL;
			goto repeat;
		}
		spin_unlock(&info->lock);
	} else {
		shmem_swp_unmap(entry);
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks == 0 ||
			    shmem_acct_block(info->flags)) {
				spin_unlock(&sbinfo->stat_lock);
				spin_unlock(&info->lock);
				error = -ENOSPC;
				goto failed;
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		} else if (shmem_acct_block(info->flags)) {
			spin_unlock(&info->lock);
			error = -ENOSPC;
			goto failed;
		}

		if (!filepage) {
			int ret;

			spin_unlock(&info->lock);
			filepage = shmem_alloc_page(gfp, info, idx);
			if (!filepage) {
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				error = -ENOMEM;
				goto failed;
			}
			SetPageSwapBacked(filepage);

			/* Precharge page while we can wait, compensate after */
			error = mem_cgroup_cache_charge(filepage, current->mm,
					GFP_KERNEL);
			if (error) {
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				goto failed;
			}

			spin_lock(&info->lock);
			entry = shmem_swp_alloc(info, idx, sgp);
			if (IS_ERR(entry))
				error = PTR_ERR(entry);
			else {
				swap = *entry;
				shmem_swp_unmap(entry);
			}
			ret = error || swap.val;
			if (ret)
				mem_cgroup_uncharge_cache_page(filepage);
			else
				ret = add_to_page_cache_lru(filepage, mapping,
						idx, GFP_NOWAIT);
			/*
			 * At add_to_page_cache_lru() failure, uncharge will
			 * be done automatically.
			 */
			if (ret) {
				spin_unlock(&info->lock);
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				if (error)
					goto failed;
				goto repeat;
			}
			info->flags |= SHMEM_PAGEIN;
		}

		info->alloced++;
		spin_unlock(&info->lock);
		clear_highpage(filepage);
		flush_dcache_page(filepage);
		SetPageUptodate(filepage);
		if (sgp == SGP_DIRTY)
			set_page_dirty(filepage);
	}
done:
	*pagep = filepage;
	return 0;

failed:
	if (*pagep != filepage) {
		unlock_page(filepage);
		page_cache_release(filepage);
	}
	return error;
}
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret;

	if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	return ret | VM_FAULT_LOCKED;
}
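/*
 * Userspace view, sketched for illustration (not part of the original
 * source): the first touch of each mapped tmpfs page arrives here, and
 * touching beyond i_size gets the VM_FAULT_SIGBUS above.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/mman.h>
 *
 *	int fd = open("/dev/shm/example", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 4096);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	p[0] = 1;	// faults one page in via shmem_fault()
 */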
#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	unsigned long idx;

	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
		scan_mapping_unevictable_pages(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}
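/*
 * Illustrative caller (not part of the original source): SysV
 * shmctl(2) with SHM_LOCK is one path into shmem_lock(), keeping the
 * segment's pages from ever being written to swap:
 *
 *	#include <sys/shm.h>
 *
 *	int id = shmget(IPC_PRIVATE, 65536, IPC_CREAT | 0600);
 *	if (shmctl(id, SHM_LOCK, NULL) != 0)	// needs lock privilege
 *		perror("shmctl");
 */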
static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}
static struct inode *shmem_get_inode(struct super_block *sb, int mode,
					dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = current_fsgid();
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_symlink_inline_operations;
#ifdef CONFIG_TMPFS
/*
 * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin;
 * but providing them allows a tmpfs file to be used for splice, sendfile, and
 * below the loop driver, in the generic fashion that many filesystems support.
 */
static int shmem_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
	unlock_page(page);
	return error;
}
static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	*pagep = NULL;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	unlock_page(page);
	set_page_dirty(page);
	page_cache_release(page);

	return copied;
}
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index, offset;
	enum sgp_type sgp = SGP_READ;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		unsigned long end_index, nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}
static ssize_t shmem_file_aio_read(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base;
		desc.count = iov[seg].iov_len;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_shmem_file_read(filp, ppos, &desc, file_read_actor);
		retval += desc.written;
		if (desc.error) {
			retval = retval ?: desc.error;
			break;
		}
		if (desc.count > 0)
			break;
	}
	return retval;
}

static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	spin_lock(&sbinfo->stat_lock);
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	spin_unlock(&sbinfo->stat_lock);
	return 0;
}
/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, mode, dev, VM_NORESERVE);
	if (inode) {
		error = security_inode_init_security(inode, dir, NULL, NULL,
						     NULL);
		if (error) {
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
		}
		error = shmem_acl_init(inode, dir);
		if (error) {
			iput(inode);
			return error;
		}
		if (dir->i_mode & S_ISGID) {
			inode->i_gid = dir->i_gid;
			if (S_ISDIR(mode))
				inode->i_mode |= S_ISGID;
		}
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}
static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}
static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int ret;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	ret = shmem_reserve_inode(inode->i_sb);
	if (ret)
		goto out;

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	atomic_inc(&inode->i_count);	/* New dentry reference */
	dget(dentry);		/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}
/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * is removed.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}
static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page = NULL;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, NULL, NULL,
					     NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= (char *)inode - (char *)info) {
		/* do it inline */
		memcpy(info, symname, len);
		inode->i_op = &shmem_symlink_inline_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		unlock_page(page);
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		page_cache_release(page);
	}
	if (dir->i_mode & S_ISGID)
		inode->i_gid = dir->i_gid;
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}
static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
	return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
	if (page)
		unlock_page(page);
	return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}
static const struct inode_operations shmem_symlink_inline_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link_inline,
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.truncate	= shmem_truncate,
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
};
#ifdef CONFIG_TMPFS_POSIX_ACL
/*
 * Superblocks without xattr inode operations will get security.* xattr
 * support from the VFS "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */
static size_t shmem_xattr_security_list(struct inode *inode, char *list,
					size_t list_len, const char *name,
					size_t name_len)
{
	return security_inode_listsecurity(inode, list, list_len);
}

static int shmem_xattr_security_get(struct inode *inode, const char *name,
				    void *buffer, size_t size)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return xattr_getsecurity(inode, name, buffer, size);
}

static int shmem_xattr_security_set(struct inode *inode, const char *name,
				    const void *value, size_t size, int flags)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return security_inode_setsecurity(inode, name, value, size, flags);
}

static struct xattr_handler shmem_xattr_security_handler = {
	.prefix	= XATTR_SECURITY_PREFIX,
	.list	= shmem_xattr_security_list,
	.get	= shmem_xattr_security_get,
	.set	= shmem_xattr_security_set,
};

static struct xattr_handler *shmem_xattr_handlers[] = {
	&shmem_xattr_acl_access_handler,
	&shmem_xattr_acl_default_handler,
	&shmem_xattr_security_handler,
	NULL
};
#endif
static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct inode *inode;
	struct dentry *dentry = NULL;
	u64 inum = fid->raw[2];
	inum = (inum << 32) | fid->raw[1];

	if (fh_len < 3)
		return NULL;

	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
			shmem_match, fid->raw);
	if (inode) {
		dentry = d_find_alias(inode);
		iput(inode);
	}

	return dentry;
}

static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
				int connectable)
{
	struct inode *inode = dentry->d_inode;

	if (*len < 3)
		return 255;

	if (hlist_unhashed(&inode->i_hash)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (hlist_unhashed(&inode->i_hash))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;

	*len = 3;
	return 1;
}

static const struct export_operations shmem_export_ops = {
	.get_parent	= shmem_get_parent,
	.encode_fh	= shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
};
static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
			       bool remount)
{
	char *this_char, *value, *rest;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			return 1;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			sbinfo->max_blocks =
				DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
		} else if (!strcmp(this_char,"nr_blocks")) {
			sbinfo->max_blocks = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			sbinfo->max_inodes = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (remount)
				continue;
			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (remount)
				continue;
			sbinfo->uid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (remount)
				continue;
			sbinfo->gid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mpol")) {
			if (mpol_parse_str(value, &sbinfo->mpol, 1))
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			return 1;
		}
	}
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
	return 1;
}
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	struct shmem_sb_info config = *sbinfo;
	unsigned long blocks;
	unsigned long inodes;
	int error = -EINVAL;

	if (shmem_parse_options(data, &config, true))
		return error;

	spin_lock(&sbinfo->stat_lock);
	blocks = sbinfo->max_blocks - sbinfo->free_blocks;
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (config.max_blocks < blocks)
		goto out;
	if (config.max_inodes < inodes)
		goto out;
	/*
	 * Those tests also disallow limited->unlimited while any are in
	 * use, so i_blocks will always be zero when max_blocks is zero;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (config.max_blocks && !sbinfo->max_blocks)
		goto out;
	if (config.max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks  = config.max_blocks;
	sbinfo->free_blocks = config.max_blocks - blocks;
	sbinfo->max_inodes  = config.max_inodes;
	sbinfo->free_inodes = config.max_inodes - inodes;

	mpol_put(sbinfo->mpol);
	sbinfo->mpol        = config.mpol;	/* transfers initial ref */
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}
static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);

	if (sbinfo->max_blocks != shmem_default_max_blocks())
		seq_printf(seq, ",size=%luk",
			sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
	if (sbinfo->max_inodes != shmem_default_max_inodes())
		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
	if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
		seq_printf(seq, ",mode=%03o", sbinfo->mode);
	if (sbinfo->uid != 0)
		seq_printf(seq, ",uid=%u", sbinfo->uid);
	if (sbinfo->gid != 0)
		seq_printf(seq, ",gid=%u", sbinfo->gid);
	shmem_show_mpol(seq, sbinfo->mpol);
	return 0;
}
#endif /* CONFIG_TMPFS */
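/*
 * Illustrative usage (not part of the original source): the options
 * parsed and reported above map one-to-one onto mount(2) data, e.g.:
 *
 *	#include <sys/mount.h>
 *
 *	if (mount("tmpfs", "/mnt/tmp", "tmpfs", 0,
 *		  "size=512m,nr_inodes=10k,mode=700") != 0)
 *		perror("mount");
 *
 * "size=50%" is also accepted, scaling the limit by totalram_pages.
 */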
static void shmem_put_super(struct super_block *sb)
{
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
}
static int shmem_fill_super(struct super_block *sb,
			    void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;
	struct shmem_sb_info *sbinfo;
	int err = -ENOMEM;

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	sbinfo->max_blocks = 0;
	sbinfo->max_inodes = 0;
	sbinfo->mode = S_IRWXUGO | S_ISVTX;
	sbinfo->uid = current_fsuid();
	sbinfo->gid = current_fsgid();
	sbinfo->mpol = NULL;
	sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
	/*
	 * Per default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & MS_NOUSER)) {
		sbinfo->max_blocks = shmem_default_max_blocks();
		sbinfo->max_inodes = shmem_default_max_inodes();
		if (shmem_parse_options(data, sbinfo, false)) {
			err = -EINVAL;
			goto failed;
		}
	}
	sb->s_export_op = &shmem_export_ops;
#else
	sb->s_flags |= MS_NOUSER;
#endif

	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->free_blocks = sbinfo->max_blocks;
	sbinfo->free_inodes = sbinfo->max_inodes;

	sb->s_maxbytes = SHMEM_MAX_BYTES;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_xattr = shmem_xattr_handlers;
	sb->s_flags |= MS_POSIXACL;
#endif

	inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
	if (!inode)
		goto failed;
	inode->i_uid = sbinfo->uid;
	inode->i_gid = sbinfo->gid;
	root = d_alloc_root(inode);
	if (!root)
		goto failed_iput;
	sb->s_root = root;
	return 0;

failed_iput:
	iput(inode);
failed:
	shmem_put_super(sb);
	return err;
}
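
/*
 * Example of the defaults above (a sketch, assuming 4K pages): on a
 * machine with 512MB of RAM, totalram_pages is 131072, so
 * shmem_default_max_blocks() allows 65536 blocks (256MB) per user
 * mount, and shmem_default_max_inodes() caps inodes at one per page
 * of lowmem, but never more than half of RAM's worth of pages.
 */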
static struct kmem_cache *shmem_inode_cachep;
static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *p;
	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
	if (!p)
		return NULL;
	return &p->vfs_inode;
}
static void shmem_destroy_inode(struct inode *inode)
{
	if ((inode->i_mode & S_IFMT) == S_IFREG) {
		/* only struct inode is valid if it's an inline symlink */
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	}
	shmem_acl_destroy_inode(inode);
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}
static void init_once(void *foo)
{
	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
#ifdef CONFIG_TMPFS_POSIX_ACL
	p->i_acl = NULL;
	p->i_default_acl = NULL;
#endif
}
static int init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, SLAB_PANIC, init_once);
	return 0;
}
static void destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}
static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
	.readpage	= shmem_readpage,
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
#endif
	.migratepage	= migrate_page,
};
static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
#ifdef CONFIG_TMPFS
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= shmem_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.fsync		= simple_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
#endif
};
static const struct inode_operations shmem_inode_operations = {
	.truncate	= shmem_truncate,
	.setattr	= shmem_notify_change,
	.truncate_range	= shmem_truncate_range,
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.permission	= shmem_permission,
#endif
};
static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_notify_change,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.permission	= shmem_permission,
#endif
};
static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_notify_change,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.permission	= shmem_permission,
#endif
};
static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
	.show_options	= shmem_show_options,
#endif
	.delete_inode	= shmem_delete_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};
static struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};
static int shmem_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
}
static struct file_system_type tmpfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.get_sb		= shmem_get_sb,
	.kill_sb	= kill_litter_super,
};
static int __init init_tmpfs(void)
{
	int error;

	error = bdi_init(&shmem_backing_dev_info);
	if (error)
		goto out4;

	error = init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&tmpfs_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
				tmpfs_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&tmpfs_fs_type);
out2:
	destroy_inodecache();
out3:
	bdi_destroy(&shmem_backing_dev_info);
out4:
	shm_mnt = ERR_PTR(error);
	return error;
}
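
/*
 * Note on the error path above: the outN labels undo the successful
 * setup steps in reverse order (mount, register_filesystem, inode
 * cache, bdi), so a failure at any stage leaves nothing behind except
 * shm_mnt set to an ERR_PTR for later callers to detect.
 */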
#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

#include <linux/ramfs.h>
static struct file_system_type tmpfs_fs_type = {
	.name		= "tmpfs",
	.get_sb		= ramfs_get_sb,
	.kill_sb	= kill_litter_super,
};
static int __init init_tmpfs(void)
{
	BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);

	shm_mnt = kern_mount(&tmpfs_fs_type);
	BUG_ON(IS_ERR(shm_mnt));

	return 0;
}
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	return 0;
}
#define shmem_vm_ops				generic_file_vm_ops
#define shmem_file_operations			ramfs_file_operations
#define shmem_get_inode(sb, mode, dev, flags)	ramfs_get_inode(sb, mode, dev)
#define shmem_acct_size(flags, size)		0
#define shmem_unacct_size(flags, size)		do {} while (0)
#define SHMEM_MAX_BYTES				LLONG_MAX

#endif /* CONFIG_SHMEM */
/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
{
	int error;
	struct file *file;
	struct inode *inode;
	struct dentry *dentry, *root;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return (void *)shm_mnt;

	if (size < 0 || size > SHMEM_MAX_BYTES)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	error = -ENOMEM;
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	root = shm_mnt->mnt_root;
	dentry = d_alloc(root, &this);
	if (!dentry)
		goto put_memory;

	error = -ENFILE;
	file = get_empty_filp();
	if (!file)
		goto put_dentry;

	error = -ENOSPC;
	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0, flags);
	if (!inode)
		goto close_file;

	d_instantiate(dentry, inode);
	inode->i_size = size;
	inode->i_nlink = 0;	/* It is unlinked */
	init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ,
		  &shmem_file_operations);

#ifndef CONFIG_MMU
	error = ramfs_nommu_expand_for_mapping(inode, size);
	if (error)
		goto close_file;
#endif
	return file;

close_file:
	put_filp(file);
put_dentry:
	dput(dentry);
put_memory:
	shmem_unacct_size(flags, size);
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
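
/*
 * Usage sketch (a hypothetical caller, not from the original source) --
 * essentially what the SysV shared memory code does to back a segment
 * with an unlinked tmpfs file:
 *
 *	struct file *file;
 *
 *	file = shmem_file_setup("SYSV00000000", size, VM_NORESERVE);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	... map or read/write through file, fput(file) when done ...
 */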
/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ima_shm_check(file);
	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}
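
/*
 * For illustration: this is the path taken when userspace creates a
 * shared anonymous mapping, e.g.
 *
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * do_mmap_pgoff() calls shmem_zero_setup() to back the vma with an
 * unlinked "dev/zero" file on the internal tmpfs mount.
 */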
module_init(init_tmpfs)