/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000-2001 Christoph Rohland
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/ima.h>
static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */
#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>
/*
 * The maximum size of a shmem/tmpfs file is limited by the maximum size of
 * its triple-indirect swap vector - see illustration at shmem_swp_entry().
 *
 * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel,
 * but one eighth of that on a 64-bit kernel.  With 8kB page size, maximum
 * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
 * MAX_LFS_FILESIZE being then more restrictive than swap vector layout.
 *
 * We use / and * instead of shifts in the definitions below, so that the swap
 * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE.
 */
#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE ((unsigned long long)ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)

#define SHMSWP_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMSWP_MAX_BYTES (SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT)

#define SHMEM_MAX_BYTES  min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE)
#define SHMEM_MAX_INDEX  ((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT))

#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	 VM_READ
#define SHMEM_TRUNCATE	 VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT	 64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20
/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};
#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif
static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type);
static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 *
	 * Mobility flags are masked out as swap vectors cannot move
	 */
	return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
				PAGE_CACHE_SHIFT-PAGE_SHIFT);
}
static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}
static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}
static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}
static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}
static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}
static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}
static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}
/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_kern(VM_ACCT(size));
}
static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}
/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}
static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
	.unplug_io_fn	= default_unplug_io_fn,
};
static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);
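
/*
 * Give back pages to the free-block count of a size-limited mount;
 * unlimited mounts (max_blocks == 0) keep no such count.
 */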
static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_blocks += pages;
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);
	}
}
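
/*
 * Reserve one inode against the mount's inode limit, if any;
 * returns -ENOSPC once the limit has been reached.
 */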
static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}
static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}
/**
 * shmem_recalc_inode - recalculate the size of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}
/**
 * shmem_swp_entry - find the swap vector position in the info structure
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an addional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 *	      |	     +-> 20-23
 *	      |
 *	      +-->dir2 --> 24-27
 *	      |	       +-> 28-31
 *	      |	       +-> 32-35
 *	      |	       +-> 36-39
 *	      |
 *	      +-->dir3 --> 40-43
 *		       +-> 44-47
 *		       +-> 48-51
 *		       +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}
static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
		struct page *page = kmap_atomic_to_page(entry);
		set_page_private(page, page_private(page) + incdec);
	}
}
/**
 * shmem_swp_alloc - get the position of the swap entry for the page.
 * @info:	info structure for the inode
 * @index:	index of the page to find
 * @sgp:	check and recheck i_size? skip allocation?
 *
 * If the entry does not exist, allocate it.
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test free_blocks against 1 not 0, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
		 */
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks <= 1) {
				spin_unlock(&sbinfo->stat_lock);
				return ERR_PTR(-ENOSPC);
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
		if (page)
			set_page_private(page, 0);
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}
/**
 * shmem_free_swp - free some swap entries in a directory
 * @dir:        pointer to the directory
 * @edir:       pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
						spinlock_t *punch_lock)
{
	spinlock_t *punch_unlock = NULL;
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			if (unlikely(punch_lock)) {
				punch_unlock = punch_lock;
				punch_lock = NULL;
				spin_lock(punch_unlock);
				if (!ptr->val)
					continue;
			}
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	if (punch_unlock)
		spin_unlock(punch_unlock);
	return freed;
}
static int shmem_map_and_free_swp(struct page *subdir, int offset,
		int limit, struct page ***dir, spinlock_t *punch_lock)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size,
							punch_lock);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
			ptr = shmem_swp_map(subdir);
		}
	}
	shmem_swp_unmap(ptr);
	return freed;
}
static void shmem_free_pages(struct list_head *next)
{
	struct page *page;
	int freed = 0;

	do {
		page = container_of(next, struct page, lru);
		next = next->next;
		shmem_dir_free(page);
		freed++;
		if (freed >= LATENCY_LIMIT) {
			cond_resched();
			freed = 0;
		}
	} while (next);
}
static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	unsigned long diroff;
	struct page **dir;
	struct page *topdir;
	struct page *middir;
	struct page *subdir;
	swp_entry_t *ptr;
	LIST_HEAD(pages_to_free);
	long nr_pages_to_free = 0;
	long nr_swaps_freed = 0;
	int offset;
	int freed;
	int punch_hole;
	spinlock_t *needs_lock;
	spinlock_t *punch_lock;
	unsigned long upper_limit;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	if (likely(end == (loff_t) -1)) {
		limit = info->next_index;
		upper_limit = SHMEM_MAX_INDEX;
		info->next_index = idx;
		needs_lock = NULL;
		punch_hole = 0;
	} else {
		if (end + 1 >= inode->i_size) {	/* we may free a little more */
			limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
							PAGE_CACHE_SHIFT;
			upper_limit = SHMEM_MAX_INDEX;
		} else {
			limit = (end + 1) >> PAGE_CACHE_SHIFT;
			upper_limit = limit;
		}
		needs_lock = &info->lock;
		punch_hole = 1;
	}

	topdir = info->i_indirect;
	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
		info->i_indirect = NULL;
		nr_pages_to_free++;
		list_add(&topdir->lru, &pages_to_free);
	}
	spin_unlock(&info->lock);

	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
	}

	/*
	 * If there are no indirect blocks or we are punching a hole
	 * below indirect blocks, nothing to be done.
	 */
	if (!topdir || limit <= SHMEM_NR_DIRECT)
		goto done2;

	/*
	 * The truncation case has already dropped info->lock, and we're safe
	 * because i_size and next_index have already been lowered, preventing
	 * access beyond.  But in the punch_hole case, we still need to take
	 * the lock when updating the swap directory, because there might be
	 * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
	 * shmem_writepage.  However, whenever we find we can remove a whole
	 * directory page (not at the misaligned start or end of the range),
	 * we first NULLify its pointer in the level above, and then have no
	 * need to take the lock when updating its contents: needs_lock and
	 * punch_lock (either pointing to info->lock or NULL) manage this.
	 */

	upper_limit -= SHMEM_NR_DIRECT;
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;
	idx -= offset;

	dir = shmem_dir_map(topdir);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2) {
		middir = topdir;
		diroff = idx/ENTRIES_PER_PAGE;
	} else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		middir = *dir;
		if (*dir) {
			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!diroff && !offset && upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(middir);
		} else {
			diroff = 0;
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(topdir) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			middir = *dir;
			if (punch_hole)
				needs_lock = &info->lock;
			if (upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			cond_resched();
			dir = shmem_dir_map(middir);
			diroff = 0;
		}
		punch_lock = needs_lock;
		subdir = dir[diroff];
		if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
			if (needs_lock) {
				spin_lock(needs_lock);
				dir[diroff] = NULL;
				spin_unlock(needs_lock);
				punch_lock = NULL;
			} else
				dir[diroff] = NULL;
			nr_pages_to_free++;
			list_add(&subdir->lru, &pages_to_free);
		}
		if (subdir && page_private(subdir) /* has swap entries */) {
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_map_and_free_swp(subdir,
					offset, size, &dir, punch_lock);
			if (!dir)
				dir = shmem_dir_map(middir);
			nr_swaps_freed += freed;
			if (offset || punch_lock) {
				spin_lock(&info->lock);
				set_page_private(subdir,
					page_private(subdir) - freed);
				spin_unlock(&info->lock);
			} else
				BUG_ON(page_private(subdir) != freed);
		}
		offset = 0;
	}
done1:
	shmem_dir_unmap(dir);
done2:
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since vmtruncate or
		 * generic_delete_inode did it, before we lowered next_index.
		 * Also, though shmem_getpage checks i_size before adding to
		 * cache, no recheck after: so fix the narrow window there too.
		 *
		 * Recalling truncate_inode_pages_range and unmap_mapping_range
		 * every time for punch_hole (which never got a chance to clear
		 * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
		 * yet hardly ever necessary: try to optimize them out later.
		 */
		truncate_inode_pages_range(inode->i_mapping, start, end);
		if (punch_hole)
			unmap_mapping_range(inode->i_mapping, start,
							end - start, 1);
	}

	spin_lock(&info->lock);
	info->flags &= ~SHMEM_TRUNCATE;
	info->swapped -= nr_swaps_freed;
	if (nr_pages_to_free)
		shmem_free_blocks(inode, nr_pages_to_free);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	/*
	 * Empty swap vector directory pages to be freed?
	 */
	if (!list_empty(&pages_to_free)) {
		pages_to_free.prev->next = NULL;
		shmem_free_pages(pages_to_free.next);
	}
}
static void shmem_truncate(struct inode *inode)
{
	shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
}
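
/*
 * setattr entry point: on downward size changes, take care around the
 * partial final page and the SHMEM_PAGEIN flag before handing off to
 * inode_setattr (which reaches shmem_truncate via vmtruncate).
 */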
static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct page *page = NULL;
	int error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		if (attr->ia_size < inode->i_size) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					attr->ia_size>>PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
				if (page)
					unlock_page(page);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (attr->ia_size) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}
	}

	error = inode_change_ok(inode, attr);
	if (!error)
		error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (!error && (attr->ia_valid & ATTR_MODE))
		error = generic_acl_chmod(inode, &shmem_acl_ops);
#endif
	if (page)
		page_cache_release(page);
	return error;
}
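
/*
 * Final inode teardown: drop all pages and swap, unhook the inode from
 * the swaplist, and release its reserved inode count.
 */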
static void shmem_delete_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_op->truncate == shmem_truncate) {
		truncate_inode_pages(inode->i_mapping, 0);
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate(inode);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}
	BUG_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}
static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}
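
/*
 * Scan one inode's swap vector for the entry being swapped off;
 * on a hit, move the page back into that inode's page cache.
 */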
static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct inode *inode;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;
	int error;

	idx = 0;
	ptr = info->i_direct;
	spin_lock(&info->lock);
	if (!info->swapped) {
		list_del_init(&info->swaplist);
		goto lost2;
	}
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0)
		goto found;
	if (!info->i_indirect)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			if (cond_resched_lock(&info->lock)) {
				/* check it has not been truncated */
				if (limit > info->next_index) {
					limit = info->next_index;
					if (idx >= limit)
						goto lost2;
				}
			}
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && page_private(subdir)) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			shmem_swp_unmap(ptr);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				goto found;
			}
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	inode = igrab(&info->vfs_inode);
	spin_unlock(&info->lock);

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_delete_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.  We
	 * could avoid doing it if inode NULL; or use this minor optimization.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);
	mutex_unlock(&shmem_swaplist_mutex);

	error = 1;
	if (!inode)
		goto out;
	/*
	 * Charge page using GFP_KERNEL while we can wait.
	 * Charged back to the user(not to caller) when swap account is used.
	 * add_to_page_cache() will be called with GFP_NOWAIT.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	error = radix_tree_preload(GFP_KERNEL);
	if (error) {
		mem_cgroup_uncharge_cache_page(page);
		goto out;
	}
	error = 1;

	spin_lock(&info->lock);
	ptr = shmem_swp_entry(info, idx, NULL);
	if (ptr && ptr->val == entry.val) {
		error = add_to_page_cache_locked(page, inode->i_mapping,
						idx, GFP_NOWAIT);
		/* does mem_cgroup_uncharge_cache_page on error */
	} else	/* we must compensate for our precharge above */
		mem_cgroup_uncharge_cache_page(page);

	if (error == -EEXIST) {
		struct page *filepage = find_get_page(inode->i_mapping, idx);
		error = 1;
		if (filepage) {
			/*
			 * There might be a more uptodate page coming down
			 * from a stacked writepage: forget our swappage if so.
			 */
			if (PageUptodate(filepage))
				error = 0;
			page_cache_release(filepage);
		}
	}
	if (!error) {
		delete_from_swap_cache(page);
		set_page_dirty(page);
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr, 0);
		swap_free(entry);
		error = 1;	/* not an error, but entry was found */
	}
	if (ptr)
		shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	radix_tree_preload_end();
out:
	unlock_page(page);
	page_cache_release(page);
	iput(inode);		/* allows for NULL */
	return error;
}
/*
 * shmem_unuse() search for an eventually swapped out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	struct list_head *p, *next;
	struct shmem_inode_info *info;
	int found = 0;

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		found = shmem_unuse_inode(info, entry, page);
		cond_resched();
		if (found)
			goto out;
	}
	mutex_unlock(&shmem_swaplist_mutex);
	unlock_page(page);
	page_cache_release(page);
out:
	return found;	/* 0 or 1 or -ENOMEM */
}
/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * may use the ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.  However, in those cases,
	 * we do still want to check if there's a redundant swappage to be
	 * discarded.
	 */
	if (wbc->for_reclaim)
		swap = get_swap_page();
	else
		swap.val = 0;

	spin_lock(&info->lock);
	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	if (entry->val) {
		/*
		 * The more uptodate page coming down from a stacked
		 * writepage should replace our old swappage.
		 */
		free_swap_and_cache(*entry);
		shmem_swp_set(info, entry, 0);
	}
	shmem_recalc_inode(inode);

	if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		remove_from_page_cache(page);
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		if (list_empty(&info->swaplist))
			inode = igrab(inode);
		else
			inode = NULL;
		spin_unlock(&info->lock);
		swap_duplicate(swap);
		BUG_ON(page_mapped(page));
		page_cache_release(page);	/* pagecache ref */
		swap_writepage(page, wbc);
		if (inode) {
			mutex_lock(&shmem_swaplist_mutex);
			/* move instead of add in case we're racing */
			list_move_tail(&info->swaplist, &shmem_swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
			iput(inode);
		}
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	/*
	 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
	 * clear SWAP_HAS_CACHE flag.
	 */
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}
#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol, 1);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct mempolicy mpol, *spol;
	struct vm_area_struct pvma;
	struct page *page;

	spol = mpol_cond_copy(&mpol,
				mpol_shared_policy_lookup(&info->policy, idx));

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = spol;
	page = swapin_readahead(entry, gfp, &pvma, 0);
	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);

	/*
	 * alloc_page_vma() will drop the shared policy reference
	 */
	return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return swapin_readahead(entry, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif
/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
			struct page **pagep, enum sgp_type sgp, int *type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *filepage = *pagep;
	struct page *swappage;
	swp_entry_t *entry;
	swp_entry_t swap;
	gfp_t gfp;
	int error;

	if (idx >= SHMEM_MAX_INDEX)
		return -EFBIG;

	if (type)
		*type = 0;

	/*
	 * Normally, filepage is NULL on entry, and either found
	 * uptodate immediately, or allocated and zeroed, or read
	 * in under swappage, which is then assigned to filepage.
	 * But shmem_readpage (required for splice) passes in a locked
	 * filepage, which may be found not uptodate by other callers
	 * too, and may need to be copied from the swappage read in.
	 */
repeat:
	if (!filepage)
		filepage = find_lock_page(mapping, idx);
	if (filepage && PageUptodate(filepage))
		goto done;
	error = 0;
	gfp = mapping_gfp_mask(mapping);
	if (!filepage) {
		/*
		 * Try to preload while we can wait, to not make a habit of
		 * draining atomic reserves; but don't latch on to this cpu.
		 */
		error = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
		if (error)
			goto failed;
		radix_tree_preload_end();
	}

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	entry = shmem_swp_alloc(info, idx, sgp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		error = PTR_ERR(entry);
		goto failed;
	}
	swap = *entry;

	if (swap.val) {
		/* Look it up and read it in.. */
		swappage = lookup_swap_cache(swap);
		if (!swappage) {
			shmem_swp_unmap(entry);
			/* here we actually do the io */
			if (type && !(*type & VM_FAULT_MAJOR)) {
				__count_vm_event(PGMAJFAULT);
				*type |= VM_FAULT_MAJOR;
			}
			spin_unlock(&info->lock);
			swappage = shmem_swapin(swap, gfp, info, idx);
			if (!swappage) {
				spin_lock(&info->lock);
				entry = shmem_swp_alloc(info, idx, sgp);
				if (IS_ERR(entry))
					error = PTR_ERR(entry);
				else {
					if (entry->val == swap.val)
						error = -ENOMEM;
					shmem_swp_unmap(entry);
				}
				spin_unlock(&info->lock);
				if (error)
					goto failed;
				goto repeat;
			}
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (!trylock_page(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (PageWriteback(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_writeback(swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (!PageUptodate(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			error = -EIO;
			goto failed;
		}

		if (filepage) {
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			copy_highpage(filepage, swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			flush_dcache_page(filepage);
			SetPageUptodate(filepage);
			set_page_dirty(filepage);
			swap_free(swap);
		} else if (!(error = add_to_page_cache_locked(swappage, mapping,
					idx, GFP_NOWAIT))) {
			info->flags |= SHMEM_PAGEIN;
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			filepage = swappage;
			set_page_dirty(filepage);
			swap_free(swap);
		} else {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			if (error == -ENOMEM) {
				/*
				 * reclaim from proper memory cgroup and
				 * call memcg's OOM if needed.
				 */
				error = mem_cgroup_shmem_charge_fallback(
								swappage,
								current->mm,
								gfp);
				if (error) {
					unlock_page(swappage);
					page_cache_release(swappage);
					goto failed;
				}
			}
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
	} else if (sgp == SGP_READ && !filepage) {
		shmem_swp_unmap(entry);
		filepage = find_get_page(mapping, idx);
		if (filepage &&
		    (!PageUptodate(filepage) || !trylock_page(filepage))) {
			spin_unlock(&info->lock);
			wait_on_page_locked(filepage);
			page_cache_release(filepage);
			filepage = NULL;
			goto repeat;
		}
		spin_unlock(&info->lock);
	} else {
		shmem_swp_unmap(entry);
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks == 0 ||
			    shmem_acct_block(info->flags)) {
				spin_unlock(&sbinfo->stat_lock);
				spin_unlock(&info->lock);
				error = -ENOSPC;
				goto failed;
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		} else if (shmem_acct_block(info->flags)) {
			spin_unlock(&info->lock);
			error = -ENOSPC;
			goto failed;
		}

		if (!filepage) {
			int ret;

			spin_unlock(&info->lock);
			filepage = shmem_alloc_page(gfp, info, idx);
			if (!filepage) {
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				error = -ENOMEM;
				goto failed;
			}
			SetPageSwapBacked(filepage);

			/* Precharge page while we can wait, compensate after */
			error = mem_cgroup_cache_charge(filepage, current->mm,
					GFP_KERNEL);
			if (error) {
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				goto failed;
			}

			spin_lock(&info->lock);
			entry = shmem_swp_alloc(info, idx, sgp);
			if (IS_ERR(entry))
				error = PTR_ERR(entry);
			else {
				swap = *entry;
				shmem_swp_unmap(entry);
			}
			ret = error || swap.val;
			if (ret)
				mem_cgroup_uncharge_cache_page(filepage);
			else
				ret = add_to_page_cache_lru(filepage, mapping,
						idx, GFP_NOWAIT);
			/*
			 * At add_to_page_cache_lru() failure, uncharge will
			 * be done automatically.
			 */
			if (ret) {
				spin_unlock(&info->lock);
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				if (error)
					goto failed;
				goto repeat;
			}
			info->flags |= SHMEM_PAGEIN;
		}

		info->alloced++;
		spin_unlock(&info->lock);
		clear_highpage(filepage);
		flush_dcache_page(filepage);
		SetPageUptodate(filepage);
		if (sgp == SGP_DIRTY)
			set_page_dirty(filepage);
	}
done:
	*pagep = filepage;
	return 0;

failed:
	if (*pagep != filepage) {
		unlock_page(filepage);
		page_cache_release(filepage);
	}
	return error;
}
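
/*
 * Page fault handler: refuse faults beyond i_size, otherwise let
 * shmem_getpage find or allocate the page, which is returned locked.
 */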
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret;

	if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	return ret | VM_FAULT_LOCKED;
}
#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	unsigned long idx;

	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
		scan_mapping_unevictable_pages(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}
static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}
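
/*
 * Allocate and initialize a new shmem inode of the given type,
 * charging it against the mount's inode limit.
 */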
static struct inode *shmem_get_inode(struct super_block *sb, int mode,
					dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = current_fsgid();
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}
#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_symlink_inline_operations;

/*
 * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin;
 * but providing them allows a tmpfs file to be used for splice, sendfile, and
 * below the loop driver, in the generic fashion that many filesystems support.
 */
static int shmem_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
	unlock_page(page);
	return error;
}
static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	*pagep = NULL;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}
static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}
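
/*
 * Core of read(): walk the file page by page through shmem_getpage
 * and feed each chunk to the actor (usually file_read_actor).
 */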
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index, offset;
	enum sgp_type sgp = SGP_READ;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		unsigned long end_index, nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}
static ssize_t shmem_file_aio_read(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base;
		desc.count = iov[seg].iov_len;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_shmem_file_read(filp, ppos, &desc, file_read_actor);
		retval += desc.written;
		if (desc.error) {
			retval = retval ?: desc.error;
			break;
		}
		if (desc.count > 0)
			break;
	}
	return retval;
}
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	spin_lock(&sbinfo->stat_lock);
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	spin_unlock(&sbinfo->stat_lock);
	return 0;
}
/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, mode, dev, VM_NORESERVE);
	if (inode) {
		error = security_inode_init_security(inode, dir, NULL, NULL,
						     NULL);
		if (error) {
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
		}
		error = shmem_acl_init(inode, dir);
		if (error) {
			iput(inode);
			return error;
		}
		if (dir->i_mode & S_ISGID) {
			inode->i_gid = dir->i_gid;
			if (S_ISDIR(mode))
				inode->i_mode |= S_ISGID;
		}
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
}
static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}
static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}
/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int ret;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	ret = shmem_reserve_inode(inode->i_sb);
	if (ret)
		goto out;

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	atomic_inc(&inode->i_count);	/* New dentry reference */
	dget(dentry);		/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}
static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}
static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}
/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly free's it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}
static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page = NULL;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, NULL, NULL,
					     NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= (char *)inode - (char *)info) {
		/* do it inline */
		memcpy(info, symname, len);
		inode->i_op = &shmem_symlink_inline_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
	}
	if (dir->i_mode & S_ISGID)
		inode->i_gid = dir->i_gid;
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}
static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
	return NULL;
}
static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
	if (page)
		unlock_page(page);
	return page;
}
static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}
static const struct inode_operations shmem_symlink_inline_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link_inline,
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.truncate	= shmem_truncate,
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
};
#ifdef CONFIG_TMPFS_POSIX_ACL
/*
 * Superblocks without xattr inode operations will get security.* xattr
 * support from the VFS "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

static size_t shmem_xattr_security_list(struct inode *inode, char *list,
					size_t list_len, const char *name,
					size_t name_len)
{
	return security_inode_listsecurity(inode, list, list_len);
}

static int shmem_xattr_security_get(struct inode *inode, const char *name,
				    void *buffer, size_t size)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return xattr_getsecurity(inode, name, buffer, size);
}

static int shmem_xattr_security_set(struct inode *inode, const char *name,
				    const void *value, size_t size, int flags)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return security_inode_setsecurity(inode, name, value, size, flags);
}

static struct xattr_handler shmem_xattr_security_handler = {
	.prefix	= XATTR_SECURITY_PREFIX,
	.list	= shmem_xattr_security_list,
	.get	= shmem_xattr_security_get,
	.set	= shmem_xattr_security_set,
};

static struct xattr_handler *shmem_xattr_handlers[] = {
	&shmem_xattr_acl_access_handler,
	&shmem_xattr_acl_default_handler,
	&shmem_xattr_security_handler,
	NULL
};
#endif
static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct inode *inode;
	struct dentry *dentry = NULL;
	u64 inum = fid->raw[2];
	inum = (inum << 32) | fid->raw[1];

	if (fh_len < 3)
		return NULL;

	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
			shmem_match, fid->raw);
	if (inode) {
		dentry = d_find_alias(inode);
		iput(inode);
	}

	return dentry;
}

static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
				int connectable)
{
	struct inode *inode = dentry->d_inode;

	if (*len < 3)
		return 255;

	if (hlist_unhashed(&inode->i_hash)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (hlist_unhashed(&inode->i_hash))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;

	*len = 3;
	return 1;
}

static const struct export_operations shmem_export_ops = {
	.get_parent	= shmem_get_parent,
	.encode_fh	= shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
};
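
/*
 * Parse tmpfs mount options (size, nr_blocks, nr_inodes, mode, uid,
 * gid, mpol); on remount only the resizable limits are honoured.
 */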
static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
			       bool remount)
{
	char *this_char, *value, *rest;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			return 1;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			sbinfo->max_blocks =
				DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
		} else if (!strcmp(this_char,"nr_blocks")) {
			sbinfo->max_blocks = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			sbinfo->max_inodes = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (remount)
				continue;
			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (remount)
				continue;
			sbinfo->uid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (remount)
				continue;
			sbinfo->gid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mpol")) {
			if (mpol_parse_str(value, &sbinfo->mpol, 1))
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			return 1;
		}
	}
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
	return 1;
}
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	struct shmem_sb_info config = *sbinfo;
	unsigned long blocks;
	unsigned long inodes;
	int error = -EINVAL;

	if (shmem_parse_options(data, &config, true))
		return error;

	spin_lock(&sbinfo->stat_lock);
	blocks = sbinfo->max_blocks - sbinfo->free_blocks;
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (config.max_blocks < blocks)
		goto out;
	if (config.max_inodes < inodes)
		goto out;
	/*
	 * Those tests also disallow limited->unlimited while any are in
	 * use, so i_blocks will always be zero when max_blocks is zero;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (config.max_blocks && !sbinfo->max_blocks)
		goto out;
	if (config.max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks  = config.max_blocks;
	sbinfo->free_blocks = config.max_blocks - blocks;
	sbinfo->max_inodes  = config.max_inodes;
	sbinfo->free_inodes = config.max_inodes - inodes;

	mpol_put(sbinfo->mpol);
	sbinfo->mpol        = config.mpol;	/* transfers initial ref */
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}
static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);

	if (sbinfo->max_blocks != shmem_default_max_blocks())
		seq_printf(seq, ",size=%luk",
			sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
	if (sbinfo->max_inodes != shmem_default_max_inodes())
		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
	if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
		seq_printf(seq, ",mode=%03o", sbinfo->mode);
	if (sbinfo->uid != 0)
		seq_printf(seq, ",uid=%u", sbinfo->uid);
	if (sbinfo->gid != 0)
		seq_printf(seq, ",gid=%u", sbinfo->gid);
	shmem_show_mpol(seq, sbinfo->mpol);
	return 0;
}
#endif /* CONFIG_TMPFS */
static void shmem_put_super(struct super_block *sb)
{
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
}
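
/*
 * Fill in the tmpfs superblock: default limits of half of RAM and one
 * inode per page of lowmem, unless overridden by the mount options.
 */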
int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;
	struct shmem_sb_info *sbinfo;
	int err = -ENOMEM;

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	sbinfo->mode = S_IRWXUGO | S_ISVTX;
	sbinfo->uid = current_fsuid();
	sbinfo->gid = current_fsgid();
	sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
	/*
	 * Per default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & MS_NOUSER)) {
		sbinfo->max_blocks = shmem_default_max_blocks();
		sbinfo->max_inodes = shmem_default_max_inodes();
		if (shmem_parse_options(data, sbinfo, false)) {
			err = -EINVAL;
			goto failed;
		}
	}
	sb->s_export_op = &shmem_export_ops;
#else
	sb->s_flags |= MS_NOUSER;
#endif

	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->free_blocks = sbinfo->max_blocks;
	sbinfo->free_inodes = sbinfo->max_inodes;

	sb->s_maxbytes = SHMEM_MAX_BYTES;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_xattr = shmem_xattr_handlers;
	sb->s_flags |= MS_POSIXACL;
#endif

	inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
	if (!inode)
		goto failed;
	inode->i_uid = sbinfo->uid;
	inode->i_gid = sbinfo->gid;
	root = d_alloc_root(inode);
	if (!root)
		goto failed_iput;
	sb->s_root = root;
	return 0;

failed_iput:
	iput(inode);
failed:
	shmem_put_super(sb);
	return err;
}
static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *p;
	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
	if (!p)
		return NULL;
	return &p->vfs_inode;
}

static void shmem_destroy_inode(struct inode *inode)
{
	if ((inode->i_mode & S_IFMT) == S_IFREG) {
		/* only struct inode is valid if it's an inline symlink */
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	}
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void init_once(void *foo)
{
	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static int init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, SLAB_PANIC, init_once);
	/* SLAB_PANIC: allocation failure panics, so this cannot fail */
	return 0;
}

static void destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}
static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
	.readpage	= shmem_readpage,
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
#endif
	.migratepage	= migrate_page,
	.error_remove_page = generic_error_remove_page,
};
static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
#ifdef CONFIG_TMPFS
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= shmem_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.fsync		= simple_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
#endif
};
static const struct inode_operations shmem_inode_operations = {
	.truncate	= shmem_truncate,
	.setattr	= shmem_notify_change,
	.truncate_range	= shmem_truncate_range,
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.check_acl	= shmem_check_acl,
#endif
};
static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_notify_change,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.check_acl	= shmem_check_acl,
#endif
};
static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_notify_change,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.check_acl	= shmem_check_acl,
#endif
};
static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
	.show_options	= shmem_show_options,
#endif
	.delete_inode	= shmem_delete_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};
static const struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};
static int shmem_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
}
static struct file_system_type tmpfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.get_sb		= shmem_get_sb,
	.kill_sb	= kill_litter_super,
};
int __init init_tmpfs(void)
{
	int error;

	error = bdi_init(&shmem_backing_dev_info);
	if (error)
		goto out4;

	error = init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&tmpfs_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
				tmpfs_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

	/* unwind in reverse order of the steps above */
out1:
	unregister_filesystem(&tmpfs_fs_type);
out2:
	destroy_inodecache();
out3:
	bdi_destroy(&shmem_backing_dev_info);
out4:
	shm_mnt = ERR_PTR(error);
	return error;
}
#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

#include <linux/ramfs.h>

static struct file_system_type tmpfs_fs_type = {
	.name		= "tmpfs",
	.get_sb		= ramfs_get_sb,
	.kill_sb	= kill_litter_super,
};

int __init init_tmpfs(void)
{
	BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);

	shm_mnt = kern_mount(&tmpfs_fs_type);
	BUG_ON(IS_ERR(shm_mnt));

	return 0;
}

int shmem_unuse(swp_entry_t entry, struct page *page)
{
	return 0;
}

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	return 0;
}

/* map the shmem interfaces below onto their ramfs equivalents */
#define shmem_vm_ops				generic_file_vm_ops
#define shmem_file_operations			ramfs_file_operations
#define shmem_get_inode(sb, mode, dev, flags)	ramfs_get_inode(sb, mode, dev)
#define shmem_acct_size(flags, size)		0
#define shmem_unacct_size(flags, size)		do {} while (0)
#define SHMEM_MAX_BYTES				MAX_LFS_FILESIZE

#endif /* CONFIG_SHMEM */

/* common code */
/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	int error;
	struct file *file;
	struct inode *inode;
	struct dentry *dentry, *root;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return (void *)shm_mnt;

	if (size < 0 || size > SHMEM_MAX_BYTES)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	error = -ENOMEM;
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	root = shm_mnt->mnt_root;
	dentry = d_alloc(root, &this);
	if (!dentry)
		goto put_memory;

	error = -ENFILE;
	file = get_empty_filp();
	if (!file)
		goto put_dentry;

	error = -ENOSPC;
	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0, flags);
	if (!inode)
		goto close_file;

	d_instantiate(dentry, inode);
	inode->i_size = size;
	inode->i_nlink = 0;	/* It is unlinked */
	init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ,
		  &shmem_file_operations);

#ifndef CONFIG_MMU
	error = ramfs_nommu_expand_for_mapping(inode, size);
	if (error)
		goto close_file;
#endif
	ima_counts_get(file);
	return file;

close_file:
	put_filp(file);
put_dentry:
	dput(dentry);
put_memory:
	shmem_unacct_size(flags, size);
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
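/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how an in-kernel user of the exported symbol might obtain an unlinked,
 * swap-backed file.  The function example_shmem_user() is hypothetical.
 */
#if 0	/* example only, excluded from the build */
static struct file *example_shmem_user(void)
{
	/* one megabyte, fully accounted up front (no VM_NORESERVE) */
	struct file *file = shmem_file_setup("example", 1024 * 1024, 0);

	if (IS_ERR(file))
		printk(KERN_ERR "example: shmem_file_setup failed\n");
	return file;
}
#endif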
/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}
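/*
 * Illustrative path (editor's note, not part of the original file):
 * a userspace shared anonymous mapping is what ends up here, e.g.
 *
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * do_mmap_pgoff() calls shmem_zero_setup() to back such a vma with an
 * unlinked "dev/zero" tmpfs file.
 */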
;