/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000-2001 Christoph Rohland
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This file is released under the GPL.
 */

/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/generic_acl.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>
/* This magic number is used in glibc for posix shared memory */
#define TMPFS_MAGIC	0x01021994

#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
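
/*
 * For a sense of scale (a worked example, assuming 4K pages, a 4-byte
 * unsigned long, and SHMEM_NR_DIRECT == 16 from shmem_fs.h):
 * ENTRIES_PER_PAGE = 1024, so SHMEM_MAX_INDEX comes to
 * 16 + (1024*1024/2) * 1025 = 537,395,216 pages, making
 * SHMEM_MAX_BYTES roughly 2TB per file.
 */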
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	 VM_READ
#define SHMEM_TRUNCATE	 VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT	 64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20
/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_QUICK,	/* don't try more than file page cache lookup */
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
	SGP_FAULT,	/* same as SGP_CACHE, return with page locked */
};
static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 *
	 * __GFP_MOVABLE is masked out as swap vectors cannot move
	 */
	return alloc_pages((gfp_mask & ~__GFP_MOVABLE) | __GFP_ZERO,
				PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}
/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_ACCOUNT)?
		security_vm_enough_memory(VM_ACCT(size)): 0;
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (flags & VM_ACCOUNT)
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_ACCOUNT)?
		0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (!(flags & VM_ACCOUNT))
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
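
/*
 * Concretely: an object created with VM_ACCOUNT (a SYSV shm segment or
 * shared /dev/zero mapping) is charged its full size up front by
 * shmem_acct_size(), so shmem_acct_block() then charges nothing per
 * page; an ordinary tmpfs file is the reverse, unaccounted at setup
 * but charged page by page as it grows, which is what allows huge
 * sparse files.
 */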
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
	.unplug_io_fn	= default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_SPINLOCK(shmem_swaplist_lock);
static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_blocks += pages;
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);
	}
}
/*
 * shmem_recalc_inode - recalculate the size of an inode
 *
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}
/*
 * shmem_swp_entry - find the swap vector position in the info structure
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 * 	      |	     +-> 20-23
 * 	      |
 * 	      +-->dir2 --> 24-27
 * 	      |	   +-> 28-31
 * 	      |	   +-> 32-35
 * 	      |	   +-> 36-39
 * 	      |
 * 	      +-->dir3 --> 40-43
 * 	       	   +-> 44-47
 * 	       	   +-> 48-51
 * 	       	   +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}
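
/*
 * A worked example of the lookup above, using the artificial
 * ENTRIES_PER_PAGE = 4 layout pictured in the comment: page index 30
 * becomes index 14 after the SHMEM_NR_DIRECT adjustment, so offset =
 * 14 % 4 = 2 and leaf block 14 / 4 = 3.  Block 3 falls in the triple-
 * indirect half (3 >= 2), so dir advances to slot 2 + (3-2)/4 = 2 of
 * i_indirect (dir2 in the picture), and (3-2) % 4 = 1 selects dir2's
 * second leaf block (pages 28-31), where entry 2 is page 30.
 */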
static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
		struct page *page = kmap_atomic_to_page(entry);
		set_page_private(page, page_private(page) + incdec);
	}
}
/*
 * shmem_swp_alloc - get the position of the swap entry for the page.
 *                   If it does not exist allocate the entry.
 *
 * @info:	info structure for the inode
 * @index:	index of the page to find
 * @sgp:	check and recheck i_size? skip allocation?
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test free_blocks against 1 not 0, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
		 */
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks <= 1) {
				spin_unlock(&sbinfo->stat_lock);
				return ERR_PTR(-ENOSPC);
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
		if (page)
			set_page_private(page, 0);
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}
/*
 * shmem_free_swp - free some swap entries in a directory
 *
 * @dir:        pointer to the directory
 * @edir:       pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
						spinlock_t *punch_lock)
{
	spinlock_t *punch_unlock = NULL;
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			if (unlikely(punch_lock)) {
				punch_unlock = punch_lock;
				punch_lock = NULL;
				spin_lock(punch_unlock);
				if (!ptr->val)
					continue;
			}
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	if (punch_unlock)
		spin_unlock(punch_unlock);
	return freed;
}
static int shmem_map_and_free_swp(struct page *subdir, int offset,
		int limit, struct page ***dir, spinlock_t *punch_lock)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size,
							punch_lock);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
			ptr = shmem_swp_map(subdir);
		}
	}
	shmem_swp_unmap(ptr);
	return freed;
}
static void shmem_free_pages(struct list_head *next)
{
	struct page *page;
	int freed = 0;

	do {
		page = container_of(next, struct page, lru);
		next = next->next;
		shmem_dir_free(page);
		freed++;
		if (freed >= LATENCY_LIMIT) {
			cond_resched();
			freed = 0;
		}
	} while (next);
}
static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	unsigned long diroff;
	struct page **dir;
	struct page *topdir;
	struct page *middir;
	struct page *subdir;
	swp_entry_t *ptr;
	LIST_HEAD(pages_to_free);
	long nr_pages_to_free = 0;
	long nr_swaps_freed = 0;
	int offset;
	int freed;
	int punch_hole;
	spinlock_t *needs_lock;
	spinlock_t *punch_lock;
	unsigned long upper_limit;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	if (likely(end == (loff_t) -1)) {
		limit = info->next_index;
		upper_limit = SHMEM_MAX_INDEX;
		info->next_index = idx;
		needs_lock = NULL;
		punch_hole = 0;
	} else {
		if (end + 1 >= inode->i_size) {	/* we may free a little more */
			limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
							PAGE_CACHE_SHIFT;
			upper_limit = SHMEM_MAX_INDEX;
		} else {
			limit = (end + 1) >> PAGE_CACHE_SHIFT;
			upper_limit = limit;
		}
		needs_lock = &info->lock;
		punch_hole = 1;
	}

	topdir = info->i_indirect;
	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
		info->i_indirect = NULL;
		nr_pages_to_free++;
		list_add(&topdir->lru, &pages_to_free);
	}
	spin_unlock(&info->lock);

	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
	}

	/*
	 * If there are no indirect blocks or we are punching a hole
	 * below indirect blocks, nothing to be done.
	 */
	if (!topdir || limit <= SHMEM_NR_DIRECT)
		goto done2;

	/*
	 * The truncation case has already dropped info->lock, and we're safe
	 * because i_size and next_index have already been lowered, preventing
	 * access beyond.  But in the punch_hole case, we still need to take
	 * the lock when updating the swap directory, because there might be
	 * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
	 * shmem_writepage.  However, whenever we find we can remove a whole
	 * directory page (not at the misaligned start or end of the range),
	 * we first NULLify its pointer in the level above, and then have no
	 * need to take the lock when updating its contents: needs_lock and
	 * punch_lock (either pointing to info->lock or NULL) manage this.
	 */

	upper_limit -= SHMEM_NR_DIRECT;
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;
	idx -= offset;

	dir = shmem_dir_map(topdir);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2) {
		middir = topdir;
		diroff = idx/ENTRIES_PER_PAGE;
	} else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		middir = *dir;
		if (*dir) {
			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!diroff && !offset && upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(middir);
		} else {
			diroff = 0;
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(topdir) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			middir = *dir;
			if (punch_hole)
				needs_lock = &info->lock;
			if (upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			cond_resched();
			dir = shmem_dir_map(middir);
			diroff = 0;
		}
		punch_lock = needs_lock;
		subdir = dir[diroff];
		if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
			if (needs_lock) {
				spin_lock(needs_lock);
				dir[diroff] = NULL;
				spin_unlock(needs_lock);
				punch_lock = NULL;
			} else
				dir[diroff] = NULL;
			nr_pages_to_free++;
			list_add(&subdir->lru, &pages_to_free);
		}
		if (subdir && page_private(subdir) /* has swap entries */) {
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_map_and_free_swp(subdir,
					offset, size, &dir, punch_lock);
			if (!dir)
				dir = shmem_dir_map(middir);
			nr_swaps_freed += freed;
			if (offset || punch_lock) {
				spin_lock(&info->lock);
				set_page_private(subdir,
					page_private(subdir) - freed);
				spin_unlock(&info->lock);
			} else
				BUG_ON(page_private(subdir) != freed);
		}
		offset = 0;
	}
done1:
	shmem_dir_unmap(dir);
done2:
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since vmtruncate or
		 * generic_delete_inode did it, before we lowered next_index.
		 * Also, though shmem_getpage checks i_size before adding to
		 * cache, no recheck after: so fix the narrow window there too.
		 *
		 * Recalling truncate_inode_pages_range and unmap_mapping_range
		 * every time for punch_hole (which never got a chance to clear
		 * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
		 * yet hardly ever necessary: try to optimize them out later.
		 */
		truncate_inode_pages_range(inode->i_mapping, start, end);
		if (punch_hole)
			unmap_mapping_range(inode->i_mapping, start,
							end - start, 1);
	}

	spin_lock(&info->lock);
	info->flags &= ~SHMEM_TRUNCATE;
	info->swapped -= nr_swaps_freed;
	if (nr_pages_to_free)
		shmem_free_blocks(inode, nr_pages_to_free);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	/*
	 * Empty swap vector directory pages to be freed?
	 */
	if (!list_empty(&pages_to_free)) {
		pages_to_free.prev->next = NULL;
		shmem_free_pages(pages_to_free.next);
	}
}
static void shmem_truncate(struct inode *inode)
{
	shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
}
static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct page *page = NULL;
	int error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		if (attr->ia_size < inode->i_size) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					attr->ia_size>>PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (attr->ia_size) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}
	}

	error = inode_change_ok(inode, attr);
	if (!error)
		error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (!error && (attr->ia_valid & ATTR_MODE))
		error = generic_acl_chmod(inode, &shmem_acl_ops);
#endif
	if (page)
		page_cache_release(page);
	return error;
}
static void shmem_delete_inode(struct inode *inode)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_op->truncate == shmem_truncate) {
		truncate_inode_pages(inode->i_mapping, 0);
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate(inode);
		if (!list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			list_del_init(&info->swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
	}
	BUG_ON(inode->i_blocks);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	clear_inode(inode);
}
static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}
static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct inode *inode;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;

	idx = 0;
	ptr = info->i_direct;
	spin_lock(&info->lock);
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0) {
		shmem_swp_balance_unmap();
		goto found;
	}
	if (!info->i_indirect)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && page_private(subdir)) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				goto found;
			}
			shmem_swp_unmap(ptr);
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	inode = &info->vfs_inode;
	if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr + offset, 0);
	}
	shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	/*
	 * Decrement swap count even when the entry is left behind:
	 * try_to_unuse will skip over mms, then reincrement count.
	 */
	swap_free(entry);
	return 1;
}
/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	struct list_head *p, *next;
	struct shmem_inode_info *info;
	int found = 0;

	spin_lock(&shmem_swaplist_lock);
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		if (!info->swapped)
			list_del_init(&info->swaplist);
		else if (shmem_unuse_inode(info, entry, page)) {
			/* move head to start search for next from here */
			list_move_tail(&shmem_swaplist, &info->swaplist);
			found = 1;
			break;
		}
	}
	spin_unlock(&shmem_swaplist_lock);
	return found;
}
/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	BUG_ON(page_mapped(page));

	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	BUG_ON(!entry);
	BUG_ON(entry->val);

	if (move_to_swap_cache(page, swap) == 0) {
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		spin_unlock(&info->lock);
		if (list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			/* move instead of add in case we're racing */
			list_move_tail(&info->swaplist, &shmem_swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
		unlock_page(page);
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	swap_free(swap);
redirty:
	set_page_dirty(page);
	return AOP_WRITEPAGE_ACTIVATE;	/* Return with the page locked */
}
#ifdef CONFIG_NUMA
static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
	char *nodelist = strchr(value, ':');
	int err = 1;

	if (nodelist) {
		/* NUL-terminate policy string */
		*nodelist++ = '\0';
		if (nodelist_parse(nodelist, *policy_nodes))
			goto out;
		if (!nodes_subset(*policy_nodes, node_online_map))
			goto out;
	}
	if (!strcmp(value, "default")) {
		*policy = MPOL_DEFAULT;
		/* Don't allow a nodelist */
		if (!nodelist)
			err = 0;
	} else if (!strcmp(value, "prefer")) {
		*policy = MPOL_PREFERRED;
		/* Insist on a nodelist of one node only */
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (!*rest)
				err = 0;
		}
	} else if (!strcmp(value, "bind")) {
		*policy = MPOL_BIND;
		/* Insist on a nodelist */
		if (nodelist)
			err = 0;
	} else if (!strcmp(value, "interleave")) {
		*policy = MPOL_INTERLEAVE;
		/* Default to nodes online if no nodelist */
		if (!nodelist)
			*policy_nodes = node_online_map;
		err = 0;
	}
out:
	/* Restore string for error message */
	if (nodelist)
		*--nodelist = ':';
	return err;
}
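
/*
 * Examples of what the parser above accepts, as tmpfs "mpol=" mount
 * options (see Documentation/filesystems/tmpfs.txt for the syntax):
 *
 *	mount -o remount,mpol=interleave /dev/shm	(all online nodes)
 *	mount -o remount,mpol=bind:0-3 /dev/shm		(nodes 0-3 only)
 *	mount -o remount,mpol=prefer:1 /dev/shm		(one node only)
 */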
static struct page *shmem_swapin_async(struct shared_policy *p,
				       swp_entry_t entry, unsigned long idx)
{
	struct page *page;
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	memset(&pvma, 0, sizeof(struct vm_area_struct));
	pvma.vm_end = PAGE_SIZE;
	pvma.vm_pgoff = idx;
	pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
	page = read_swap_cache_async(entry, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}
struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
			  unsigned long idx)
{
	struct shared_policy *p = &info->policy;
	int i, num;
	struct page *page;
	unsigned long offset;

	num = valid_swaphandles(entry, &offset);
	for (i = 0; i < num; offset++, i++) {
		page = shmem_swapin_async(p,
				swp_entry(swp_type(entry), offset), idx);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return shmem_swapin_async(p, entry, idx);
}
static struct page *
shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
		 unsigned long idx)
{
	struct vm_area_struct pvma;
	struct page *page;

	memset(&pvma, 0, sizeof(struct vm_area_struct));
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
	pvma.vm_pgoff = idx;
	pvma.vm_end = PAGE_SIZE;
	page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}
#else
static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
	return 1;
}

static inline struct page *
shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry, unsigned long idx)
{
	swapin_readahead(entry, 0, NULL);
	return read_swap_cache_async(entry, NULL, 0);
}

static inline struct page *
shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, unsigned long idx)
{
	return alloc_page(gfp | __GFP_ZERO);
}
#endif
/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
			struct page **pagep, enum sgp_type sgp, int *type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *filepage = *pagep;
	struct page *swappage;
	swp_entry_t *entry;
	swp_entry_t swap;
	int error;

	if (idx >= SHMEM_MAX_INDEX)
		return -EFBIG;

	if (type)
		*type = 0;

	/*
	 * Normally, filepage is NULL on entry, and either found
	 * uptodate immediately, or allocated and zeroed, or read
	 * in under swappage, which is then assigned to filepage.
	 * But shmem_readpage and shmem_prepare_write pass in a locked
	 * filepage, which may be found not uptodate by other callers
	 * too, and may need to be copied from the swappage read in.
	 */
repeat:
	if (!filepage)
		filepage = find_lock_page(mapping, idx);
	if (filepage && PageUptodate(filepage))
		goto done;
	error = 0;
	if (sgp == SGP_QUICK)
		goto failed;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	entry = shmem_swp_alloc(info, idx, sgp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		error = PTR_ERR(entry);
		goto failed;
	}
	swap = *entry;

	if (swap.val) {
		/* Look it up and read it in.. */
		swappage = lookup_swap_cache(swap);
		if (!swappage) {
			shmem_swp_unmap(entry);
			/* here we actually do the io */
			if (type && !(*type & VM_FAULT_MAJOR)) {
				__count_vm_event(PGMAJFAULT);
				*type |= VM_FAULT_MAJOR;
			}
			spin_unlock(&info->lock);
			swappage = shmem_swapin(info, swap, idx);
			if (!swappage) {
				spin_lock(&info->lock);
				entry = shmem_swp_alloc(info, idx, sgp);
				if (IS_ERR(entry))
					error = PTR_ERR(entry);
				else {
					if (entry->val == swap.val)
						error = -ENOMEM;
					shmem_swp_unmap(entry);
				}
				spin_unlock(&info->lock);
				if (error)
					goto failed;
				goto repeat;
			}
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (TestSetPageLocked(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (PageWriteback(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_writeback(swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (!PageUptodate(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			error = -EIO;
			goto failed;
		}

		if (filepage) {
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			copy_highpage(filepage, swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			flush_dcache_page(filepage);
			SetPageUptodate(filepage);
			set_page_dirty(filepage);
			swap_free(swap);
		} else if (!(error = move_from_swap_cache(
				swappage, idx, mapping))) {
			info->flags |= SHMEM_PAGEIN;
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			filepage = swappage;
			swap_free(swap);
		} else {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			if (error == -ENOMEM) {
				/* let kswapd refresh zone for GFP_ATOMICs */
				congestion_wait(WRITE, HZ/50);
			}
			goto repeat;
		}
	} else if (sgp == SGP_READ && !filepage) {
		shmem_swp_unmap(entry);
		filepage = find_get_page(mapping, idx);
		if (filepage &&
		    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
			spin_unlock(&info->lock);
			wait_on_page_locked(filepage);
			page_cache_release(filepage);
			filepage = NULL;
			goto repeat;
		}
		spin_unlock(&info->lock);
	} else {
		shmem_swp_unmap(entry);
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks == 0 ||
			    shmem_acct_block(info->flags)) {
				spin_unlock(&sbinfo->stat_lock);
				spin_unlock(&info->lock);
				error = -ENOSPC;
				goto failed;
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		} else if (shmem_acct_block(info->flags)) {
			spin_unlock(&info->lock);
			error = -ENOSPC;
			goto failed;
		}

		if (!filepage) {
			spin_unlock(&info->lock);
			filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
						    info, idx);
			if (!filepage) {
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				error = -ENOMEM;
				goto failed;
			}

			spin_lock(&info->lock);
			entry = shmem_swp_alloc(info, idx, sgp);
			if (IS_ERR(entry))
				error = PTR_ERR(entry);
			else {
				swap = *entry;
				shmem_swp_unmap(entry);
			}
			if (error || swap.val || 0 != add_to_page_cache_lru(
					filepage, mapping, idx, GFP_ATOMIC)) {
				spin_unlock(&info->lock);
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				if (error)
					goto failed;
				goto repeat;
			}
			info->flags |= SHMEM_PAGEIN;
		}

		info->alloced++;
		spin_unlock(&info->lock);
		flush_dcache_page(filepage);
		SetPageUptodate(filepage);
	}
done:
	if (*pagep != filepage) {
		*pagep = filepage;
		if (sgp != SGP_FAULT)
			unlock_page(filepage);
	}
	return 0;

failed:
	if (*pagep != filepage) {
		unlock_page(filepage);
		page_cache_release(filepage);
	}
	return error;
}
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret;

	if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_FAULT, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	mark_page_accessed(vmf->page);
	return ret | VM_FAULT_LOCKED;
}
#ifdef CONFIG_NUMA
int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}

struct mempolicy *
shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	unsigned long idx;

	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
	}
	retval = 0;
out_nomem:
	spin_unlock(&info->lock);
	return retval;
}
static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}
static struct inode *
shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return NULL;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blocks = 0;
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		INIT_LIST_HEAD(&info->swaplist);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy, sbinfo->policy,
							&sbinfo->policy_nodes);
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, MPOL_DEFAULT,
						NULL);
			break;
		}
	} else if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	return inode;
}
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_symlink_inline_operations;

/*
 * Normally tmpfs avoids the use of shmem_readpage and shmem_prepare_write;
 * but providing them allows a tmpfs file to be used for splice, sendfile, and
 * below the loop driver, in the generic fashion that many filesystems support.
 */
static int shmem_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
	unlock_page(page);
	return error;
}

static int
shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{
	struct inode *inode = page->mapping->host;
	return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL);
}
static ssize_t
shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	loff_t pos;
	unsigned long written;
	ssize_t err;

	if ((ssize_t) count < 0)
		return -EINVAL;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	mutex_lock(&inode->i_mutex);

	pos = *ppos;
	written = 0;

	err = generic_write_checks(file, &pos, &count, 0);
	if (err || !count)
		goto out;

	err = remove_suid(file->f_path.dentry);
	if (err)
		goto out;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;

	do {
		struct page *page = NULL;
		unsigned long bytes, index, offset;
		char *kaddr;
		int left;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		/*
		 * We don't hold page lock across copy from user -
		 * what would it guard against? - so no deadlock here.
		 * But it still may be a good idea to prefault below.
		 */

		err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
		if (err)
			break;

		left = bytes;
		if (PageHighMem(page)) {
			volatile unsigned char dummy;
			__get_user(dummy, buf);
			__get_user(dummy, buf + bytes - 1);

			kaddr = kmap_atomic(page, KM_USER0);
			left = __copy_from_user_inatomic(kaddr + offset,
							buf, bytes);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if (left) {
			kaddr = kmap(page);
			left = __copy_from_user(kaddr + offset, buf, bytes);
			kunmap(page);
		}

		written += bytes;
		count -= bytes;
		pos += bytes;
		buf += bytes;
		if (pos > inode->i_size)
			i_size_write(inode, pos);

		flush_dcache_page(page);
		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		if (left) {
			pos -= left;
			written -= left;
			err = -EFAULT;
			break;
		}

		/*
		 * Our dirty pages are not counted in nr_dirty,
		 * and we do not attempt to balance dirty pages.
		 */

		cond_resched();
	} while (count);

	*ppos = pos;
	if (written)
		err = written;
out:
	mutex_unlock(&inode->i_mutex);
	return err;
}
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index, offset;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		unsigned long end_index, nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}
static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	read_descriptor_t desc;

	if ((ssize_t) count < 0)
		return -EINVAL;
	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.buf = buf;
	desc.error = 0;

	do_shmem_file_read(filp, ppos, &desc, file_read_actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	spin_lock(&sbinfo->stat_lock);
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	spin_unlock(&sbinfo->stat_lock);
	return 0;
}
/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
	int error = -ENOSPC;

	if (inode) {
		error = security_inode_init_security(inode, dir, NULL, NULL,
						     NULL);
		if (error) {
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
		}
		error = shmem_acl_init(inode, dir);
		if (error) {
			iput(inode);
			return error;
		}
		if (dir->i_mode & S_ISGID) {
			inode->i_gid = dir->i_gid;
			if (S_ISDIR(mode))
				inode->i_mode |= S_ISGID;
		}
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
}
static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}
static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	atomic_inc(&inode->i_count);	/* New dentry reference */
	dget(dentry);		/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
	return 0;
}
static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_inodes) {
			spin_lock(&sbinfo->stat_lock);
			sbinfo->free_inodes++;
			spin_unlock(&sbinfo->stat_lock);
		}
	}

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}
static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}
/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}
static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page = NULL;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, NULL, NULL,
					     NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= (char *)inode - (char *)info) {
		/* do it inline */
		memcpy(info, symname, len);
		inode->i_op = &shmem_symlink_inline_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		page_cache_release(page);
	}
	if (dir->i_mode & S_ISGID)
		inode->i_gid = dir->i_gid;
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}
static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
	return NULL;
}
static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
	return page;
}
static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}
static const struct inode_operations shmem_symlink_inline_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link_inline,
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.truncate	= shmem_truncate,
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
};
#ifdef CONFIG_TMPFS_POSIX_ACL
/*
 * Superblocks without xattr inode operations will get security.* xattr
 * support from the VFS "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */
static size_t shmem_xattr_security_list(struct inode *inode, char *list,
					size_t list_len, const char *name,
					size_t name_len)
{
	return security_inode_listsecurity(inode, list, list_len);
}

static int shmem_xattr_security_get(struct inode *inode, const char *name,
				    void *buffer, size_t size)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return security_inode_getsecurity(inode, name, buffer, size,
					  -EOPNOTSUPP);
}

static int shmem_xattr_security_set(struct inode *inode, const char *name,
				    const void *value, size_t size, int flags)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return security_inode_setsecurity(inode, name, value, size, flags);
}

static struct xattr_handler shmem_xattr_security_handler = {
	.prefix	= XATTR_SECURITY_PREFIX,
	.list	= shmem_xattr_security_list,
	.get	= shmem_xattr_security_get,
	.set	= shmem_xattr_security_set,
};

static struct xattr_handler *shmem_xattr_handlers[] = {
	&shmem_xattr_acl_access_handler,
	&shmem_xattr_acl_default_handler,
	&shmem_xattr_security_handler,
	NULL
};
#endif
static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}
static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}
static struct dentry *shmem_get_dentry(struct super_block *sb, void *vfh)
{
	struct dentry *de = NULL;
	struct inode *inode;
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];

	inode = ilookup5(sb, (unsigned long)(inum+fh[0]), shmem_match, vfh);
	if (inode) {
		de = d_find_alias(inode);
		iput(inode);
	}

	return de? de: ERR_PTR(-ESTALE);
}
static struct dentry *shmem_decode_fh(struct super_block *sb, __u32 *fh,
		int len, int type,
		int (*acceptable)(void *context, struct dentry *de),
		void *context)
{
	if (len < 3)
		return ERR_PTR(-ESTALE);

	return sb->s_export_op->find_exported_dentry(sb, fh, NULL, acceptable,
							context);
}
static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
				int connectable)
{
	struct inode *inode = dentry->d_inode;

	if (*len < 3)
		return 255;

	if (hlist_unhashed(&inode->i_hash)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (hlist_unhashed(&inode->i_hash))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;

	*len = 3;
	return 1;
}
static struct export_operations shmem_export_ops = {
	.get_parent	= shmem_get_parent,
	.get_dentry	= shmem_get_dentry,
	.encode_fh	= shmem_encode_fh,
	.decode_fh	= shmem_decode_fh,
};
static int shmem_parse_options(char *options, int *mode, uid_t *uid,
	gid_t *gid, unsigned long *blocks, unsigned long *inodes,
	int *policy, nodemask_t *policy_nodes)
{
	char *this_char, *value, *rest;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			return 1;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			*blocks = size >> PAGE_CACHE_SHIFT;
		} else if (!strcmp(this_char,"nr_blocks")) {
			*blocks = memparse(value,&rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			*inodes = memparse(value,&rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (!mode)
				continue;
			*mode = simple_strtoul(value,&rest,8);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (!uid)
				continue;
			*uid = simple_strtoul(value,&rest,0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (!gid)
				continue;
			*gid = simple_strtoul(value,&rest,0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mpol")) {
			if (shmem_parse_mpol(value,policy,policy_nodes))
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			return 1;
		}
	}
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
	return 1;
}
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	unsigned long max_blocks = sbinfo->max_blocks;
	unsigned long max_inodes = sbinfo->max_inodes;
	int policy = sbinfo->policy;
	nodemask_t policy_nodes = sbinfo->policy_nodes;
	unsigned long blocks;
	unsigned long inodes;
	int error = -EINVAL;

	if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks,
				&max_inodes, &policy, &policy_nodes))
		return error;

	spin_lock(&sbinfo->stat_lock);
	blocks = sbinfo->max_blocks - sbinfo->free_blocks;
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (max_blocks < blocks)
		goto out;
	if (max_inodes < inodes)
		goto out;
	/*
	 * Those tests also disallow limited->unlimited while any are in
	 * use, so i_blocks will always be zero when max_blocks is zero;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (max_blocks && !sbinfo->max_blocks)
		goto out;
	if (max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks  = max_blocks;
	sbinfo->free_blocks = max_blocks - blocks;
	sbinfo->max_inodes  = max_inodes;
	sbinfo->free_inodes = max_inodes - inodes;
	sbinfo->policy = policy;
	sbinfo->policy_nodes = policy_nodes;
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}
static void shmem_put_super(struct super_block *sb)
{
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
}
static int shmem_fill_super(struct super_block *sb,
			    void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;
	int mode   = S_IRWXUGO | S_ISVTX;
	uid_t uid = current->fsuid;
	gid_t gid = current->fsgid;
	int err = -ENOMEM;
	struct shmem_sb_info *sbinfo;
	unsigned long blocks = 0;
	unsigned long inodes = 0;
	int policy = MPOL_DEFAULT;
	nodemask_t policy_nodes = node_online_map;

#ifdef CONFIG_TMPFS
	/*
	 * Per default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & MS_NOUSER)) {
		blocks = totalram_pages / 2;
		inodes = totalram_pages - totalhigh_pages;
		if (inodes > blocks)
			inodes = blocks;
		if (shmem_parse_options(data, &mode, &uid, &gid, &blocks,
					&inodes, &policy, &policy_nodes))
			return -EINVAL;
	}
	sb->s_export_op = &shmem_export_ops;
#else
	sb->s_flags |= MS_NOUSER;
#endif

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_blocks = blocks;
	sbinfo->free_blocks = blocks;
	sbinfo->max_inodes = inodes;
	sbinfo->free_inodes = inodes;
	sbinfo->policy = policy;
	sbinfo->policy_nodes = policy_nodes;

	sb->s_fs_info = sbinfo;
	sb->s_maxbytes = SHMEM_MAX_BYTES;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_xattr = shmem_xattr_handlers;
	sb->s_flags |= MS_POSIXACL;
#endif

	inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
	if (!inode)
		goto failed;
	inode->i_uid = uid;
	inode->i_gid = gid;
	root = d_alloc_root(inode);
	if (!root)
		goto failed_iput;
	sb->s_root = root;
	return 0;

failed_iput:
	iput(inode);
failed:
	shmem_put_super(sb);
	return err;
}
*shmem_inode_cachep
;
2290 static struct inode
*shmem_alloc_inode(struct super_block
*sb
)
2292 struct shmem_inode_info
*p
;
2293 p
= (struct shmem_inode_info
*)kmem_cache_alloc(shmem_inode_cachep
, GFP_KERNEL
);
2296 return &p
->vfs_inode
;
static void shmem_destroy_inode(struct inode *inode)
{
	if ((inode->i_mode & S_IFMT) == S_IFREG) {
		/* only struct inode is valid if it's an inline symlink */
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	}
	shmem_acl_destroy_inode(inode);
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}
static void init_once(void *foo, struct kmem_cache *cachep,
		      unsigned long flags)
{
	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
#ifdef CONFIG_TMPFS_POSIX_ACL
	p->i_acl = NULL;
	p->i_default_acl = NULL;
#endif
}
static int init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, 0, init_once);
	if (shmem_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}
static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
	.readpage	= shmem_readpage,
	.prepare_write	= shmem_prepare_write,
	.commit_write	= simple_commit_write,
#endif
	.migratepage	= migrate_page,
};
static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
#ifdef CONFIG_TMPFS
	.llseek		= generic_file_llseek,
	.read		= shmem_file_read,
	.write		= shmem_file_write,
	.fsync		= simple_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
#endif
};
static const struct inode_operations shmem_inode_operations = {
	.truncate	= shmem_truncate,
	.setattr	= shmem_notify_change,
	.truncate_range	= shmem_truncate_range,
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.permission	= shmem_permission,
#endif
};
static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_notify_change,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.permission	= shmem_permission,
#endif
};
static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_notify_change,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.permission	= shmem_permission,
#endif
};
= {
2407 .alloc_inode
= shmem_alloc_inode
,
2408 .destroy_inode
= shmem_destroy_inode
,
2410 .statfs
= shmem_statfs
,
2411 .remount_fs
= shmem_remount_fs
,
2413 .delete_inode
= shmem_delete_inode
,
2414 .drop_inode
= generic_delete_inode
,
2415 .put_super
= shmem_put_super
,
static struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
#ifdef CONFIG_NUMA
	.set_policy     = shmem_set_policy,
	.get_policy     = shmem_get_policy,
#endif
};
static int shmem_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
}

static struct file_system_type tmpfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.get_sb		= shmem_get_sb,
	.kill_sb	= kill_litter_super,
};

static struct vfsmount *shm_mnt;
static int __init init_tmpfs(void)
{
	int error;

	error = init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&tmpfs_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
				tmpfs_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&tmpfs_fs_type);
out2:
	destroy_inodecache();
out3:
	shm_mnt = ERR_PTR(error);
	return error;
}
module_init(init_tmpfs)
/*
 * shmem_file_setup - get an unlinked file living in tmpfs
 *
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 */
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
{
	int error;
	struct file *file;
	struct inode *inode;
	struct dentry *dentry, *root;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return (void *)shm_mnt;

	if (size < 0 || size > SHMEM_MAX_BYTES)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	error = -ENOMEM;
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	root = shm_mnt->mnt_root;
	dentry = d_alloc(root, &this);
	if (!dentry)
		goto put_memory;

	error = -ENFILE;
	file = get_empty_filp();
	if (!file)
		goto put_dentry;

	error = -ENOSPC;
	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto close_file;

	SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
	d_instantiate(dentry, inode);
	inode->i_size = size;
	inode->i_nlink = 0;	/* It is unlinked */
	file->f_path.mnt = mntget(shm_mnt);
	file->f_path.dentry = dentry;
	file->f_mapping = inode->i_mapping;
	file->f_op = &shmem_file_operations;
	file->f_mode = FMODE_WRITE | FMODE_READ;
	return file;

close_file:
	put_filp(file);
put_dentry:
	dput(dentry);
put_memory:
	shmem_unacct_size(flags, size);
	return ERR_PTR(error);
}
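
/*
 * Typical caller-side use, as in shmem_zero_setup() below: the caller
 * gets back an unlinked tmpfs file, backs a mapping with it, and drops
 * it with fput() when done.  A minimal sketch:
 *
 *	struct file *file = shmem_file_setup("dev/zero", size, vm_flags);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 */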
/*
 * shmem_zero_setup - setup a shared anonymous mapping
 *
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}