/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000-2001 Christoph Rohland
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This file is released under the GPL.
 */

/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/xattr.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>
/* This magic number is used in glibc for posix shared memory */
#define TMPFS_MAGIC	0x01021994

#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)

#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	 VM_READ
#define SHMEM_TRUNCATE	 VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT	 64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20
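
/*
 * Worked example (illustrative, not from the original source): on a
 * 64-bit box with 4K pages and 8-byte unsigned long, ENTRIES_PER_PAGE
 * is 512 and ENTRIES_PER_PAGEPAGE is 512*512 = 262144; with
 * SHMEM_NR_DIRECT == 16 that makes SHMEM_MAX_INDEX about 67 million
 * pages, so SHMEM_MAX_BYTES comes to roughly 256GB per file.  With
 * 4-byte unsigned long (32-bit) the same arithmetic gives about 2TB.
 */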
/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_QUICK,	/* don't try more than file page cache lookup */
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};
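
/*
 * For orientation (drawn from the callers later in this file):
 * shmem_nopage uses SGP_CACHE, shmem_populate uses SGP_QUICK when
 * nonblocking, the read and follow_link paths use SGP_READ, and the
 * write paths use SGP_WRITE, which alone may extend i_size.
 */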
static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type);
static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 */
	return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}
static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}
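
/*
 * A sketch of the pairing this keeps balanced (not executed here):
 * callers that may receive either a direct or an indirect entry
 * always unmap, so the direct case pretend-maps first:
 *
 *	entry = shmem_swp_entry(info, index, NULL);
 *	...
 *	shmem_swp_unmap(entry);	 (undoes either the real kmap_atomic,
 *				  or the ZERO_PAGE one done just above)
 */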
static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_ACCOUNT)?
		security_vm_enough_memory(VM_ACCT(size)): 0;
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (flags & VM_ACCOUNT)
		vm_unacct_memory(VM_ACCT(size));
}
/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_ACCOUNT)?
		0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (!(flags & VM_ACCOUNT))
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
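
/*
 * Worked example (illustrative): with 4K pages, shmem_acct_size()
 * for a 10000-byte accounted object charges VM_ACCT(10000) = 3 pages
 * up front, while an unaccounted tmpfs file of the same size is
 * charged one page at a time via shmem_acct_block(), only as pages
 * are actually allocated - which is what lets sparse files stay cheap.
 */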
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
	.unplug_io_fn	= default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_SPINLOCK(shmem_swaplist_lock);

static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_blocks += pages;
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);
	}
}
/*
 * shmem_recalc_inode - recalculate the size of an inode
 *
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}
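
/*
 * Worked example (illustrative): if info->alloced is 10 while
 * info->swapped is 3 and nrpages is 5, then freed = 10 - 3 - 5 = 2
 * undirtied hole pages were dropped behind our back, and the code
 * above returns those two blocks to the accounting.
 */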
/*
 * shmem_swp_entry - find the swap vector position in the info structure
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         NULL.
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}
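
/*
 * Illustrative sketch (added here for clarity, not used by the code
 * in this file): which of the three levels described above a given
 * page index lands in, following the same arithmetic as the lookup.
 */
static inline int shmem_swp_level_sketch(unsigned long index)
{
	if (index < SHMEM_NR_DIRECT)
		return 0;	/* direct entry in shmem_inode_info */
	index -= SHMEM_NR_DIRECT;
	if (index/ENTRIES_PER_PAGE < ENTRIES_PER_PAGE/2)
		return 1;	/* doubly indirect half of i_indirect */
	return 2;		/* triple indirect half of i_indirect */
}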
static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
		struct page *page = kmap_atomic_to_page(entry);
		set_page_private(page, page_private(page) + incdec);
	}
}
/*
 * shmem_swp_alloc - get the position of the swap entry for the page.
 *                   If it does not exist allocate the entry.
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @sgp:   check and recheck i_size? skip allocation?
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test free_blocks against 1 not 0, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
		 */
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks <= 1) {
				spin_unlock(&sbinfo->stat_lock);
				return ERR_PTR(-ENOSPC);
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
		if (page)
			set_page_private(page, 0);
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}
/*
 * shmem_free_swp - free some swap entries in a directory
 *
 * @dir:        pointer to the directory
 * @edir:       pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
						spinlock_t *punch_lock)
{
	spinlock_t *punch_unlock = NULL;
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			if (unlikely(punch_lock)) {
				punch_unlock = punch_lock;
				punch_lock = NULL;
				spin_lock(punch_unlock);
				if (!ptr->val)
					continue;
			}
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	if (punch_unlock)
		spin_unlock(punch_unlock);
	return freed;
}
static int shmem_map_and_free_swp(struct page *subdir, int offset,
		int limit, struct page ***dir, spinlock_t *punch_lock)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size,
							punch_lock);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
			ptr = shmem_swp_map(subdir);
		}
	}
	shmem_swp_unmap(ptr);
	return freed;
}

static void shmem_free_pages(struct list_head *next)
{
	struct page *page;
	int freed = 0;

	do {
		page = container_of(next, struct page, lru);
		next = next->next;
		shmem_dir_free(page);
		freed++;
		if (freed >= LATENCY_LIMIT) {
			cond_resched();
			freed = 0;
		}
	} while (next);
}
static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	unsigned long diroff;
	struct page **dir;
	struct page *topdir;
	struct page *middir;
	struct page *subdir;
	swp_entry_t *ptr;
	LIST_HEAD(pages_to_free);
	long nr_pages_to_free = 0;
	long nr_swaps_freed = 0;
	int offset;
	int freed;
	int punch_hole;
	spinlock_t *needs_lock;
	spinlock_t *punch_lock;
	unsigned long upper_limit;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	if (likely(end == (loff_t) -1)) {
		limit = info->next_index;
		upper_limit = SHMEM_MAX_INDEX;
		info->next_index = idx;
		needs_lock = NULL;
		punch_hole = 0;
	} else {
		if (end + 1 >= inode->i_size) {	/* we may free a little more */
			limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
							PAGE_CACHE_SHIFT;
			upper_limit = SHMEM_MAX_INDEX;
		} else {
			limit = (end + 1) >> PAGE_CACHE_SHIFT;
			upper_limit = limit;
		}
		needs_lock = &info->lock;
		punch_hole = 1;
	}

	topdir = info->i_indirect;
	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
		info->i_indirect = NULL;
		nr_pages_to_free++;
		list_add(&topdir->lru, &pages_to_free);
	}
	spin_unlock(&info->lock);

	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
	}

	/*
	 * If there are no indirect blocks or we are punching a hole
	 * below indirect blocks, nothing to be done.
	 */
	if (!topdir || limit <= SHMEM_NR_DIRECT)
		goto done2;

	/*
	 * The truncation case has already dropped info->lock, and we're safe
	 * because i_size and next_index have already been lowered, preventing
	 * access beyond.  But in the punch_hole case, we still need to take
	 * the lock when updating the swap directory, because there might be
	 * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
	 * shmem_writepage.  However, whenever we find we can remove a whole
	 * directory page (not at the misaligned start or end of the range),
	 * we first NULLify its pointer in the level above, and then have no
	 * need to take the lock when updating its contents: needs_lock and
	 * punch_lock (either pointing to info->lock or NULL) manage this.
	 */

	upper_limit -= SHMEM_NR_DIRECT;
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;
	idx -= offset;

	dir = shmem_dir_map(topdir);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2) {
		middir = topdir;
		diroff = idx/ENTRIES_PER_PAGE;
	} else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		middir = *dir;
		if (*dir) {
			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!diroff && !offset && upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(middir);
		} else {
			diroff = 0;
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(topdir) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			middir = *dir;
			if (punch_hole)
				needs_lock = &info->lock;
			if (upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			cond_resched();
			dir = shmem_dir_map(middir);
			diroff = 0;
		}
		punch_lock = needs_lock;
		subdir = dir[diroff];
		if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
			if (needs_lock) {
				spin_lock(needs_lock);
				dir[diroff] = NULL;
				spin_unlock(needs_lock);
				punch_lock = NULL;
			} else
				dir[diroff] = NULL;
			nr_pages_to_free++;
			list_add(&subdir->lru, &pages_to_free);
		}
		if (subdir && page_private(subdir) /* has swap entries */) {
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_map_and_free_swp(subdir,
					offset, size, &dir, punch_lock);
			if (!dir)
				dir = shmem_dir_map(middir);
			nr_swaps_freed += freed;
			if (offset || punch_lock) {
				spin_lock(&info->lock);
				set_page_private(subdir,
					page_private(subdir) - freed);
				spin_unlock(&info->lock);
			} else
				BUG_ON(page_private(subdir) != freed);
		}
		offset = 0;
	}
done1:
	shmem_dir_unmap(dir);
done2:
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since vmtruncate or
		 * generic_delete_inode did it, before we lowered next_index.
		 * Also, though shmem_getpage checks i_size before adding to
		 * cache, no recheck after: so fix the narrow window there too.
		 *
		 * Recalling truncate_inode_pages_range and unmap_mapping_range
		 * every time for punch_hole (which never got a chance to clear
		 * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
		 * yet hardly ever necessary: try to optimize them out later.
		 */
		truncate_inode_pages_range(inode->i_mapping, start, end);
		if (punch_hole)
			unmap_mapping_range(inode->i_mapping, start,
							1 + end - start, 0);
	}

	spin_lock(&info->lock);
	info->flags &= ~SHMEM_TRUNCATE;
	info->swapped -= nr_swaps_freed;
	if (nr_pages_to_free)
		shmem_free_blocks(inode, nr_pages_to_free);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	/*
	 * Empty swap vector directory pages to be freed?
	 */
	if (!list_empty(&pages_to_free)) {
		pages_to_free.prev->next = NULL;
		shmem_free_pages(pages_to_free.next);
	}
}
static void shmem_truncate(struct inode *inode)
{
	shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
}
static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct page *page = NULL;
	int error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		if (attr->ia_size < inode->i_size) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					attr->ia_size>>PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (attr->ia_size) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}
	}

	error = inode_change_ok(inode, attr);
	if (!error)
		error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (!error && (attr->ia_valid & ATTR_MODE))
		error = generic_acl_chmod(inode, &shmem_acl_ops);
#endif
	if (page)
		page_cache_release(page);
	return error;
}
static void shmem_delete_inode(struct inode *inode)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_op->truncate == shmem_truncate) {
		truncate_inode_pages(inode->i_mapping, 0);
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate(inode);
		if (!list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			list_del_init(&info->swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
	}
	BUG_ON(inode->i_blocks);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	clear_inode(inode);
}
static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}
static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct inode *inode;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;

	idx = 0;
	ptr = info->i_direct;
	spin_lock(&info->lock);
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0) {
		shmem_swp_balance_unmap();
		goto found;
	}
	if (!info->i_indirect)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && page_private(subdir)) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				goto found;
			}
			shmem_swp_unmap(ptr);
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	inode = &info->vfs_inode;
	if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr + offset, 0);
	}
	shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	/*
	 * Decrement swap count even when the entry is left behind:
	 * try_to_unuse will skip over mms, then reincrement count.
	 */
	swap_free(entry);
	return 1;
}
/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	struct list_head *p, *next;
	struct shmem_inode_info *info;
	int found = 0;

	spin_lock(&shmem_swaplist_lock);
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		if (!info->swapped)
			list_del_init(&info->swaplist);
		else if (shmem_unuse_inode(info, entry, page)) {
			/* move head to start search for next from here */
			list_move_tail(&shmem_swaplist, &info->swaplist);
			found = 1;
			break;
		}
	}
	spin_unlock(&shmem_swaplist_lock);
	return found;
}
/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * may use the ->writepage of its underlying filesystem, in which case
	 * we want to do nothing when that underlying filesystem is tmpfs
	 * (writing out to swap is useful as a response to memory pressure, but
	 * of no use to stabilize the data) - just redirty the page, unlock it
	 * and claim success in this case.  AOP_WRITEPAGE_ACTIVATE, and the
	 * page_mapped check below, must be avoided unless we're in reclaim.
	 */
	if (!wbc->for_reclaim) {
		set_page_dirty(page);
		unlock_page(page);
		return 0;
	}
	BUG_ON(page_mapped(page));

	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	BUG_ON(!entry);
	BUG_ON(entry->val);

	if (move_to_swap_cache(page, swap) == 0) {
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		spin_unlock(&info->lock);
		if (list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			/* move instead of add in case we're racing */
			list_move_tail(&info->swaplist, &shmem_swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
		unlock_page(page);
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	swap_free(swap);
redirty:
	set_page_dirty(page);
	return AOP_WRITEPAGE_ACTIVATE;	/* Return with the page locked */
}
#ifdef CONFIG_NUMA
static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
	char *nodelist = strchr(value, ':');
	int err = 1;

	if (nodelist) {
		/* NUL-terminate policy string */
		*nodelist++ = '\0';
		if (nodelist_parse(nodelist, *policy_nodes))
			goto out;
		if (!nodes_subset(*policy_nodes, node_online_map))
			goto out;
	}
	if (!strcmp(value, "default")) {
		*policy = MPOL_DEFAULT;
		/* Don't allow a nodelist */
		if (!nodelist)
			err = 0;
	} else if (!strcmp(value, "prefer")) {
		*policy = MPOL_PREFERRED;
		/* Insist on a nodelist of one node only */
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (!*rest)
				err = 0;
		}
	} else if (!strcmp(value, "bind")) {
		*policy = MPOL_BIND;
		/* Insist on a nodelist */
		if (nodelist)
			err = 0;
	} else if (!strcmp(value, "interleave")) {
		*policy = MPOL_INTERLEAVE;
		/* Default to nodes online if no nodelist */
		if (!nodelist)
			*policy_nodes = node_online_map;
		err = 0;
	}
out:
	/* Restore string for error message */
	if (nodelist)
		*--nodelist = ':';
	return err;
}
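
/*
 * Example inputs (illustrative): "mpol=interleave" interleaves over
 * all online nodes; "mpol=bind:0-3" gives MPOL_BIND over nodes 0-3
 * (the nodelist after ':' is parsed by nodelist_parse above); and
 * "mpol=prefer:1" gives MPOL_PREFERRED with the single node 1.
 */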
static struct page *shmem_swapin_async(struct shared_policy *p,
				       swp_entry_t entry, unsigned long idx)
{
	struct page *page;
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	memset(&pvma, 0, sizeof(struct vm_area_struct));
	pvma.vm_end = PAGE_SIZE;
	pvma.vm_pgoff = idx;
	pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
	page = read_swap_cache_async(entry, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}

struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
			  unsigned long idx)
{
	struct shared_policy *p = &info->policy;
	int i, num;
	struct page *page;
	unsigned long offset;

	num = valid_swaphandles(entry, &offset);
	for (i = 0; i < num; offset++, i++) {
		page = shmem_swapin_async(p,
				swp_entry(swp_type(entry), offset), idx);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return shmem_swapin_async(p, entry, idx);
}

static struct page *
shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
		 unsigned long idx)
{
	struct vm_area_struct pvma;
	struct page *page;

	memset(&pvma, 0, sizeof(struct vm_area_struct));
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
	pvma.vm_pgoff = idx;
	pvma.vm_end = PAGE_SIZE;
	page = alloc_page_vma(gfp, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}
#else /* !CONFIG_NUMA */
static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
	return 1;
}

static inline struct page *
shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
{
	swapin_readahead(entry, 0, NULL);
	return read_swap_cache_async(entry, NULL, 0);
}

static inline struct page *
shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */
/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache.
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
			struct page **pagep, enum sgp_type sgp, int *type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *filepage = *pagep;
	struct page *swappage;
	swp_entry_t *entry;
	swp_entry_t swap;
	int error;

	if (idx >= SHMEM_MAX_INDEX)
		return -EFBIG;
	/*
	 * Normally, filepage is NULL on entry, and either found
	 * uptodate immediately, or allocated and zeroed, or read
	 * in under swappage, which is then assigned to filepage.
	 * But shmem_prepare_write passes in a locked filepage,
	 * which may be found not uptodate by other callers too,
	 * and may need to be copied from the swappage read in.
	 */
repeat:
	if (!filepage)
		filepage = find_lock_page(mapping, idx);
	if (filepage && PageUptodate(filepage))
		goto done;
	error = 0;
	if (sgp == SGP_QUICK)
		goto failed;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	entry = shmem_swp_alloc(info, idx, sgp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		error = PTR_ERR(entry);
		goto failed;
	}
	swap = *entry;

	if (swap.val) {
		/* Look it up and read it in.. */
		swappage = lookup_swap_cache(swap);
		if (!swappage) {
			shmem_swp_unmap(entry);
			/* here we actually do the io */
			if (type && *type == VM_FAULT_MINOR) {
				__count_vm_event(PGMAJFAULT);
				*type = VM_FAULT_MAJOR;
			}
			spin_unlock(&info->lock);
			swappage = shmem_swapin(info, swap, idx);
			if (!swappage) {
				spin_lock(&info->lock);
				entry = shmem_swp_alloc(info, idx, sgp);
				if (IS_ERR(entry))
					error = PTR_ERR(entry);
				else {
					if (entry->val == swap.val)
						error = -ENOMEM;
					shmem_swp_unmap(entry);
				}
				spin_unlock(&info->lock);
				if (error)
					goto failed;
				goto repeat;
			}
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (TestSetPageLocked(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (PageWriteback(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_writeback(swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (!PageUptodate(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			error = -EIO;
			goto failed;
		}

		if (filepage) {
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			copy_highpage(filepage, swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			flush_dcache_page(filepage);
			SetPageUptodate(filepage);
			set_page_dirty(filepage);
			swap_free(swap);
		} else if (!(error = move_from_swap_cache(
				swappage, idx, mapping))) {
			info->flags |= SHMEM_PAGEIN;
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			filepage = swappage;
			swap_free(swap);
		} else {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			if (error == -ENOMEM) {
				/* let kswapd refresh zone for GFP_ATOMICs */
				congestion_wait(WRITE, HZ/50);
			}
			goto repeat;
		}
	} else if (sgp == SGP_READ && !filepage) {
		shmem_swp_unmap(entry);
		filepage = find_get_page(mapping, idx);
		if (filepage &&
		    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
			spin_unlock(&info->lock);
			wait_on_page_locked(filepage);
			page_cache_release(filepage);
			filepage = NULL;
			goto repeat;
		}
		spin_unlock(&info->lock);
	} else {
		shmem_swp_unmap(entry);
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks == 0 ||
			    shmem_acct_block(info->flags)) {
				spin_unlock(&sbinfo->stat_lock);
				spin_unlock(&info->lock);
				error = -ENOSPC;
				goto failed;
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		} else if (shmem_acct_block(info->flags)) {
			spin_unlock(&info->lock);
			error = -ENOSPC;
			goto failed;
		}

		if (!filepage) {
			spin_unlock(&info->lock);
			filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
						    info, idx);
			if (!filepage) {
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				error = -ENOMEM;
				goto failed;
			}

			spin_lock(&info->lock);
			entry = shmem_swp_alloc(info, idx, sgp);
			if (IS_ERR(entry))
				error = PTR_ERR(entry);
			else {
				swap = *entry;
				shmem_swp_unmap(entry);
			}
			if (error || swap.val || 0 != add_to_page_cache_lru(
					filepage, mapping, idx, GFP_ATOMIC)) {
				spin_unlock(&info->lock);
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				if (error)
					goto failed;
				goto repeat;
			}
			info->flags |= SHMEM_PAGEIN;
		}

		info->alloced++;
		spin_unlock(&info->lock);
		clear_highpage(filepage);
		flush_dcache_page(filepage);
		SetPageUptodate(filepage);
	}
done:
	if (*pagep != filepage) {
		unlock_page(filepage);
		*pagep = filepage;
	}
	return 0;

failed:
	if (*pagep != filepage) {
		unlock_page(filepage);
		page_cache_release(filepage);
	}
	return error;
}
static struct page *shmem_nopage(struct vm_area_struct *vma,
				 unsigned long address, int *type)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	struct page *page = NULL;
	unsigned long idx;
	int error;

	idx = (address - vma->vm_start) >> PAGE_SHIFT;
	idx += vma->vm_pgoff;
	idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
	if (((loff_t) idx << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return NOPAGE_SIGBUS;

	error = shmem_getpage(inode, idx, &page, SGP_CACHE, type);
	if (error)
		return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;

	mark_page_accessed(page);
	return page;
}
static int shmem_populate(struct vm_area_struct *vma,
			  unsigned long addr, unsigned long len,
			  pgprot_t prot, unsigned long pgoff, int nonblock)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	struct mm_struct *mm = vma->vm_mm;
	enum sgp_type sgp = nonblock? SGP_QUICK: SGP_CACHE;
	unsigned long size;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size || pgoff + (len >> PAGE_SHIFT) > size)
		return -EINVAL;

	while ((long) len > 0) {
		struct page *page = NULL;
		int err;
		/*
		 * Will need changing if PAGE_CACHE_SIZE != PAGE_SIZE
		 */
		err = shmem_getpage(inode, pgoff, &page, sgp, NULL);
		if (err)
			return err;
		/* Page may still be null, but only if nonblock was set. */
		if (page) {
			mark_page_accessed(page);
			err = install_page(mm, vma, addr, page, prot);
			if (err) {
				page_cache_release(page);
				return err;
			}
		} else if (vma->vm_flags & VM_NONLINEAR) {
			/* No page was found just because we can't read it in
			 * now (being here implies nonblock != 0), but the page
			 * may exist, so set the PTE to fault it in later. */
			err = install_file_pte(mm, vma, addr, pgoff, prot);
			if (err)
				return err;
		}

		len -= PAGE_SIZE;
		addr += PAGE_SIZE;
		pgoff++;
	}
	return 0;
}
#ifdef CONFIG_NUMA
int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}

struct mempolicy *
shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	unsigned long idx;

	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
	}
	retval = 0;
out_nomem:
	spin_unlock(&info->lock);
	return retval;
}
static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}
static struct inode *
shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return NULL;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blocks = 0;
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		INIT_LIST_HEAD(&info->swaplist);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy, sbinfo->policy,
							&sbinfo->policy_nodes);
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, MPOL_DEFAULT,
						NULL);
			break;
		}
	} else if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	return inode;
}

static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_symlink_inline_operations;
/*
 * Normally tmpfs makes no use of shmem_prepare_write, but it
 * lets a tmpfs file be used read-write below the loop driver.
 */
static int
shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{
	struct inode *inode = page->mapping->host;
	return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL);
}
static ssize_t
shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	loff_t pos;
	unsigned long written;
	ssize_t err;

	if ((ssize_t) count < 0)
		return -EINVAL;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	mutex_lock(&inode->i_mutex);

	pos = *ppos;
	written = 0;

	err = generic_write_checks(file, &pos, &count, 0);
	if (err || !count)
		goto out;

	err = remove_suid(file->f_path.dentry);
	if (err)
		goto out;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;

	do {
		struct page *page = NULL;
		unsigned long bytes, index, offset;
		char *kaddr;
		int left;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		/*
		 * We don't hold page lock across copy from user -
		 * what would it guard against? - so no deadlock here.
		 * But it still may be a good idea to prefault below.
		 */

		err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
		if (err)
			break;

		left = bytes;
		if (PageHighMem(page)) {
			volatile unsigned char dummy;
			__get_user(dummy, buf);
			__get_user(dummy, buf + bytes - 1);

			kaddr = kmap_atomic(page, KM_USER0);
			left = __copy_from_user_inatomic(kaddr + offset,
							buf, bytes);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if (left) {
			kaddr = kmap(page);
			left = __copy_from_user(kaddr + offset, buf, bytes);
			kunmap(page);
		}

		written += bytes;
		count -= bytes;
		pos += bytes;
		buf += bytes;
		if (pos > inode->i_size)
			i_size_write(inode, pos);

		flush_dcache_page(page);
		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		if (left) {
			pos -= left;
			written -= left;
			err = -EFAULT;
			break;
		}

		/*
		 * Our dirty pages are not counted in nr_dirty,
		 * and we do not attempt to balance dirty pages.
		 */

		cond_resched();
	} while (count);

	*ppos = pos;
	if (written)
		err = written;
out:
	mutex_unlock(&inode->i_mutex);
	return err;
}
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index, offset;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		unsigned long end_index, nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}
static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	read_descriptor_t desc;

	if ((ssize_t) count < 0)
		return -EINVAL;
	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.buf = buf;
	desc.error = 0;

	do_shmem_file_read(filp, ppos, &desc, file_read_actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}

static ssize_t shmem_file_sendfile(struct file *in_file, loff_t *ppos,
			 size_t count, read_actor_t actor, void *target)
{
	read_descriptor_t desc;

	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.data = target;
	desc.error = 0;

	do_shmem_file_read(in_file, ppos, &desc, actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	spin_lock(&sbinfo->stat_lock);
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	spin_unlock(&sbinfo->stat_lock);
	return 0;
}
/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
	int error = -ENOSPC;

	if (inode) {
		error = security_inode_init_security(inode, dir, NULL, NULL,
						     NULL);
		if (error) {
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
		}
		error = shmem_acl_init(inode, dir);
		if (error) {
			iput(inode);
			return error;
		}
		if (dir->i_mode & S_ISGID) {
			inode->i_gid = dir->i_gid;
			if (S_ISDIR(mode))
				inode->i_mode |= S_ISGID;
		}
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}
*dir
, struct dentry
*dentry
, int mode
)
1810 if ((error
= shmem_mknod(dir
, dentry
, mode
| S_IFDIR
, 0)))
1816 static int shmem_create(struct inode
*dir
, struct dentry
*dentry
, int mode
,
1817 struct nameidata
*nd
)
1819 return shmem_mknod(dir
, dentry
, mode
| S_IFREG
, 0);
1825 static int shmem_link(struct dentry
*old_dentry
, struct inode
*dir
, struct dentry
*dentry
)
1827 struct inode
*inode
= old_dentry
->d_inode
;
1828 struct shmem_sb_info
*sbinfo
= SHMEM_SB(inode
->i_sb
);
1831 * No ordinary (disk based) filesystem counts links as inodes;
1832 * but each new link needs a new dentry, pinning lowmem, and
1833 * tmpfs dentries cannot be pruned until they are unlinked.
1835 if (sbinfo
->max_inodes
) {
1836 spin_lock(&sbinfo
->stat_lock
);
1837 if (!sbinfo
->free_inodes
) {
1838 spin_unlock(&sbinfo
->stat_lock
);
1841 sbinfo
->free_inodes
--;
1842 spin_unlock(&sbinfo
->stat_lock
);
1845 dir
->i_size
+= BOGO_DIRENT_SIZE
;
1846 inode
->i_ctime
= dir
->i_ctime
= dir
->i_mtime
= CURRENT_TIME
;
1848 atomic_inc(&inode
->i_count
); /* New dentry reference */
1849 dget(dentry
); /* Extra pinning count for the created dentry */
1850 d_instantiate(dentry
, inode
);
static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_inodes) {
			spin_lock(&sbinfo->stat_lock);
			sbinfo->free_inodes++;
			spin_unlock(&sbinfo->stat_lock);
		}
	}

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}
/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}
static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page = NULL;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, NULL, NULL,
					     NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= (char *)inode - (char *)info) {
		/* do it inline */
		memcpy(info, symname, len);
		inode->i_op = &shmem_symlink_inline_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		page_cache_release(page);
	}
	if (dir->i_mode & S_ISGID)
		inode->i_gid = dir->i_gid;
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}
static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
	return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
	return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}
static const struct inode_operations shmem_symlink_inline_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link_inline,
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.truncate	= shmem_truncate,
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
};
#ifdef CONFIG_TMPFS_POSIX_ACL
/*
 * Superblocks without xattr inode operations will get security.* xattr
 * support from the VFS "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */
static size_t shmem_xattr_security_list(struct inode *inode, char *list,
					size_t list_len, const char *name,
					size_t name_len)
{
	return security_inode_listsecurity(inode, list, list_len);
}

static int shmem_xattr_security_get(struct inode *inode, const char *name,
				    void *buffer, size_t size)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return security_inode_getsecurity(inode, name, buffer, size,
					  -EOPNOTSUPP);
}

static int shmem_xattr_security_set(struct inode *inode, const char *name,
				    const void *value, size_t size, int flags)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return security_inode_setsecurity(inode, name, value, size, flags);
}

static struct xattr_handler shmem_xattr_security_handler = {
	.prefix	= XATTR_SECURITY_PREFIX,
	.list	= shmem_xattr_security_list,
	.get	= shmem_xattr_security_get,
	.set	= shmem_xattr_security_set,
};
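
/*
 * Example (illustrative): a getxattr("security.selinux") call reaches
 * shmem_xattr_security_get() with name == "selinux", the
 * XATTR_SECURITY_PREFIX having been stripped by the generic xattr
 * code; the strcmp(name, "") checks above reject a bare "security."
 * request.
 */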
static struct xattr_handler *shmem_xattr_handlers[] = {
	&shmem_xattr_acl_access_handler,
	&shmem_xattr_acl_default_handler,
	&shmem_xattr_security_handler,
	NULL
};
#endif
static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

static struct dentry *shmem_get_dentry(struct super_block *sb, void *vfh)
{
	struct dentry *de = NULL;
	struct inode *inode;
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];

	inode = ilookup5(sb, (unsigned long)(inum+fh[0]), shmem_match, vfh);
	if (inode) {
		de = d_find_alias(inode);
		iput(inode);
	}
	return de? de: ERR_PTR(-ESTALE);
}

static struct dentry *shmem_decode_fh(struct super_block *sb, __u32 *fh,
		int len, int type,
		int (*acceptable)(void *context, struct dentry *de),
		void *context)
{
	if (len < 3)
		return ERR_PTR(-ESTALE);

	return sb->s_export_op->find_exported_dentry(sb, fh, NULL, acceptable,
							context);
}
static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
				int connectable)
{
	struct inode *inode = dentry->d_inode;

	if (*len < 3)
		return 255;

	if (hlist_unhashed(&inode->i_hash)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (hlist_unhashed(&inode->i_hash))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;

	*len = 3;
	return 1;
}

static struct export_operations shmem_export_ops = {
	.get_parent	= shmem_get_parent,
	.get_dentry	= shmem_get_dentry,
	.encode_fh	= shmem_encode_fh,
	.decode_fh	= shmem_decode_fh,
};
static int shmem_parse_options(char *options, int *mode, uid_t *uid,
	gid_t *gid, unsigned long *blocks, unsigned long *inodes,
	int *policy, nodemask_t *policy_nodes)
{
	char *this_char, *value, *rest;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			return 1;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			*blocks = size >> PAGE_CACHE_SHIFT;
		} else if (!strcmp(this_char,"nr_blocks")) {
			*blocks = memparse(value,&rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			*inodes = memparse(value,&rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (!mode)
				continue;
			*mode = simple_strtoul(value,&rest,8);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (!uid)
				continue;
			*uid = simple_strtoul(value,&rest,0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (!gid)
				continue;
			*gid = simple_strtoul(value,&rest,0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mpol")) {
			if (shmem_parse_mpol(value,policy,policy_nodes))
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			return 1;
		}
	}
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
	return 1;
}
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	unsigned long max_blocks = sbinfo->max_blocks;
	unsigned long max_inodes = sbinfo->max_inodes;
	int policy = sbinfo->policy;
	nodemask_t policy_nodes = sbinfo->policy_nodes;
	unsigned long blocks;
	unsigned long inodes;
	int error = -EINVAL;

	if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks,
				&max_inodes, &policy, &policy_nodes))
		return error;

	spin_lock(&sbinfo->stat_lock);
	blocks = sbinfo->max_blocks - sbinfo->free_blocks;
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (max_blocks < blocks)
		goto out;
	if (max_inodes < inodes)
		goto out;
	/*
	 * Those tests also disallow limited->unlimited while any are in
	 * use, so i_blocks will always be zero when max_blocks is zero;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (max_blocks && !sbinfo->max_blocks)
		goto out;
	if (max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks  = max_blocks;
	sbinfo->free_blocks = max_blocks - blocks;
	sbinfo->max_inodes  = max_inodes;
	sbinfo->free_inodes = max_inodes - inodes;
	sbinfo->policy = policy;
	sbinfo->policy_nodes = policy_nodes;
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}

static void shmem_put_super(struct super_block *sb)
{
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
}
static int shmem_fill_super(struct super_block *sb,
			    void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;
	int mode   = S_IRWXUGO | S_ISVTX;
	uid_t uid = current->fsuid;
	gid_t gid = current->fsgid;
	int err = -ENOMEM;
	struct shmem_sb_info *sbinfo;
	unsigned long blocks = 0;
	unsigned long inodes = 0;
	int policy = MPOL_DEFAULT;
	nodemask_t policy_nodes = node_online_map;

#ifdef CONFIG_TMPFS
	/*
	 * Per default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & MS_NOUSER)) {
		blocks = totalram_pages / 2;
		inodes = totalram_pages - totalhigh_pages;
		if (inodes > blocks)
			inodes = blocks;
		if (shmem_parse_options(data, &mode, &uid, &gid, &blocks,
					&inodes, &policy, &policy_nodes))
			return -EINVAL;
	}
	sb->s_export_op = &shmem_export_ops;
#else
	sb->s_flags |= MS_NOUSER;
#endif

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_blocks = blocks;
	sbinfo->free_blocks = blocks;
	sbinfo->max_inodes = inodes;
	sbinfo->free_inodes = inodes;
	sbinfo->policy = policy;
	sbinfo->policy_nodes = policy_nodes;

	sb->s_fs_info = sbinfo;
	sb->s_maxbytes = SHMEM_MAX_BYTES;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_xattr = shmem_xattr_handlers;
	sb->s_flags |= MS_POSIXACL;
#endif

	inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
	if (!inode)
		goto failed;
	inode->i_uid = uid;
	inode->i_gid = gid;
	root = d_alloc_root(inode);
	if (!root)
		goto failed_iput;
	sb->s_root = root;
	return 0;

failed_iput:
	iput(inode);
failed:
	shmem_put_super(sb);
	return err;
}
*shmem_inode_cachep
;
2355 static struct inode
*shmem_alloc_inode(struct super_block
*sb
)
2357 struct shmem_inode_info
*p
;
2358 p
= (struct shmem_inode_info
*)kmem_cache_alloc(shmem_inode_cachep
, GFP_KERNEL
);
2361 return &p
->vfs_inode
;
2364 static void shmem_destroy_inode(struct inode
*inode
)
2366 if ((inode
->i_mode
& S_IFMT
) == S_IFREG
) {
2367 /* only struct inode is valid if it's an inline symlink */
2368 mpol_free_shared_policy(&SHMEM_I(inode
)->policy
);
2370 shmem_acl_destroy_inode(inode
);
2371 kmem_cache_free(shmem_inode_cachep
, SHMEM_I(inode
));
2374 static void init_once(void *foo
, struct kmem_cache
*cachep
,
2375 unsigned long flags
)
2377 struct shmem_inode_info
*p
= (struct shmem_inode_info
*) foo
;
2379 inode_init_once(&p
->vfs_inode
);
2380 #ifdef CONFIG_TMPFS_POSIX_ACL
2382 p
->i_default_acl
= NULL
;
2386 static int init_inodecache(void)
2388 shmem_inode_cachep
= kmem_cache_create("shmem_inode_cache",
2389 sizeof(struct shmem_inode_info
),
2390 0, 0, init_once
, NULL
);
2391 if (shmem_inode_cachep
== NULL
)
2396 static void destroy_inodecache(void)
2398 kmem_cache_destroy(shmem_inode_cachep
);
static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
	.prepare_write	= shmem_prepare_write,
	.commit_write	= simple_commit_write,
	.migratepage	= migrate_page,
};

static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
	.llseek		= generic_file_llseek,
	.read		= shmem_file_read,
	.write		= shmem_file_write,
	.fsync		= simple_sync_file,
	.sendfile	= shmem_file_sendfile,
};

static const struct inode_operations shmem_inode_operations = {
	.truncate	= shmem_truncate,
	.setattr	= shmem_notify_change,
	.truncate_range	= shmem_truncate_range,
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.permission	= shmem_permission,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_notify_change,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.permission	= shmem_permission,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_notify_change,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.permission	= shmem_permission,
#endif
};

static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
	.delete_inode	= shmem_delete_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};

static struct vm_operations_struct shmem_vm_ops = {
	.nopage		= shmem_nopage,
	.populate	= shmem_populate,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};
static int shmem_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
}

static struct file_system_type tmpfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.get_sb		= shmem_get_sb,
	.kill_sb	= kill_litter_super,
};

static struct vfsmount *shm_mnt;

static int __init init_tmpfs(void)
{
	int error;

	error = init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&tmpfs_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
				tmpfs_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&tmpfs_fs_type);
out2:
	destroy_inodecache();
out3:
	shm_mnt = ERR_PTR(error);
	return error;
}
module_init(init_tmpfs)
/*
 * shmem_file_setup - get an unlinked file living in tmpfs
 *
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: vm_flags
 */
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
{
	int error;
	struct file *file;
	struct inode *inode;
	struct dentry *dentry, *root;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return (void *)shm_mnt;

	if (size < 0 || size > SHMEM_MAX_BYTES)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	error = -ENOMEM;
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	root = shm_mnt->mnt_root;
	dentry = d_alloc(root, &this);
	if (!dentry)
		goto put_memory;

	error = -ENFILE;
	file = get_empty_filp();
	if (!file)
		goto put_dentry;

	error = -ENOSPC;
	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto close_file;

	SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
	d_instantiate(dentry, inode);
	inode->i_size = size;
	inode->i_nlink = 0;	/* It is unlinked */
	file->f_path.mnt = mntget(shm_mnt);
	file->f_path.dentry = dentry;
	file->f_mapping = inode->i_mapping;
	file->f_op = &shmem_file_operations;
	file->f_mode = FMODE_WRITE | FMODE_READ;
	return file;

close_file:
	put_filp(file);
put_dentry:
	dput(dentry);
put_memory:
	shmem_unacct_size(flags, size);
	return ERR_PTR(error);
}
/*
 * shmem_zero_setup - setup a shared anonymous mapping
 *
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}
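
/*
 * Typical caller pattern (a sketch, not part of this file): shared
 * anonymous mappings reach here from do_mmap_pgoff() roughly as
 *
 *	if (!file && (vm_flags & VM_SHARED))
 *		error = shmem_zero_setup(vma);
 *
 * so the vma ends up backed by an unlinked tmpfs file.
 */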