/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000-2001 Christoph Rohland
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This file is released under the GPL.
 */

/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/xattr.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>
/* This magic number is used in glibc for posix shared memory */
#define TMPFS_MAGIC	0x01021994

#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)

#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
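/*
 * Worked example of the limits above (a sketch, assuming the common
 * PAGE_CACHE_SIZE of 4096 bytes, a 4-byte unsigned long, and
 * SHMEM_NR_DIRECT == 16; other configurations give other values):
 *
 *	ENTRIES_PER_PAGE     = 4096 / 4 = 1024
 *	ENTRIES_PER_PAGEPAGE = 1024 * 1024
 *	SHMEM_MAX_INDEX      = 16 + (1024*1024/2) * 1025 ~= 2^29 pages
 *	SHMEM_MAX_BYTES      ~= 2^41 bytes, i.e. about 2TB per file
 */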
/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	 VM_READ
#define SHMEM_TRUNCATE	 VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT	 64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Keep swapped page count in private field of indirect struct page */
#define nr_swapped	 private

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_QUICK,	/* don't try more than file page cache lookup */
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};
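/*
 * For orientation (derived from the callers later in this file):
 * shmem_nopage faults pages in with SGP_CACHE, shmem_file_read uses
 * SGP_READ, shmem_file_write and shmem_prepare_write use SGP_WRITE,
 * and nonblocking shmem_populate uses SGP_QUICK, e.g.:
 *
 *	error = shmem_getpage(inode, idx, &page, SGP_CACHE, type);
 */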
static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type);
static inline struct page *shmem_dir_alloc(unsigned int gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 */
	return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}
static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}
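/*
 * Note the two distinct kmap slots above: directory pages are mapped
 * with KM_USER0 and swap-entry pages with KM_USER1, so both mappings
 * can be held at once, as in this hypothetical sketch:
 *
 *	dir = shmem_dir_map(info->i_indirect);	(KM_USER0)
 *	ptr = shmem_swp_map(subdir);		(KM_USER1)
 *	...
 *	shmem_swp_unmap(ptr);
 *	shmem_dir_unmap(dir);
 */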
static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_ACCOUNT)?
		security_vm_enough_memory(VM_ACCT(size)): 0;
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (flags & VM_ACCOUNT)
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_ACCOUNT)?
		0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (!(flags & VM_ACCOUNT))
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
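/*
 * The two schemes are complementary: objects created with VM_ACCOUNT
 * (e.g. via shmem_file_setup) were charged in full up front and are
 * not charged per block, while plain tmpfs files are charged one page
 * at a time.  A caller sketch (hypothetical):
 *
 *	if (shmem_acct_block(info->flags))
 *		return -ENOSPC;		(not -ENOMEM: see comment above)
 */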
static struct super_operations shmem_ops;
static struct address_space_operations shmem_aops;
static struct file_operations shmem_file_operations;
static struct inode_operations shmem_inode_operations;
static struct inode_operations shmem_dir_inode_operations;
static struct inode_operations shmem_special_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
	.unplug_io_fn	= default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_SPINLOCK(shmem_swaplist_lock);

static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_blocks += pages;
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);
	}
}
/*
 * shmem_recalc_inode - recalculate the size of an inode
 *
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}
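/*
 * Worked example: if info->alloced == 10 but the file now has
 * nrpages == 5 in cache and swapped == 3 on swap, the mm must have
 * dropped 2 clean hole pages behind our back, so freed == 2 and
 * those 2 blocks are unaccounted and returned to the superblock.
 */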
/*
 * shmem_swp_entry - find the swap vector position in the info structure
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 *	      |	     +-> 20-23
 *	      |
 *	      +-->dir2 --> 24-27
 *	      |	       +-> 28-31
 *	      |	       +-> 32-35
 *	      |	       +-> 36-39
 *	      |
 *	      +-->dir3 --> 40-43
 *		       +-> 44-47
 *		       +-> 48-51
 *		       +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}
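/*
 * Worked example of the decomposition above (values hypothetical,
 * assuming ENTRIES_PER_PAGE = 1024 and SHMEM_NR_DIRECT = 16): for
 * file page index 5000,
 *
 *	index - 16 = 4984
 *	offset     = 4984 % 1024 = 888
 *	index      = 4984 / 1024 = 4
 *
 * and since 4 < ENTRIES_PER_PAGE/2 = 512, the entry lives at slot 888
 * of the doubly indirect page hung off slot 4 of i_indirect.
 */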
static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT)
		kmap_atomic_to_page(entry)->nr_swapped += incdec;
}
/*
 * shmem_swp_alloc - get the position of the swap entry for the page.
 *                   If it does not exist allocate the entry.
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @sgp:   check and recheck i_size? skip allocation?
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test free_blocks against 1 not 0, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
		 */
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks <= 1) {
				spin_unlock(&sbinfo->stat_lock);
				return ERR_PTR(-ENOSPC);
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
		if (page)
			page->nr_swapped = 0;
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}
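/*
 * Callers hold info->lock and must check for an ERR_PTR, e.g. the
 * pattern used from shmem_getpage below (a simplified sketch):
 *
 *	entry = shmem_swp_alloc(info, idx, sgp);
 *	if (IS_ERR(entry)) {
 *		spin_unlock(&info->lock);
 *		error = PTR_ERR(entry);
 *		...
 *	}
 *	...
 *	shmem_swp_unmap(entry);
 */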
/*
 * shmem_free_swp - free some swap entries in a directory
 *
 * @dir:  pointer to the directory
 * @edir: pointer after last entry of the directory
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	return freed;
}

static int shmem_map_and_free_swp(struct page *subdir,
		int offset, int limit, struct page ***dir)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
			ptr = shmem_swp_map(subdir);
		}
	}
	shmem_swp_unmap(ptr);
	return freed;
}

static void shmem_free_pages(struct list_head *next)
{
	struct page *page;
	int freed = 0;

	do {
		page = container_of(next, struct page, lru);
		next = next->next;
		shmem_dir_free(page);
		freed++;
		if (freed >= LATENCY_LIMIT) {
			cond_resched();
			freed = 0;
		}
	} while (next);
}
static void shmem_truncate(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	unsigned long diroff;
	struct page **dir;
	struct page *topdir;
	struct page *middir;
	struct page *subdir;
	swp_entry_t *ptr;
	LIST_HEAD(pages_to_free);
	long nr_pages_to_free = 0;
	long nr_swaps_freed = 0;
	int offset;
	int freed;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	limit = info->next_index;
	info->next_index = idx;
	topdir = info->i_indirect;
	if (topdir && idx <= SHMEM_NR_DIRECT) {
		info->i_indirect = NULL;
		nr_pages_to_free++;
		list_add(&topdir->lru, &pages_to_free);
	}
	spin_unlock(&info->lock);

	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size);
	}
	if (!topdir)
		goto done2;

	BUG_ON(limit <= SHMEM_NR_DIRECT);
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;
	idx -= offset;

	dir = shmem_dir_map(topdir);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2) {
		middir = topdir;
		diroff = idx/ENTRIES_PER_PAGE;
	} else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		middir = *dir;
		if (*dir) {
			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!diroff && !offset) {
				*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(middir);
		} else {
			diroff = 0;
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(topdir) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			middir = *dir;
			*dir = NULL;
			nr_pages_to_free++;
			list_add(&middir->lru, &pages_to_free);
			shmem_dir_unmap(dir);
			cond_resched();
			dir = shmem_dir_map(middir);
			diroff = 0;
		}
		subdir = dir[diroff];
		if (subdir && subdir->nr_swapped) {
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_map_and_free_swp(subdir,
						offset, size, &dir);
			if (!dir)
				dir = shmem_dir_map(middir);
			nr_swaps_freed += freed;
			if (offset)
				spin_lock(&info->lock);
			subdir->nr_swapped -= freed;
			if (offset)
				spin_unlock(&info->lock);
			BUG_ON(subdir->nr_swapped > offset);
		}
		if (offset)
			offset = 0;
		else if (subdir) {
			dir[diroff] = NULL;
			nr_pages_to_free++;
			list_add(&subdir->lru, &pages_to_free);
		}
	}
done1:
	shmem_dir_unmap(dir);
done2:
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since vmtruncate or
		 * generic_delete_inode did it, before we lowered next_index.
		 * Also, though shmem_getpage checks i_size before adding to
		 * cache, no recheck after: so fix the narrow window there too.
		 */
		truncate_inode_pages(inode->i_mapping, inode->i_size);
	}

	spin_lock(&info->lock);
	info->flags &= ~SHMEM_TRUNCATE;
	info->swapped -= nr_swaps_freed;
	if (nr_pages_to_free)
		shmem_free_blocks(inode, nr_pages_to_free);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	/*
	 * Empty swap vector directory pages to be freed?
	 */
	if (!list_empty(&pages_to_free)) {
		pages_to_free.prev->next = NULL;
		shmem_free_pages(pages_to_free.next);
	}
}
static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct page *page = NULL;
	int error;

	if (attr->ia_valid & ATTR_SIZE) {
		if (attr->ia_size < inode->i_size) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					attr->ia_size>>PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (attr->ia_size) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}
	}

	error = inode_change_ok(inode, attr);
	if (!error)
		error = inode_setattr(inode, attr);
	if (page)
		page_cache_release(page);
	return error;
}
static void shmem_delete_inode(struct inode *inode)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_op->truncate == shmem_truncate) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate(inode);
		if (!list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			list_del_init(&info->swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
	}
	BUG_ON(inode->i_blocks);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	clear_inode(inode);
}

static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}
static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct inode *inode;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;

	idx = 0;
	ptr = info->i_direct;
	spin_lock(&info->lock);
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0) {
		shmem_swp_balance_unmap();
		goto found;
	}
	if (!info->i_indirect)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && subdir->nr_swapped) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				goto found;
			}
			shmem_swp_unmap(ptr);
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	inode = &info->vfs_inode;
	if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr + offset, 0);
	}
	shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	/*
	 * Decrement swap count even when the entry is left behind:
	 * try_to_unuse will skip over mms, then reincrement count.
	 */
	swap_free(entry);
	return 1;
}
/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	struct list_head *p, *next;
	struct shmem_inode_info *info;
	int found = 0;

	spin_lock(&shmem_swaplist_lock);
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		if (!info->swapped)
			list_del_init(&info->swaplist);
		else if (shmem_unuse_inode(info, entry, page)) {
			/* move head to start search for next from here */
			list_move_tail(&shmem_swaplist, &info->swaplist);
			found = 1;
			break;
		}
	}
	spin_unlock(&shmem_swaplist_lock);
	return found;
}
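/*
 * Sketch of the caller's side (a hypothetical simplification of the
 * swapoff path): try_to_unuse walks each swap entry, reads the page
 * into the swap cache, and offers it back to tmpfs:
 *
 *	page = read_swap_cache_async(entry, NULL, 0);
 *	...
 *	shmem_unuse(entry, page);	(returns 1 if an inode claimed it)
 */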
/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	BUG_ON(page_mapped(page));

	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	BUG_ON(!entry);
	BUG_ON(entry->val);

	if (move_to_swap_cache(page, swap) == 0) {
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		spin_unlock(&info->lock);
		if (list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			/* move instead of add in case we're racing */
			list_move_tail(&info->swaplist, &shmem_swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
		unlock_page(page);
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	swap_free(swap);
redirty:
	set_page_dirty(page);
	return WRITEPAGE_ACTIVATE;	/* Return with the page locked */
}
#ifdef CONFIG_NUMA
static struct page *shmem_swapin_async(struct shared_policy *p,
				       swp_entry_t entry, unsigned long idx)
{
	struct page *page;
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	memset(&pvma, 0, sizeof(struct vm_area_struct));
	pvma.vm_end = PAGE_SIZE;
	pvma.vm_pgoff = idx;
	pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
	page = read_swap_cache_async(entry, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}

struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
			  unsigned long idx)
{
	struct shared_policy *p = &info->policy;
	int i, num;
	struct page *page;
	unsigned long offset;

	num = valid_swaphandles(entry, &offset);
	for (i = 0; i < num; offset++, i++) {
		page = shmem_swapin_async(p,
				swp_entry(swp_type(entry), offset), idx);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return shmem_swapin_async(p, entry, idx);
}

static struct page *
shmem_alloc_page(unsigned long gfp, struct shmem_inode_info *info,
		 unsigned long idx)
{
	struct vm_area_struct pvma;
	struct page *page;

	memset(&pvma, 0, sizeof(struct vm_area_struct));
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
	pvma.vm_pgoff = idx;
	pvma.vm_end = PAGE_SIZE;
	page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}
#else /* !CONFIG_NUMA */
static inline struct page *
shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry, unsigned long idx)
{
	swapin_readahead(entry, 0, NULL);
	return read_swap_cache_async(entry, NULL, 0);
}

static inline struct page *
shmem_alloc_page(unsigned int __nocast gfp, struct shmem_inode_info *info,
		 unsigned long idx)
{
	return alloc_page(gfp | __GFP_ZERO);
}
#endif /* CONFIG_NUMA */
/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache.
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
			struct page **pagep, enum sgp_type sgp, int *type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *filepage = *pagep;
	struct page *swappage;
	swp_entry_t *entry;
	swp_entry_t swap;
	int error;

	if (idx >= SHMEM_MAX_INDEX)
		return -EFBIG;
	/*
	 * Normally, filepage is NULL on entry, and either found
	 * uptodate immediately, or allocated and zeroed, or read
	 * in under swappage, which is then assigned to filepage.
	 * But shmem_prepare_write passes in a locked filepage,
	 * which may be found not uptodate by other callers too,
	 * and may need to be copied from the swappage read in.
	 */
repeat:
	if (!filepage)
		filepage = find_lock_page(mapping, idx);
	if (filepage && PageUptodate(filepage))
		goto done;
	error = 0;
	if (sgp == SGP_QUICK)
		goto failed;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	entry = shmem_swp_alloc(info, idx, sgp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		error = PTR_ERR(entry);
		goto failed;
	}
	swap = *entry;

	if (swap.val) {
		/* Look it up and read it in.. */
		swappage = lookup_swap_cache(swap);
		if (!swappage) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			/* here we actually do the io */
			if (type && *type == VM_FAULT_MINOR) {
				inc_page_state(pgmajfault);
				*type = VM_FAULT_MAJOR;
			}
			swappage = shmem_swapin(info, swap, idx);
			if (!swappage) {
				spin_lock(&info->lock);
				entry = shmem_swp_alloc(info, idx, sgp);
				if (IS_ERR(entry))
					error = PTR_ERR(entry);
				else {
					if (entry->val == swap.val)
						error = -ENOMEM;
					shmem_swp_unmap(entry);
				}
				spin_unlock(&info->lock);
				if (error)
					goto failed;
				goto repeat;
			}
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (TestSetPageLocked(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (PageWriteback(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_writeback(swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (!PageUptodate(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			error = -EIO;
			goto failed;
		}

		if (filepage) {
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			copy_highpage(filepage, swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			flush_dcache_page(filepage);
			SetPageUptodate(filepage);
			set_page_dirty(filepage);
			swap_free(swap);
		} else if (!(error = move_from_swap_cache(
				swappage, idx, mapping))) {
			info->flags |= SHMEM_PAGEIN;
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			filepage = swappage;
			swap_free(swap);
		} else {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			if (error == -ENOMEM) {
				/* let kswapd refresh zone for GFP_ATOMICs */
				blk_congestion_wait(WRITE, HZ/50);
			}
			goto repeat;
		}
	} else if (sgp == SGP_READ && !filepage) {
		shmem_swp_unmap(entry);
		filepage = find_get_page(mapping, idx);
		if (filepage &&
		    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
			spin_unlock(&info->lock);
			wait_on_page_locked(filepage);
			page_cache_release(filepage);
			filepage = NULL;
			goto repeat;
		}
		spin_unlock(&info->lock);
	} else {
		shmem_swp_unmap(entry);
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks == 0 ||
			    shmem_acct_block(info->flags)) {
				spin_unlock(&sbinfo->stat_lock);
				spin_unlock(&info->lock);
				error = -ENOSPC;
				goto failed;
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		} else if (shmem_acct_block(info->flags)) {
			spin_unlock(&info->lock);
			error = -ENOSPC;
			goto failed;
		}

		if (!filepage) {
			spin_unlock(&info->lock);
			filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
						    info, idx);
			if (!filepage) {
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				error = -ENOMEM;
				goto failed;
			}

			spin_lock(&info->lock);
			entry = shmem_swp_alloc(info, idx, sgp);
			if (IS_ERR(entry))
				error = PTR_ERR(entry);
			else {
				swap = *entry;
				shmem_swp_unmap(entry);
			}
			if (error || swap.val || 0 != add_to_page_cache_lru(
					filepage, mapping, idx, GFP_ATOMIC)) {
				spin_unlock(&info->lock);
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				if (error)
					goto failed;
				goto repeat;
			}
			info->flags |= SHMEM_PAGEIN;
		}

		info->alloced++;
		spin_unlock(&info->lock);
		flush_dcache_page(filepage);
		SetPageUptodate(filepage);
	}
done:
	if (*pagep != filepage) {
		unlock_page(filepage);
		*pagep = filepage;
	}
	return 0;

failed:
	if (*pagep != filepage) {
		unlock_page(filepage);
		page_cache_release(filepage);
	}
	return error;
}
struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct page *page = NULL;
	unsigned long idx;
	int error;

	idx = (address - vma->vm_start) >> PAGE_SHIFT;
	idx += vma->vm_pgoff;
	idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
	if (((loff_t) idx << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return NOPAGE_SIGBUS;

	error = shmem_getpage(inode, idx, &page, SGP_CACHE, type);
	if (error)
		return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;

	mark_page_accessed(page);
	return page;
}

static int shmem_populate(struct vm_area_struct *vma,
	unsigned long addr, unsigned long len,
	pgprot_t prot, unsigned long pgoff, int nonblock)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct mm_struct *mm = vma->vm_mm;
	enum sgp_type sgp = nonblock? SGP_QUICK: SGP_CACHE;
	unsigned long size;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size || pgoff + (len >> PAGE_SHIFT) > size)
		return -EINVAL;

	while ((long) len > 0) {
		struct page *page = NULL;
		int err;
		/*
		 * Will need changing if PAGE_CACHE_SIZE != PAGE_SIZE
		 */
		err = shmem_getpage(inode, pgoff, &page, sgp, NULL);
		if (err)
			return err;
		if (page) {
			mark_page_accessed(page);
			err = install_page(mm, vma, addr, page, prot);
			if (err) {
				page_cache_release(page);
				return err;
			}
		} else if (nonblock) {
			err = install_file_pte(mm, vma, addr, pgoff, prot);
			if (err)
				return err;
		}
		len -= PAGE_SIZE;
		addr += PAGE_SIZE;
		pgoff++;
	}
	return 0;
}
#ifdef CONFIG_NUMA
int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct inode *i = vma->vm_file->f_dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}

struct mempolicy *
shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
{
	struct inode *i = vma->vm_file->f_dentry->d_inode;
	unsigned long idx;

	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
	}
	retval = 0;
out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}
static struct inode *
shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return NULL;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blksize = PAGE_CACHE_SIZE;
		inode->i_blocks = 0;
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		INIT_LIST_HEAD(&info->swaplist);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy);
			break;
		case S_IFDIR:
			inode->i_nlink++;
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy);
			break;
		}
	} else if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	return inode;
}
static struct inode_operations shmem_symlink_inode_operations;
static struct inode_operations shmem_symlink_inline_operations;

/*
 * Normally tmpfs makes no use of shmem_prepare_write, but it
 * lets a tmpfs file be used read-write below the loop driver.
 */
static int
shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{
	struct inode *inode = page->mapping->host;
	return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL);
}
static ssize_t
shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct inode	*inode = file->f_dentry->d_inode;
	loff_t		pos;
	unsigned long	written;
	ssize_t		err;

	if ((ssize_t) count < 0)
		return -EINVAL;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	down(&inode->i_sem);

	pos = *ppos;
	written = 0;

	err = generic_write_checks(file, &pos, &count, 0);
	if (err || !count)
		goto out;

	err = remove_suid(file->f_dentry);
	if (err)
		goto out;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;

	do {
		struct page *page = NULL;
		unsigned long bytes, index, offset;
		char *kaddr;
		int left;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		/*
		 * We don't hold page lock across copy from user -
		 * what would it guard against? - so no deadlock here.
		 * But it still may be a good idea to prefault below.
		 */

		err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
		if (err)
			break;

		left = bytes;
		if (PageHighMem(page)) {
			volatile unsigned char dummy;
			__get_user(dummy, buf);
			__get_user(dummy, buf + bytes - 1);

			kaddr = kmap_atomic(page, KM_USER0);
			left = __copy_from_user_inatomic(kaddr + offset,
							buf, bytes);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if (left) {
			kaddr = kmap(page);
			left = __copy_from_user(kaddr + offset, buf, bytes);
			kunmap(page);
		}

		written += bytes;
		count -= bytes;
		pos += bytes;
		buf += bytes;
		if (pos > inode->i_size)
			i_size_write(inode, pos);

		flush_dcache_page(page);
		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		if (left) {
			pos -= left;
			written -= left;
			err = -EFAULT;
			break;
		}

		/*
		 * Our dirty pages are not counted in nr_dirty,
		 * and we do not attempt to balance dirty pages.
		 */

		cond_resched();
	} while (count);

	*ppos = pos;
	if (written)
		err = written;
out:
	up(&inode->i_sem);
	return err;
}
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index, offset;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		unsigned long end_index, nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_sem protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else
			page = ZERO_PAGE(0);

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	read_descriptor_t desc;

	if ((ssize_t) count < 0)
		return -EINVAL;
	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.buf = buf;
	desc.error = 0;

	do_shmem_file_read(filp, ppos, &desc, file_read_actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}

static ssize_t shmem_file_sendfile(struct file *in_file, loff_t *ppos,
			 size_t count, read_actor_t actor, void *target)
{
	read_descriptor_t desc;

	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.data = target;
	desc.error = 0;

	do_shmem_file_read(in_file, ppos, &desc, actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}
static int shmem_statfs(struct super_block *sb, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	spin_lock(&sbinfo->stat_lock);
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	spin_unlock(&sbinfo->stat_lock);
	return 0;
}
/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
	int error = -ENOSPC;

	if (inode) {
		if (dir->i_mode & S_ISGID) {
			inode->i_gid = dir->i_gid;
			if (S_ISDIR(mode))
				inode->i_mode |= S_ISGID;
		}
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	dir->i_nlink++;
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inode->i_nlink++;
	atomic_inc(&inode->i_count);	/* New dentry reference */
	dget(dentry);		/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
	return 0;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_inodes) {
			spin_lock(&sbinfo->stat_lock);
			sbinfo->free_inodes++;
			spin_unlock(&sbinfo->stat_lock);
		}
	}

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inode->i_nlink--;
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	dir->i_nlink--;
	return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			old_dir->i_nlink--;
	} else if (they_are_dirs) {
		old_dir->i_nlink--;
		new_dir->i_nlink++;
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}
static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page = NULL;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
	if (!inode)
		return -ENOSPC;

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= (char *)inode - (char *)info) {
		/* do it inline */
		memcpy(info, symname, len);
		inode->i_op = &shmem_symlink_inline_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		page_cache_release(page);
	}
	if (dir->i_mode & S_ISGID)
		inode->i_gid = dir->i_gid;
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}

static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
	return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
	return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}
static struct inode_operations shmem_symlink_inline_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link_inline,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

static struct inode_operations shmem_symlink_inode_operations = {
	.truncate	= shmem_truncate,
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
#endif
};
static int shmem_parse_options(char *options, int *mode, uid_t *uid, gid_t *gid, unsigned long *blocks, unsigned long *inodes)
{
	char *this_char, *value, *rest;

	while ((this_char = strsep(&options, ",")) != NULL) {
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			return 1;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			*blocks = size >> PAGE_CACHE_SHIFT;
		} else if (!strcmp(this_char,"nr_blocks")) {
			*blocks = memparse(value,&rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			*inodes = memparse(value,&rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (!mode)
				continue;
			*mode = simple_strtoul(value,&rest,8);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (!uid)
				continue;
			*uid = simple_strtoul(value,&rest,0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (!gid)
				continue;
			*gid = simple_strtoul(value,&rest,0);
			if (*rest)
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			return 1;
		}
	}
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
	return 1;
}
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	unsigned long max_blocks = sbinfo->max_blocks;
	unsigned long max_inodes = sbinfo->max_inodes;
	unsigned long blocks;
	unsigned long inodes;
	int error = -EINVAL;

	if (shmem_parse_options(data, NULL, NULL, NULL,
				&max_blocks, &max_inodes))
		return error;

	spin_lock(&sbinfo->stat_lock);
	blocks = sbinfo->max_blocks - sbinfo->free_blocks;
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (max_blocks < blocks)
		goto out;
	if (max_inodes < inodes)
		goto out;
	/*
	 * Those tests also disallow limited->unlimited while any are in
	 * use, so i_blocks will always be zero when max_blocks is zero;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (max_blocks && !sbinfo->max_blocks)
		goto out;
	if (max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks  = max_blocks;
	sbinfo->free_blocks = max_blocks - blocks;
	sbinfo->max_inodes  = max_inodes;
	sbinfo->free_inodes = max_inodes - inodes;
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}
static void shmem_put_super(struct super_block *sb)
{
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
}

#ifdef CONFIG_TMPFS_XATTR
static struct xattr_handler *shmem_xattr_handlers[];
#else
#define shmem_xattr_handlers NULL
#endif
static int shmem_fill_super(struct super_block *sb,
			    void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;
	int mode   = S_IRWXUGO | S_ISVTX;
	uid_t uid = current->fsuid;
	gid_t gid = current->fsgid;
	int err = -ENOMEM;

	struct shmem_sb_info *sbinfo;
	unsigned long blocks = 0;
	unsigned long inodes = 0;

#ifdef CONFIG_TMPFS
	/*
	 * Per default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & MS_NOUSER)) {
		blocks = totalram_pages / 2;
		inodes = totalram_pages - totalhigh_pages;
		if (inodes > blocks)
			inodes = blocks;
		if (shmem_parse_options(data, &mode, &uid, &gid,
					&blocks, &inodes))
			return -EINVAL;
	}
#else
	sb->s_flags |= MS_NOUSER;
#endif

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_blocks = blocks;
	sbinfo->free_blocks = blocks;
	sbinfo->max_inodes = inodes;
	sbinfo->free_inodes = inodes;

	sb->s_fs_info = sbinfo;
	sb->s_maxbytes = SHMEM_MAX_BYTES;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_xattr = shmem_xattr_handlers;

	inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
	if (!inode)
		goto failed;
	inode->i_uid = uid;
	inode->i_gid = gid;
	root = d_alloc_root(inode);
	if (!root)
		goto failed_iput;
	sb->s_root = root;
	return 0;

failed_iput:
	iput(inode);
failed:
	shmem_put_super(sb);
	return err;
}
static kmem_cache_t *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *p;
	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, SLAB_KERNEL);
	if (!p)
		return NULL;
	return &p->vfs_inode;
}

static void shmem_destroy_inode(struct inode *inode)
{
	if ((inode->i_mode & S_IFMT) == S_IFREG) {
		/* only struct inode is valid if it's an inline symlink */
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	}
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
{
	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		inode_init_once(&p->vfs_inode);
	}
}

static int init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, 0, init_once, NULL);
	if (shmem_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	if (kmem_cache_destroy(shmem_inode_cachep))
		printk(KERN_INFO "shmem_inode_cache: not all structures were freed\n");
}
static struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
#ifdef CONFIG_TMPFS
	.prepare_write	= shmem_prepare_write,
	.commit_write	= simple_commit_write,
#endif
};

static struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
#ifdef CONFIG_TMPFS
	.llseek		= generic_file_llseek,
	.read		= shmem_file_read,
	.write		= shmem_file_write,
	.fsync		= simple_sync_file,
	.sendfile	= shmem_file_sendfile,
#endif
};

static struct inode_operations shmem_inode_operations = {
	.truncate	= shmem_truncate,
	.setattr	= shmem_notify_change,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

static struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#endif
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

static struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

static struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
#endif
	.delete_inode	= shmem_delete_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};

static struct vm_operations_struct shmem_vm_ops = {
	.nopage		= shmem_nopage,
	.populate	= shmem_populate,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};
#ifdef CONFIG_TMPFS_SECURITY

static size_t shmem_xattr_security_list(struct inode *inode, char *list, size_t list_len,
					const char *name, size_t name_len)
{
	return security_inode_listsecurity(inode, list, list_len);
}

static int shmem_xattr_security_get(struct inode *inode, const char *name, void *buffer, size_t size)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return security_inode_getsecurity(inode, name, buffer, size);
}

static int shmem_xattr_security_set(struct inode *inode, const char *name, const void *value, size_t size, int flags)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return security_inode_setsecurity(inode, name, value, size, flags);
}

static struct xattr_handler shmem_xattr_security_handler = {
	.prefix	= XATTR_SECURITY_PREFIX,
	.list	= shmem_xattr_security_list,
	.get	= shmem_xattr_security_get,
	.set	= shmem_xattr_security_set,
};

#endif	/* CONFIG_TMPFS_SECURITY */

#ifdef CONFIG_TMPFS_XATTR

static struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_SECURITY
	&shmem_xattr_security_handler,
#endif
	NULL
};

#endif	/* CONFIG_TMPFS_XATTR */
static struct super_block *shmem_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return get_sb_nodev(fs_type, flags, data, shmem_fill_super);
}

static struct file_system_type tmpfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.get_sb		= shmem_get_sb,
	.kill_sb	= kill_litter_super,
};

static struct vfsmount *shm_mnt;

static int __init init_tmpfs(void)
{
	int error;

	error = init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&tmpfs_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}
#ifdef CONFIG_TMPFS
	devfs_mk_dir("shm");
#endif
	shm_mnt = do_kern_mount(tmpfs_fs_type.name, MS_NOUSER,
				tmpfs_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&tmpfs_fs_type);
out2:
	destroy_inodecache();
out3:
	shm_mnt = ERR_PTR(error);
	return error;
}
module_init(init_tmpfs)
/*
 * shmem_file_setup - get an unlinked file living in tmpfs
 *
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 */
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
{
	int error;
	struct file *file;
	struct inode *inode;
	struct dentry *dentry, *root;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return (void *)shm_mnt;

	if (size < 0 || size > SHMEM_MAX_BYTES)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	error = -ENOMEM;
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	root = shm_mnt->mnt_root;
	dentry = d_alloc(root, &this);
	if (!dentry)
		goto put_memory;

	error = -ENFILE;
	file = get_empty_filp();
	if (!file)
		goto put_dentry;

	error = -ENOSPC;
	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto close_file;

	SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
	d_instantiate(dentry, inode);
	inode->i_size = size;
	inode->i_nlink = 0;	/* It is unlinked */
	file->f_vfsmnt = mntget(shm_mnt);
	file->f_dentry = dentry;
	file->f_mapping = inode->i_mapping;
	file->f_op = &shmem_file_operations;
	file->f_mode = FMODE_WRITE | FMODE_READ;
	return file;

close_file:
	put_filp(file);
put_dentry:
	dput(dentry);
put_memory:
	shmem_unacct_size(flags, size);
	return ERR_PTR(error);
}
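/*
 * Typical in-kernel usage (a hypothetical sketch, mirroring what
 * shmem_zero_setup below does for MAP_SHARED anonymous mappings):
 *
 *	file = shmem_file_setup("dev/zero", size, VM_ACCOUNT);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *
 * The file is unlinked (i_nlink == 0), so it vanishes when the last
 * reference is dropped with fput(file).
 */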
/*
 * shmem_zero_setup - setup a shared anonymous mapping
 *
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}