// SPDX-License-Identifier: GPL-2.0-or-later
/* file-nommu.c: no-MMU version of ramfs
 *
 * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/ramfs.h>
#include <linux/pagevec.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <linux/uaccess.h>
/* forward declarations for the nommu-specific file/inode operations */
static int ramfs_nommu_setattr(struct mnt_idmap *, struct dentry *, struct iattr *);
static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
						   unsigned long addr,
						   unsigned long len,
						   unsigned long pgoff,
						   unsigned long flags);
static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma);
33 static unsigned ramfs_mmap_capabilities(struct file
*file
)
35 return NOMMU_MAP_DIRECT
| NOMMU_MAP_COPY
| NOMMU_MAP_READ
|
36 NOMMU_MAP_WRITE
| NOMMU_MAP_EXEC
;
39 const struct file_operations ramfs_file_operations
= {
40 .mmap_capabilities
= ramfs_mmap_capabilities
,
41 .mmap
= ramfs_nommu_mmap
,
42 .get_unmapped_area
= ramfs_nommu_get_unmapped_area
,
43 .read_iter
= generic_file_read_iter
,
44 .write_iter
= generic_file_write_iter
,
46 .splice_read
= filemap_splice_read
,
47 .splice_write
= iter_file_splice_write
,
48 .llseek
= generic_file_llseek
,
51 const struct inode_operations ramfs_file_inode_operations
= {
52 .setattr
= ramfs_nommu_setattr
,
53 .getattr
= simple_getattr
,
56 /*****************************************************************************/
58 * add a contiguous set of pages into a ramfs inode when it's truncated from
59 * size 0 on the assumption that it's going to be used for an mmap of shared
62 int ramfs_nommu_expand_for_mapping(struct inode
*inode
, size_t newsize
)
64 unsigned long npages
, xpages
, loop
;
69 gfp_t gfp
= mapping_gfp_mask(inode
->i_mapping
);
71 /* make various checks */
72 order
= get_order(newsize
);
73 if (unlikely(order
> MAX_PAGE_ORDER
))
76 ret
= inode_newsize_ok(inode
, newsize
);
80 i_size_write(inode
, newsize
);
82 /* allocate enough contiguous pages to be able to satisfy the
84 pages
= alloc_pages(gfp
, order
);
88 /* split the high-order page into an array of single pages */
89 xpages
= 1UL << order
;
90 npages
= (newsize
+ PAGE_SIZE
- 1) >> PAGE_SHIFT
;
92 split_page(pages
, order
);
94 /* trim off any pages we don't actually require */
95 for (loop
= npages
; loop
< xpages
; loop
++)
96 __free_page(pages
+ loop
);
98 /* clear the memory we allocated */
99 newsize
= PAGE_SIZE
* npages
;
100 data
= page_address(pages
);
101 memset(data
, 0, newsize
);
103 /* attach all the pages to the inode's address space */
104 for (loop
= 0; loop
< npages
; loop
++) {
105 struct page
*page
= pages
+ loop
;
107 ret
= add_to_page_cache_lru(page
, inode
->i_mapping
, loop
,
112 /* prevent the page from being discarded on memory pressure */
114 SetPageUptodate(page
);
123 while (loop
< npages
)
124 __free_page(pages
+ loop
++);
128 /*****************************************************************************/
132 static int ramfs_nommu_resize(struct inode
*inode
, loff_t newsize
, loff_t size
)
136 /* assume a truncate from zero size is going to be for the purposes of
139 if (unlikely(newsize
>> 32))
142 return ramfs_nommu_expand_for_mapping(inode
, newsize
);
145 /* check that a decrease in size doesn't cut off any shared mappings */
146 if (newsize
< size
) {
147 ret
= nommu_shrink_inode_mappings(inode
, size
, newsize
);
152 truncate_setsize(inode
, newsize
);
156 /*****************************************************************************/
158 * handle a change of attributes
159 * - we're specifically interested in a change of size
161 static int ramfs_nommu_setattr(struct mnt_idmap
*idmap
,
162 struct dentry
*dentry
, struct iattr
*ia
)
164 struct inode
*inode
= d_inode(dentry
);
165 unsigned int old_ia_valid
= ia
->ia_valid
;
168 /* POSIX UID/GID verification for setting inode attributes */
169 ret
= setattr_prepare(&nop_mnt_idmap
, dentry
, ia
);
173 /* pick out size-changing events */
174 if (ia
->ia_valid
& ATTR_SIZE
) {
175 loff_t size
= inode
->i_size
;
177 if (ia
->ia_size
!= size
) {
178 ret
= ramfs_nommu_resize(inode
, ia
->ia_size
, size
);
179 if (ret
< 0 || ia
->ia_valid
== ATTR_SIZE
)
182 /* we skipped the truncate but must still update
185 ia
->ia_valid
|= ATTR_MTIME
|ATTR_CTIME
;
189 setattr_copy(&nop_mnt_idmap
, inode
, ia
);
191 ia
->ia_valid
= old_ia_valid
;
195 /*****************************************************************************/
197 * try to determine where a shared mapping can be made
199 * - the pages to be mapped must exist
200 * - the pages be physically contiguous in sequence
202 static unsigned long ramfs_nommu_get_unmapped_area(struct file
*file
,
203 unsigned long addr
, unsigned long len
,
204 unsigned long pgoff
, unsigned long flags
)
206 unsigned long maxpages
, lpages
, nr_folios
, loop
, ret
, nr_pages
, pfn
;
207 struct inode
*inode
= file_inode(file
);
208 struct folio_batch fbatch
;
211 /* the mapping mustn't extend beyond the EOF */
212 lpages
= (len
+ PAGE_SIZE
- 1) >> PAGE_SHIFT
;
213 isize
= i_size_read(inode
);
216 maxpages
= (isize
+ PAGE_SIZE
- 1) >> PAGE_SHIFT
;
217 if (pgoff
>= maxpages
)
220 if (maxpages
- pgoff
< lpages
)
223 /* gang-find the pages */
224 folio_batch_init(&fbatch
);
227 nr_folios
= filemap_get_folios_contig(inode
->i_mapping
, &pgoff
,
234 if (ret
== -ENOSYS
) {
235 ret
= (unsigned long) folio_address(fbatch
.folios
[0]);
236 pfn
= folio_pfn(fbatch
.folios
[0]);
238 /* check the pages for physical adjacency */
239 for (loop
= 0; loop
< nr_folios
; loop
++) {
240 if (pfn
+ nr_pages
!= folio_pfn(fbatch
.folios
[loop
])) {
242 goto out_free
; /* leave if not physical adjacent */
244 nr_pages
+= folio_nr_pages(fbatch
.folios
[loop
]);
245 if (nr_pages
>= lpages
)
246 goto out_free
; /* successfully found desired pages*/
249 if (nr_pages
< lpages
) {
250 folio_batch_release(&fbatch
);
251 goto repeat
; /* loop if pages are missing */
253 /* okay - all conditions fulfilled */
256 folio_batch_release(&fbatch
);
261 /*****************************************************************************/
263 * set up a mapping for shared memory segments
265 static int ramfs_nommu_mmap(struct file
*file
, struct vm_area_struct
*vma
)
267 if (!is_nommu_shared_mapping(vma
->vm_flags
))
271 vma
->vm_ops
= &generic_file_vm_ops
;