/*
 *	linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <cotte@de.ibm.com>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 *
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
/*
 * We do use our own empty page to avoid interference with other users
 * of ZERO_PAGE(), such as /dev/zero
 */
static struct page *__xip_sparse_page;

static struct page *xip_sparse_page(void)
{
        if (!__xip_sparse_page) {
                struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);

                if (page) {
                        static DEFINE_SPINLOCK(xip_alloc_lock);
                        spin_lock(&xip_alloc_lock);
                        if (!__xip_sparse_page)
                                __xip_sparse_page = page;
                        else
                                __free_page(page);
                        spin_unlock(&xip_alloc_lock);
                }
        }
        return __xip_sparse_page;
}
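/*
 * Note on the allocation above: two tasks may race past the
 * !__xip_sparse_page check and both allocate a page; the spinlocked
 * re-check lets only one of them install its page and the loser frees
 * the duplicate, so the global is written at most once.
 */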
/*
 * This is a file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_mem() function for the actual low-level
 * stuff.
 *
 * Note the struct file* is only used to mark the file accessed; it
 * may be NULL.
 */
static ssize_t
do_xip_mapping_read(struct address_space *mapping,
                    struct file_ra_state *_ra,
                    struct file *filp,
                    char __user *buf,
                    size_t len,
                    loff_t *ppos)
{
        struct inode *inode = mapping->host;
        pgoff_t index, end_index;
        unsigned long offset;
        loff_t isize, pos;
        size_t copied = 0, error = 0;

        BUG_ON(!mapping->a_ops->get_xip_mem);

        pos = *ppos;
        index = pos >> PAGE_CACHE_SHIFT;
        offset = pos & ~PAGE_CACHE_MASK;

        isize = i_size_read(inode);
        if (!isize)
                goto out;

        end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
        do {
                unsigned long nr, left;
                void *xip_mem;
                unsigned long xip_pfn;
                int zero = 0;

                /* nr is the maximum number of bytes to copy from this page */
                nr = PAGE_CACHE_SIZE;
                if (index >= end_index) {
                        if (index > end_index)
                                goto out;
                        nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
                        if (nr <= offset)
                                goto out;
                }
                nr = nr - offset;
                if (nr > len - copied)
                        nr = len - copied;

                error = mapping->a_ops->get_xip_mem(mapping, index, 0,
                                                        &xip_mem, &xip_pfn);
                if (unlikely(error)) {
                        if (error == -ENODATA) {
                                /* sparse */
                                zero = 1;
                        } else
                                goto out;
                }

                /* If users can be writing to this page using arbitrary
                 * virtual addresses, take care about potential aliasing
                 * before reading the page on the kernel side.
                 */
                if (mapping_writably_mapped(mapping))
                        /* address based flush */ ;

                /*
                 * Ok, we have the mem, so now we can copy it to user space...
                 *
                 * The actor routine returns how many bytes were actually used.
                 * NOTE! This may not be the same as how much of a user buffer
                 * we filled up (we may be padding etc), so we can only update
                 * "pos" here (the actor routine has to update the user buffer
                 * pointers and the remaining count).
                 */
                if (!zero)
                        left = __copy_to_user(buf+copied, xip_mem+offset, nr);
                else
                        left = __clear_user(buf + copied, nr);

                if (left) {
                        error = -EFAULT;
                        goto out;
                }

                copied += (nr - left);
                offset += (nr - left);
                index += offset >> PAGE_CACHE_SHIFT;
                offset &= ~PAGE_CACHE_MASK;
        } while (copied < len);

out:
        *ppos = pos + copied;
        if (filp)
                file_accessed(filp);

        return (copied ? copied : error);
}
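/*
 * Worked example of the partial-last-page logic above: with 4K pages
 * and isize = 10000, end_index = (10000 - 1) >> 12 = 2, and the valid
 * bytes in that last page are ((10000 - 1) & ~PAGE_CACHE_MASK) + 1 =
 * 1807 + 1 = 1808.
 */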
ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
                                   buf, len, ppos);
}
EXPORT_SYMBOL_GPL(xip_file_read);
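/*
 * For reference, a minimal sketch of the get_xip_mem() contract this
 * file relies on: on success fill *kmem and *pfn for a directly
 * addressable block and return 0; for a hole with create == 0, return
 * -ENODATA. Every myfs_* name below is hypothetical, not a real
 * kernel API.
 *
 *	static int myfs_get_xip_mem(struct address_space *mapping,
 *				    pgoff_t pgoff, int create,
 *				    void **kmem, unsigned long *pfn)
 *	{
 *		sector_t block;
 *
 *		if (myfs_resolve_block(mapping->host, pgoff, create, &block))
 *			return -ENODATA;
 *		return myfs_direct_access(mapping->host->i_sb, block,
 *					  kmem, pfn);
 *	}
 */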
/*
 * __xip_unmap is invoked from xip_file_fault and __xip_file_write.
 *
 * This function walks all vmas of the address_space and unmaps the
 * __xip_sparse_page when found at pgoff.
 */
static void
__xip_unmap(struct address_space *mapping, unsigned long pgoff)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm;
        struct prio_tree_iter iter;
        unsigned long address;
        pte_t *pte;
        pte_t pteval;
        spinlock_t *ptl;
        struct page *page;

        page = __xip_sparse_page;
        if (!page)
                return;

        spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                mm = vma->vm_mm;
                address = vma->vm_start +
                        ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
                BUG_ON(address < vma->vm_start || address >= vma->vm_end);
                pte = page_check_address(page, mm, address, &ptl);
                if (pte) {
                        /* Nuke the page table entry. */
                        flush_cache_page(vma, address, pte_pfn(*pte));
                        pteval = ptep_clear_flush(vma, address, pte);
                        page_remove_rmap(page, vma);
                        dec_mm_counter(mm, file_rss);
                        BUG_ON(pte_dirty(pteval));
                        pte_unmap_unlock(pte, ptl);
                        page_cache_release(page);
                }
        }
        spin_unlock(&mapping->i_mmap_lock);
}
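/*
 * Example of the address computation above: a vma with
 * vm_start = 0x40000000 and vm_pgoff = 2 maps file page 5 at
 * 0x40000000 + ((5 - 2) << PAGE_SHIFT) = 0x40003000 with 4K pages.
 */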
/*
 * xip_file_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_fault, but used for execute in place
 */
static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        pgoff_t size;
        void *xip_mem;
        unsigned long xip_pfn;
        struct page *page;
        int error;

        /* XXX: are VM_FAULT_ codes OK? */

        size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (vmf->pgoff >= size)
                return VM_FAULT_SIGBUS;

        error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
                                                &xip_mem, &xip_pfn);
        if (likely(!error))
                goto found;
        if (error != -ENODATA)
                return VM_FAULT_OOM;

        /* sparse block */
        if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
            (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
            (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
                int err;

                /* maybe shared writable, allocate new block */
                error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
                                                        &xip_mem, &xip_pfn);
                if (error)
                        return VM_FAULT_SIGBUS;
                /* unmap sparse mappings at pgoff from all other vmas */
                __xip_unmap(mapping, vmf->pgoff);

found:
                err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
                                                        xip_pfn);
                if (err == -ENOMEM)
                        return VM_FAULT_OOM;
                BUG_ON(err);
                return VM_FAULT_NOPAGE;
        } else {
                /* not shared and writable, use xip_sparse_page() */
                page = xip_sparse_page();
                if (!page)
                        return VM_FAULT_OOM;

                page_cache_get(page);
                vmf->page = page;
                return 0;
        }
}
static struct vm_operations_struct xip_file_vm_ops = {
        .fault  = xip_file_fault,
};
int xip_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        BUG_ON(!file->f_mapping->a_ops->get_xip_mem);

        file_accessed(file);
        vma->vm_ops = &xip_file_vm_ops;
        vma->vm_flags |= VM_CAN_NONLINEAR | VM_MIXEDMAP;
        return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);
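/*
 * Illustrative wiring (a sketch, not part of this file): an
 * XIP-capable filesystem points its file_operations at the helpers
 * exported here; in-tree, ext2 does this when built with
 * CONFIG_EXT2_FS_XIP. The myfs_* struct name is hypothetical.
 *
 *	const struct file_operations myfs_xip_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read		= xip_file_read,
 *		.write		= xip_file_write,
 *		.mmap		= xip_file_mmap,
 *		.open		= generic_file_open,
 *	};
 */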
static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
                 size_t count, loff_t pos, loff_t *ppos)
{
        struct address_space *mapping = filp->f_mapping;
        const struct address_space_operations *a_ops = mapping->a_ops;
        struct inode *inode = mapping->host;
        long status = 0;
        size_t bytes;
        ssize_t written = 0;

        BUG_ON(!mapping->a_ops->get_xip_mem);

        do {
                unsigned long index;
                unsigned long offset;
                size_t copied;
                void *xip_mem;
                unsigned long xip_pfn;

                offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
                index = pos >> PAGE_CACHE_SHIFT;
                bytes = PAGE_CACHE_SIZE - offset;
                if (bytes > count)
                        bytes = count;

                status = a_ops->get_xip_mem(mapping, index, 0,
                                                &xip_mem, &xip_pfn);
                if (status == -ENODATA) {
                        /* hole: allocate a new block, then unmap the
                         * sparse page at this offset from all other vmas */
                        status = a_ops->get_xip_mem(mapping, index, 1,
                                                        &xip_mem, &xip_pfn);
                        if (!status)
                                __xip_unmap(mapping, index);
                }
                if (status)
                        break;

                copied = bytes -
                        __copy_from_user_nocache(xip_mem + offset, buf, bytes);

                if (likely(copied > 0)) {
                        status = copied;

                        if (status >= 0) {
                                written += status;
                                count -= status;
                                pos += status;
                                buf += status;
                        }
                }
                if (unlikely(copied != bytes))
                        if (status >= 0)
                                status = -EFAULT;
                if (status < 0)
                        break;
        } while (count);
        *ppos = pos;
        /*
         * No need to use i_size_read() here, the i_size
         * cannot change under us because we hold i_mutex.
         */
        if (pos > inode->i_size) {
                i_size_write(inode, pos);
                mark_inode_dirty(inode);
        }

        return written ? written : status;
}
ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
               loff_t *ppos)
{
        struct address_space *mapping = filp->f_mapping;
        struct inode *inode = mapping->host;
        size_t count;
        loff_t pos;
        ssize_t ret;

        mutex_lock(&inode->i_mutex);

        if (!access_ok(VERIFY_READ, buf, len)) {
                ret = -EFAULT;
                goto out_up;
        }

        pos = *ppos;
        count = len;

        vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;

        ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
        if (ret)
                goto out_backing;
        if (count == 0)
                goto out_backing;

        ret = remove_suid(filp->f_path.dentry);
        if (ret)
                goto out_backing;

        file_update_time(filp);

        ret = __xip_file_write(filp, buf, count, pos, ppos);

 out_backing:
        current->backing_dev_info = NULL;
 out_up:
        mutex_unlock(&inode->i_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);
/*
 * truncate a page used for execute in place
 * functionality is analogous to block_truncate_page but uses
 * get_xip_mem to get the page instead of the page cache
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
        pgoff_t index = from >> PAGE_CACHE_SHIFT;
        unsigned offset = from & (PAGE_CACHE_SIZE-1);
        unsigned blocksize;
        unsigned length;
        void *xip_mem;
        unsigned long xip_pfn;
        int err;

        BUG_ON(!mapping->a_ops->get_xip_mem);

        blocksize = 1 << mapping->host->i_blkbits;
        length = offset & (blocksize - 1);

        /* Block boundary? Nothing to do */
        if (!length)
                return 0;

        length = blocksize - length;

        err = mapping->a_ops->get_xip_mem(mapping, index, 0,
                                                &xip_mem, &xip_pfn);
        if (unlikely(err)) {
                if (err == -ENODATA)
                        /* Hole? No need to truncate */
                        return 0;
                else
                        return err;
        }
        memset(xip_mem + offset, 0, length);
        return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);
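/*
 * Illustrative caller (a sketch): on truncate, a filesystem zeroes the
 * tail of the last block before shrinking the inode; ext2 makes the
 * equivalent call for XIP inodes. myfs_truncate() is hypothetical.
 *
 *	static void myfs_truncate(struct inode *inode)
 *	{
 *		if (mapping_is_xip(inode->i_mapping))
 *			xip_truncate_page(inode->i_mapping, inode->i_size);
 *		...
 *	}
 */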