/*
 *	linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <cotte@de.ibm.com>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 *
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/mutex.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

/*
 * We do use our own empty page to avoid interference with other users
 * of ZERO_PAGE(), such as /dev/zero
 */
static DEFINE_MUTEX(xip_sparse_mutex);
static seqcount_t xip_sparse_seq = SEQCNT_ZERO;
static struct page *__xip_sparse_page;
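
/*
 * Locking scheme, as reconstructed from the code below: xip_sparse_mutex
 * serializes allocation of __xip_sparse_page and of new blocks via
 * get_xip_mem(..., 1, ...); xip_sparse_seq lets __xip_unmap() walk the
 * vmas locklessly and detect a racing fault that may have (re)inserted
 * the sparse page, in which case the walk retries under the mutex.
 */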
/* called under xip_sparse_mutex */
static struct page *xip_sparse_page(void)
{
        if (!__xip_sparse_page) {
                struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);

                if (page)
                        __xip_sparse_page = page;
        }
        return __xip_sparse_page;
}
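
/*
 * The sparse page is allocated once on first use and then kept for the
 * lifetime of the kernel; it serves as a shared, zero-filled backing
 * page for holes in all XIP mappings that are not shared writable.
 */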
/*
 * This is a file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_mem() function for the actual low-level
 * stuff.
 *
 * Note the struct file* is not used at all.  It may be NULL.
 */
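
/*
 * Unlike the generic filemap read path, no page cache pages are
 * involved: data is copied to user space straight from the kernel
 * mapping that get_xip_mem() returns for each file page.
 */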
static ssize_t
do_xip_mapping_read(struct address_space *mapping,
                    struct file_ra_state *_ra,
                    struct file *filp,
                    char __user *buf,
                    size_t len,
                    loff_t *ppos)
{
        struct inode *inode = mapping->host;
        pgoff_t index, end_index;
        unsigned long offset;
        loff_t isize, pos;
        size_t copied = 0, error = 0;
        BUG_ON(!mapping->a_ops->get_xip_mem);

        pos = *ppos;
        index = pos >> PAGE_CACHE_SHIFT;
        offset = pos & ~PAGE_CACHE_MASK;

        isize = i_size_read(inode);
        if (!isize)
                goto out;

        end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
        do {
                unsigned long nr, left;
                void *xip_mem;
                unsigned long xip_pfn;
                int zero = 0;

                /* nr is the maximum number of bytes to copy from this page */
                nr = PAGE_CACHE_SIZE;
                if (index >= end_index) {
                        if (index > end_index)
                                goto out;
                        nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
                        if (nr <= offset)
                                goto out;
                }
                nr = nr - offset;
                if (nr > len - copied)
                        nr = len - copied;

                error = mapping->a_ops->get_xip_mem(mapping, index, 0,
                                                        &xip_mem, &xip_pfn);
                if (unlikely(error)) {
                        if (error == -ENODATA) {
                                /* sparse */
                                zero = 1;
                        } else
                                goto out;
                }

                /* If users can be writing to this page using arbitrary
                 * virtual addresses, take care about potential aliasing
                 * before reading the page on the kernel side.
                 */
                if (mapping_writably_mapped(mapping))
                        /* address based flush */ ;
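                /*
                 * Note that the empty statement above means no address
                 * based flush is actually performed here; the branch is
                 * a placeholder for architectures that would need one.
                 */
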
                /*
                 * Ok, we have the mem, so now we can copy it to user space...
                 *
                 * The actor routine returns how many bytes were actually used..
                 * NOTE! This may not be the same as how much of a user buffer
                 * we filled up (we may be padding etc), so we can only update
                 * "pos" here (the actor routine has to update the user buffer
                 * pointers and the remaining count).
                 */
                if (!zero)
                        left = __copy_to_user(buf+copied, xip_mem+offset, nr);
                else
                        left = __clear_user(buf + copied, nr);

                if (left) {
                        error = -EFAULT;
                        goto out;
                }

                copied += (nr - left);
                offset += (nr - left);
                index += offset >> PAGE_CACHE_SHIFT;
                offset &= ~PAGE_CACHE_MASK;
        } while (copied < len);

out:
        *ppos = pos + copied;
        if (filp)
                file_accessed(filp);

        return (copied ? copied : error);
}

ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
                            buf, len, ppos);
}
EXPORT_SYMBOL_GPL(xip_file_read);

/*
 * __xip_unmap is invoked from xip_unmap and xip_write
 *
 * This function walks all vmas of the address_space and unmaps the
 * __xip_sparse_page when found at pgoff.
 */
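
/*
 * Locking: the vma walk first runs without xip_sparse_mutex, guarded
 * only by a xip_sparse_seq read section; if read_seqcount_retry()
 * reports that a fault raced with us, the walk is redone under the
 * mutex so no sparse page can be re-inserted behind our back.
 */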
static void
__xip_unmap (struct address_space * mapping,
                     unsigned long pgoff)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm;
        struct prio_tree_iter iter;
        unsigned long address;
        pte_t *pte;
        pte_t pteval;
        spinlock_t *ptl;
        struct page *page;
        unsigned count;
        int locked = 0;

        count = read_seqcount_begin(&xip_sparse_seq);

        page = __xip_sparse_page;
        if (!page)
                return;

retry:
        spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                mm = vma->vm_mm;
                address = vma->vm_start +
                        ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
                BUG_ON(address < vma->vm_start || address >= vma->vm_end);
                pte = page_check_address(page, mm, address, &ptl, 1);
                if (pte) {
                        /* Nuke the page table entry. */
                        flush_cache_page(vma, address, pte_pfn(*pte));
                        pteval = ptep_clear_flush_notify(vma, address, pte);
                        page_remove_rmap(page);
                        dec_mm_counter(mm, file_rss);
                        BUG_ON(pte_dirty(pteval));
                        pte_unmap_unlock(pte, ptl);
                        page_cache_release(page);
                }
        }
        spin_unlock(&mapping->i_mmap_lock);

        if (locked) {
                mutex_unlock(&xip_sparse_mutex);
        } else if (read_seqcount_retry(&xip_sparse_seq, count)) {
                mutex_lock(&xip_sparse_mutex);
                locked = 1;
                goto retry;
        }
}

/*
 * xip_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_fault, but used for execute in place
 */
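
/*
 * Three outcomes, as implemented below: if the block exists, its pfn is
 * mapped directly; if it is a hole in a shared writable mapping, a new
 * block is allocated and mapped; otherwise the global zero-filled
 * sparse page is mapped in its place.
 */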
static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        pgoff_t size;
        void *xip_mem;
        unsigned long xip_pfn;
        struct page *page;
        int error;

        /* XXX: are VM_FAULT_ codes OK? */
again:
        size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (vmf->pgoff >= size)
                return VM_FAULT_SIGBUS;

        error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
                                                &xip_mem, &xip_pfn);
        if (likely(!error))
                goto found;
        if (error != -ENODATA)
                return VM_FAULT_OOM;

        /* sparse block */
        if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
            (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
            (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
                int err;

                /* maybe shared writable, allocate new block */
                mutex_lock(&xip_sparse_mutex);
                error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
                                                        &xip_mem, &xip_pfn);
                mutex_unlock(&xip_sparse_mutex);
                if (error)
                        return VM_FAULT_SIGBUS;
                /* unmap sparse mappings at pgoff from all other vmas */
                __xip_unmap(mapping, vmf->pgoff);

found:
                err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
                                                        xip_pfn);
                if (err == -ENOMEM)
                        return VM_FAULT_OOM;
                BUG_ON(err);
                return VM_FAULT_NOPAGE;
        } else {
                int err, ret = VM_FAULT_OOM;

                mutex_lock(&xip_sparse_mutex);
                write_seqcount_begin(&xip_sparse_seq);
                error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
                                                        &xip_mem, &xip_pfn);
                if (unlikely(!error)) {
                        write_seqcount_end(&xip_sparse_seq);
                        mutex_unlock(&xip_sparse_mutex);
                        goto again;
                }
                if (error != -ENODATA)
                        goto out;

                /* not shared and writable, use xip_sparse_page() */
                page = xip_sparse_page();
                if (!page)
                        goto out;

                err = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
                                                        page);
                if (err == -ENOMEM)
                        goto out;

                ret = VM_FAULT_NOPAGE;
out:
                write_seqcount_end(&xip_sparse_seq);
                mutex_unlock(&xip_sparse_mutex);

                return ret;
        }
}

static const struct vm_operations_struct xip_file_vm_ops = {
        .fault  = xip_file_fault,
};

int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
{
        BUG_ON(!file->f_mapping->a_ops->get_xip_mem);

        file_accessed(file);
        vma->vm_ops = &xip_file_vm_ops;
        vma->vm_flags |= VM_CAN_NONLINEAR | VM_MIXEDMAP;
        return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);

static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
                  size_t count, loff_t pos, loff_t *ppos)
{
        struct address_space * mapping = filp->f_mapping;
        const struct address_space_operations *a_ops = mapping->a_ops;
        struct inode *inode = mapping->host;
        long status = 0;
        size_t bytes;
        ssize_t written = 0;

        BUG_ON(!mapping->a_ops->get_xip_mem);
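
        /*
         * Copy loop: for each page-sized chunk, look up (or, on -ENODATA,
         * allocate) the backing block and copy user data into it with
         * __copy_from_user_nocache() to avoid polluting the CPU cache.
         */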
        do {
                unsigned long index;
                unsigned long offset;
                size_t copied;
                void *xip_mem;
                unsigned long xip_pfn;

                offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
                index = pos >> PAGE_CACHE_SHIFT;
                bytes = PAGE_CACHE_SIZE - offset;
                if (bytes > count)
                        bytes = count;

                status = a_ops->get_xip_mem(mapping, index, 0,
                                                &xip_mem, &xip_pfn);
                if (status == -ENODATA) {
                        /* we allocate a new page and unmap it */
                        mutex_lock(&xip_sparse_mutex);
                        status = a_ops->get_xip_mem(mapping, index, 1,
                                                        &xip_mem, &xip_pfn);
                        mutex_unlock(&xip_sparse_mutex);
                        if (!status)
                                /* unmap page at pgoff from all other vmas */
                                __xip_unmap(mapping, index);
                }

                if (status)
                        break;

                copied = bytes -
                        __copy_from_user_nocache(xip_mem + offset, buf, bytes);

                if (likely(copied > 0)) {
                        status = copied;

                        if (status >= 0) {
                                written += status;
                                count -= status;
                                pos += status;
                                buf += status;
                        }
                }
                if (unlikely(copied != bytes))
                        if (status >= 0)
                                status = -EFAULT;
                if (status < 0)
                        break;
        } while (count);
        *ppos = pos;
        /*
         * No need to use i_size_read() here, the i_size
         * cannot change under us because we hold i_mutex.
         */
        if (pos > inode->i_size) {
                i_size_write(inode, pos);
                mark_inode_dirty(inode);
        }

        return written ? written : status;
}

ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
               loff_t *ppos)
{
        struct address_space *mapping = filp->f_mapping;
        struct inode *inode = mapping->host;
        size_t count;
        loff_t pos;
        ssize_t ret;

        mutex_lock(&inode->i_mutex);

        if (!access_ok(VERIFY_READ, buf, len)) {
                ret = -EFAULT;
                goto out_up;
        }

        pos = *ppos;
        count = len;

        vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;

        ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
        if (ret)
                goto out_backing;
        if (count == 0)
                goto out_backing;

        ret = file_remove_suid(filp);
        if (ret)
                goto out_backing;

        file_update_time(filp);

        ret = __xip_file_write(filp, buf, count, pos, ppos);

 out_backing:
        current->backing_dev_info = NULL;
 out_up:
        mutex_unlock(&inode->i_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);
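
/*
 * Usage sketch (illustrative, not part of this file): a filesystem that
 * supports XIP wires the exported helpers into its file_operations, for
 * example along the lines of ext2's XIP support:
 *
 *	const struct file_operations xip_file_ops = {
 *		.llseek	= generic_file_llseek,
 *		.read	= xip_file_read,
 *		.write	= xip_file_write,
 *		.mmap	= xip_file_mmap,
 *	};
 *
 * and implements get_xip_mem() in its address_space_operations so the
 * routines above can resolve file pages to directly addressable memory.
 */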
/*
 * truncate a page used for execute in place
 * functionality is analogous to block_truncate_page but uses get_xip_mem
 * to get the page instead of the page cache
 */
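
/*
 * Zeroing the tail of the last block matters here because XIP pages are
 * mapped directly: stale data beyond the new end of file would otherwise
 * remain visible through existing mappings.
 */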
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
        pgoff_t index = from >> PAGE_CACHE_SHIFT;
        unsigned offset = from & (PAGE_CACHE_SIZE-1);
        unsigned blocksize;
        unsigned length;
        void *xip_mem;
        unsigned long xip_pfn;
        int err;

        BUG_ON(!mapping->a_ops->get_xip_mem);

        blocksize = 1 << mapping->host->i_blkbits;
        length = offset & (blocksize - 1);

        /* Block boundary? Nothing to do */
        if (!length)
                return 0;

        length = blocksize - length;

        err = mapping->a_ops->get_xip_mem(mapping, index, 0,
                                                &xip_mem, &xip_pfn);
        if (unlikely(err)) {
                if (err == -ENODATA)
                        /* Hole? No need to truncate */
                        return 0;
                else
                        return err;
        }
        memset(xip_mem + offset, 0, length);

        return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);