/*
 *	linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <cotte@de.ibm.com>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 *
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <asm/tlbflush.h>
#include "filemap.h"
/*
 * This is a file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_page() function for the actual low-level
 * work.
 *
 * Note the struct file* is not used at all.  It may be NULL.
 */
static void
do_xip_mapping_read(struct address_space *mapping,
		    struct file_ra_state *_ra,
		    struct file *filp,
		    loff_t *ppos,
		    read_descriptor_t *desc,
		    read_actor_t actor)
{
	struct inode *inode = mapping->host;
	unsigned long index, end_index, offset;
	loff_t isize;

	BUG_ON(!mapping->a_ops->get_xip_page);

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	for (;;) {
		struct page *page;
		unsigned long nr, ret;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			/* last page: only the tail up to isize is valid */
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset)
				goto out;
		}
		nr = nr - offset;

		page = mapping->a_ops->get_xip_page(mapping,
			index*(PAGE_SIZE/512), 0);
		if (!page)
			goto no_xip_page;
		if (unlikely(IS_ERR(page))) {
			if (PTR_ERR(page) == -ENODATA) {
				/* sparse block: read as zeroes */
				page = ZERO_PAGE(0);
			} else {
				desc->error = PTR_ERR(page);
				goto out;
			}
		}

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * Ok, we have the page, so now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used.
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		if (ret == nr && desc->count)
			continue;
		goto out;

no_xip_page:
		/* Did not get the page. Report it */
		desc->error = -EIO;
		goto out;
	}

out:
	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	if (filp)
		file_accessed(filp);
}
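/*
 * For reference: the get_xip_page address_space operation used above has
 * the following shape (a sketch matching the call sites in this file;
 * the parameter names are illustrative):
 *
 *	struct page *(*get_xip_page)(struct address_space *mapping,
 *				     sector_t sector, int create);
 *
 * The sector argument is in 512-byte units, hence the index*(PAGE_SIZE/512)
 * at every call site: with 4K pages, page index N maps to sector N*8.
 * create=1 asks the filesystem to allocate the backing block for a hole;
 * a hole looked up with create=0 yields ERR_PTR(-ENODATA), which the
 * callers translate into the zero page.
 */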
ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	read_descriptor_t desc;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	desc.written = 0;
	desc.arg.buf = buf;
	desc.count = len;
	desc.error = 0;

	do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
			    ppos, &desc, file_read_actor);

	if (desc.written)
		return desc.written;
	return desc.error;
}
EXPORT_SYMBOL_GPL(xip_file_read);
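/*
 * Example (illustrative sketch): a filesystem that provides get_xip_page
 * routes its file_operations at the xip_* helpers exported here.  The
 * ext2 XIP mode does roughly the following; the identifier name below is
 * made up:
 *
 *	static struct file_operations example_xip_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read		= xip_file_read,
 *		.write		= xip_file_write,
 *		.mmap		= xip_file_mmap,
 *		.sendfile	= xip_file_sendfile,
 *	};
 */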
ssize_t
xip_file_sendfile(struct file *in_file, loff_t *ppos,
		  size_t count, read_actor_t actor, void *target)
{
	read_descriptor_t desc;

	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.data = target;
	desc.error = 0;

	do_xip_mapping_read(in_file->f_mapping, &in_file->f_ra, in_file,
			    ppos, &desc, actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}
EXPORT_SYMBOL_GPL(xip_file_sendfile);
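/*
 * Note: xip_file_sendfile backs the sendfile(2) path for XIP files; from
 * userspace the call looks the same as for any other file, e.g.
 * sendfile(out_fd, in_fd, &offset, count).  Only the in-kernel read side
 * differs: data is copied straight from the backing memory, bypassing
 * the page cache.
 */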
/*
 * __xip_unmap is invoked from xip_file_nopage and __xip_file_write.
 *
 * This function walks all vmas of the address_space and unmaps the
 * ZERO_PAGE when found at pgoff. Should it go in rmap.c?
 */
static void
__xip_unmap(struct address_space *mapping, unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct prio_tree_iter iter;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		mm = vma->vm_mm;
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
		page = ZERO_PAGE(address);
		pte = page_check_address(page, mm, address, &ptl);
		if (pte) {
			/* Nuke the page table entry. */
			flush_cache_page(vma, address, pte_pfn(*pte));
			pteval = ptep_clear_flush(vma, address, pte);
			page_remove_rmap(page);
			dec_mm_counter(mm, file_rss);
			BUG_ON(pte_dirty(pteval));
			pte_unmap_unlock(pte, ptl);
			page_cache_release(page);
		}
	}
	spin_unlock(&mapping->i_mmap_lock);
}
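/*
 * Worked example of why the walk above is needed: process A reads a hole
 * through a shared writable mapping and gets ZERO_PAGE() installed by
 * xip_file_nopage().  Process B then writes to the same offset, which
 * allocates a real block.  Without __xip_unmap(), A would keep reading
 * stale zeroes through its old PTE; after the walk, A's next access
 * faults again and xip_file_nopage() maps the newly allocated block.
 */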
/*
 * xip_file_nopage() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_nopage, but used for execute in
 * place files.
 */
static struct page *
xip_file_nopage(struct vm_area_struct *area,
		unsigned long address,
		int *type)
{
	struct file *file = area->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long size, pgoff, endoff;

	pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT)
		+ area->vm_pgoff;
	endoff = ((area->vm_end - area->vm_start) >> PAGE_CACHE_SHIFT)
		+ area->vm_pgoff;

	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (pgoff >= size)
		return NULL;

	page = mapping->a_ops->get_xip_page(mapping, pgoff*(PAGE_SIZE/512), 0);
	if (!IS_ERR(page))
		goto out;
	if (PTR_ERR(page) != -ENODATA)
		return NULL;

	/* sparse block */
	if ((area->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
	    (area->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
		/* maybe shared writable, allocate new block */
		page = mapping->a_ops->get_xip_page(mapping,
			pgoff*(PAGE_SIZE/512), 1);
		if (IS_ERR(page))
			return NULL;
		/* unmap page at pgoff from all other vmas */
		__xip_unmap(mapping, pgoff);
	} else {
		/* not shared and writable, use ZERO_PAGE() */
		page = ZERO_PAGE(address);
	}

out:
	page_cache_get(page);
	return page;
}
static struct vm_operations_struct xip_file_vm_ops = {
	.nopage		= xip_file_nopage,
};
int xip_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	BUG_ON(!file->f_mapping->a_ops->get_xip_page);

	file_accessed(file);
	vma->vm_ops = &xip_file_vm_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);
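/*
 * Example (userspace sketch): an XIP mapping is established like any
 * other file mapping; the difference is invisible to the caller:
 *
 *	int fd = open("/mnt/xip/bin/app", O_RDONLY);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
 *
 * The PTEs set up via xip_file_nopage() reference the backing memory
 * directly, so no page cache copy is created; hence "execute in place".
 */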
static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
		 size_t count, loff_t pos, loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	long status = 0;
	struct page *page;
	size_t bytes;
	ssize_t written = 0;

	BUG_ON(!mapping->a_ops->get_xip_page);

	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		fault_in_pages_readable(buf, bytes);

		page = a_ops->get_xip_page(mapping,
					   index*(PAGE_SIZE/512), 0);
		if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
			/* hole: allocate a new block and unmap the zero
			 * page from all vmas that still map it */
			page = a_ops->get_xip_page(mapping,
						   index*(PAGE_SIZE/512), 1);
			if (!IS_ERR(page))
				/* unmap page at pgoff from all other vmas */
				__xip_unmap(mapping, index);
		}

		if (IS_ERR(page)) {
			status = PTR_ERR(page);
			break;
		}

		copied = filemap_copy_from_user(page, offset, buf, bytes);
		flush_dcache_page(page);
		if (likely(copied > 0)) {
			status = copied;
			written += status;
			count -= status;
			pos += status;
			buf += status;
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		if (status < 0)
			break;
	} while (count);
	*ppos = pos;
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_sem.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}

	return written ? written : status;
}
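/*
 * Worked example for the loop above (assuming PAGE_CACHE_SIZE == 4096):
 * a write of 8000 bytes at pos 5000 first computes index = 1,
 * offset = 5000 & 4095 = 904 and bytes = 4096 - 904 = 3192; the next
 * iteration continues at pos 8192 with offset 0, and so on until count
 * is exhausted or get_xip_page() fails.
 */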
ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
	       loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count;
	loff_t pos;
	ssize_t ret;

	down(&inode->i_sem);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret = -EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;

	ret = remove_suid(filp->f_dentry);
	if (ret)
		goto out_backing;

	inode_update_time(inode, 1);

	ret = __xip_file_write(filp, buf, count, pos, ppos);

 out_backing:
	current->backing_dev_info = NULL;
 out_up:
	up(&inode->i_sem);
	return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);
/*
 * Truncate a page used for execute in place.
 * The functionality is analogous to block_truncate_page, but it uses
 * get_xip_page to obtain the page instead of going through the page cache.
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	unsigned length;
	struct page *page;
	void *kaddr;

	BUG_ON(!mapping->a_ops->get_xip_page);

	blocksize = 1 << mapping->host->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;

	page = mapping->a_ops->get_xip_page(mapping,
					    index*(PAGE_SIZE/512), 0);
	if (!page)
		return -ENOMEM;
	if (unlikely(IS_ERR(page))) {
		if (PTR_ERR(page) == -ENODATA)
			/* Hole? No need to truncate */
			return 0;
		return PTR_ERR(page);
	}

	/* zero the tail of the partial block within the page */
	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, length);
	kunmap_atomic(kaddr, KM_USER0);

	flush_dcache_page(page);
	return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);
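/*
 * Example (sketch): a filesystem calls xip_truncate_page() from its
 * truncate path where it would otherwise use block_truncate_page(),
 * ext2-style; the get_block helper name below is hypothetical:
 *
 *	if (mapping_is_xip(inode->i_mapping))
 *		xip_truncate_page(inode->i_mapping, inode->i_size);
 *	else
 *		block_truncate_page(inode->i_mapping, inode->i_size,
 *				    example_get_block);
 */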