4 * Copyright (C) 1991, 1992 Linus Torvalds
7 #include <linux/types.h>
8 #include <linux/errno.h>
9 #include <linux/sched.h>
10 #include <linux/kernel.h>
12 #include <linux/proc_fs.h>
15 #include <asm/uaccess.h>
17 #include <asm/pgtable.h>
20 * mem_write isn't really a good idea right now. It needs
21 * to check a lot more: if the process we try to write to
22 * dies in the middle right now, mem_write will overwrite
23 * kernel memory.. This disables it altogether.
25 #define mem_write NULL
/*
 * check_range: verify that the byte range [addr, addr+count) of the
 * given mm is covered by contiguous, VM_READ-able VMAs.
 *
 * NOTE(review): this listing is fragmentary -- the embedded source
 * line numbers jump (27 -> 29, 35 -> 37, ...), so the declaration of
 * `retval`, the error-return statements, and closing braces are
 * missing from this extraction.  Code below is kept verbatim.
 */
27 static int check_range(struct mm_struct
* mm
, unsigned long addr
, int count
)
29 struct vm_area_struct
*vma
;
/* Find the first VMA whose end lies above addr. */
32 vma
= find_vma(mm
, addr
);
/* addr must fall inside that VMA, not in a hole before it. */
35 if (vma
->vm_start
> addr
)
/* The starting VMA must be readable. */
37 if (!(vma
->vm_flags
& VM_READ
))
/* Walk forward while the VMAs seen so far cover fewer than `count`
 * bytes: each successor must abut the previous VMA exactly (no gap)
 * and must itself be readable. */
39 while ((retval
= vma
->vm_end
- addr
) < count
) {
40 struct vm_area_struct
*next
= vma
->vm_next
;
/* Gap between VMAs => range is not fully mapped. */
43 if (vma
->vm_end
!= next
->vm_start
)
45 if (!(next
->vm_flags
& VM_READ
))
/*
 * get_task: translate a pid into its task_struct, granting access
 * only under the conditions ptrace would: unless the target is the
 * caller itself, it must be PF_PTRACED, in TASK_STOPPED state, and a
 * direct child (p_pptr) of the current task.
 *
 * NOTE(review): fragmentary listing -- the success/failure return
 * statements and closing braces are missing from this extraction.
 */
54 static struct task_struct
* get_task(int pid
)
56 struct task_struct
* tsk
= current
;
/* Accessing our own task needs no lookup or permission check. */
58 if (pid
!= tsk
->pid
) {
59 tsk
= find_task_by_pid(pid
);
61 /* Allow accesses only under the same circumstances
62 * that we would allow ptrace to work.
/* Ptrace-style permission check on the found task. */
65 if (!(tsk
->flags
& PF_PTRACED
)
66 || tsk
->state
!= TASK_STOPPED
67 || tsk
->p_pptr
!= current
)
/*
 * mem_read: read bytes from another process's address space via
 * /proc/<pid>/mem.  The target pid is encoded in the high bits of the
 * inode number (i_ino >> 16).  The read walks the target's page
 * tables by hand (pgd -> pmd -> pte) and copies page fragments out
 * with copy_to_user().
 *
 * NOTE(review): fragmentary listing -- declarations of `scount`,
 * `addr`, `tmp`, `i`, `page`, `pte`, `page_dir`, `page_middle`, the
 * main copy loop, and all return paths are missing from this
 * extraction.  Code below is kept verbatim.
 */
74 static ssize_t
mem_read(struct file
* file
, char * buf
,
75 size_t count
, loff_t
*ppos
)
77 struct inode
* inode
= file
->f_dentry
->d_inode
;
82 struct task_struct
* tsk
;
/* Resolve the target task under the tasklist lock. */
87 read_lock(&tasklist_lock
);
88 tsk
= get_task(inode
->i_ino
>> 16);
89 read_unlock(&tasklist_lock
); /* FIXME: This should really be done only after not using tsk any more!!! */
/* Validate that the requested range is mapped and readable. */
93 scount
= check_range(tsk
->mm
, addr
, count
);
/* Bail out early if the caller has a signal pending. */
98 if (signal_pending(current
))
/* Manual page-table walk: top-level directory entry for addr. */
100 page_dir
= pgd_offset(tsk
->mm
,addr
);
101 if (pgd_none(*page_dir
))
103 if (pgd_bad(*page_dir
)) {
104 printk("Bad page dir entry %08lx\n", pgd_val(*page_dir
));
/* Middle-level entry. */
108 page_middle
= pmd_offset(page_dir
,addr
);
109 if (pmd_none(*page_middle
))
111 if (pmd_bad(*page_middle
)) {
112 printk("Bad page middle entry %08lx\n", pmd_val(*page_middle
));
113 pmd_clear(page_middle
);
/* Leaf pte: must be present to read through it. */
116 pte
= *pte_offset(page_middle
,addr
);
117 if (!pte_present(pte
))
/* Kernel-virtual pointer into the page, plus in-page offset... */
119 page
= (char *) pte_page(pte
) + (addr
& ~PAGE_MASK
);
/* ...and the number of bytes left in this page from addr. */
120 i
= PAGE_SIZE
-(addr
& ~PAGE_MASK
);
/* Copy this page fragment out to the user buffer. */
123 copy_to_user(tmp
, page
, i
);
/*
 * mem_write: mirror of mem_read that writes into another process's
 * address space with copy_from_user().  It is compiled but disabled:
 * `#define mem_write NULL` near the top of the file replaces it in
 * the file_operations table, for the reasons given in the comment
 * there (target may die mid-write and kernel memory gets clobbered).
 *
 * NOTE(review): unlike mem_read, this function takes no
 * read_lock(&tasklist_lock) around get_task() in the visible lines,
 * and line 155 passes `tsk` to pgd_offset() where mem_read passes
 * `tsk->mm` -- both look wrong relative to mem_read, but since the
 * function is disabled and this listing is fragmentary (locals,
 * loop, and returns missing), they are flagged here rather than
 * changed.  Code below is kept verbatim.
 */
134 static ssize_t
mem_write(struct file
* file
, char * buf
,
135 size_t count
, loff_t
*ppos
)
137 struct inode
* inode
= file
->f_dentry
->d_inode
;
142 struct task_struct
* tsk
;
/* Resolve the target task (no tasklist lock visible here). */
148 tsk
= get_task(inode
->i_ino
>> 16);
153 if (signal_pending(current
))
/* NOTE(review): mem_read uses pgd_offset(tsk->mm, addr) here. */
155 page_dir
= pgd_offset(tsk
,addr
);
156 if (pgd_none(*page_dir
))
158 if (pgd_bad(*page_dir
)) {
159 printk("Bad page dir entry %08lx\n", pgd_val(*page_dir
));
163 page_middle
= pmd_offset(page_dir
,addr
);
164 if (pmd_none(*page_middle
))
166 if (pmd_bad(*page_middle
)) {
167 printk("Bad page middle entry %08lx\n", pmd_val(*page_middle
));
168 pmd_clear(page_middle
);
/* Leaf pte must be present before writing through it. */
171 pte
= *pte_offset(page_middle
,addr
);
172 if (!pte_present(pte
))
176 page
= (char *) pte_page(pte
) + (addr
& ~PAGE_MASK
);
177 i
= PAGE_SIZE
-(addr
& ~PAGE_MASK
);
/* Copy this page fragment in from the user buffer. */
180 copy_from_user(page
, tmp
, i
);
188 if (signal_pending(current
))
/*
 * mem_lseek: seek within /proc/<pid>/mem; the offset is interpreted
 * as an address in the target's address space.
 *
 * NOTE(review): fragmentary listing -- the two assignments below are
 * presumably the SEEK_SET and SEEK_CUR arms of a switch on `orig`,
 * but the switch itself and the return statements are missing from
 * this extraction; confirm against the full source.
 */
195 static long long mem_lseek(struct file
* file
, long long offset
, int orig
)
/* Absolute seek (SEEK_SET arm -- see NOTE above). */
199 file
->f_pos
= offset
;
/* Relative seek (SEEK_CUR arm -- see NOTE above). */
202 file
->f_pos
+= offset
;
210 * This isn't really reliable by any means..
/*
 * mem_mmap: map a range of another process's address space into the
 * caller's, by copying page-table entries directly.  Two passes over
 * the source range (vm_offset .. vm_offset + size):
 *
 *   1. validation -- every source page must be covered by a VMA that
 *      is not shared memory (VM_SHM), have sane pgd/pmd entries, and
 *      a pte; a page below its VMA's vm_start is only acceptable for
 *      a VM_GROWSDOWN stack within the caller's RLIMIT_STACK.
 *   2. copy -- fault source pages in as needed, mark them dirty,
 *      duplicate each source pte into the caller's page table, and
 *      bump the page refcount for pages inside mem_map.
 *
 * NOTE(review): fragmentary listing -- loop-closing braces, the
 * pmd_alloc/pte_alloc failure checks, per-page stmp/dtmp increments,
 * and return statements are missing from this extraction.  Code
 * below is kept verbatim.
 */
212 int mem_mmap(struct file
* file
, struct vm_area_struct
* vma
)
214 struct task_struct
*tsk
;
215 pgd_t
*src_dir
, *dest_dir
;
216 pmd_t
*src_middle
, *dest_middle
;
217 pte_t
*src_table
, *dest_table
;
218 unsigned long stmp
, dtmp
, mapnr
;
219 struct vm_area_struct
*src_vma
= NULL
;
220 struct inode
*inode
= file
->f_dentry
->d_inode
;
222 /* Get the source's task information */
224 tsk
= get_task(inode
->i_ino
>> 16);
229 /* Ensure that we have a valid source area. (Has to be mmap'ed and
230 have valid page information.) We can't map shared memory at the
231 moment because working out the vm_area_struct & nattach stuff isn't
/* --- Pass 1: validate every source page in the requested range. --- */
234 src_vma
= tsk
->mm
->mmap
;
235 stmp
= vma
->vm_offset
;
236 while (stmp
< vma
->vm_offset
+ (vma
->vm_end
- vma
->vm_start
)) {
/* Advance to the source VMA containing stmp. */
237 while (src_vma
&& stmp
> src_vma
->vm_end
)
238 src_vma
= src_vma
->vm_next
;
/* Unmapped or shared-memory source page: refuse (see comment above). */
239 if (!src_vma
|| (src_vma
->vm_flags
& VM_SHM
))
/* Source page tables must exist and be sane. */
242 src_dir
= pgd_offset(tsk
->mm
, stmp
);
243 if (pgd_none(*src_dir
))
245 if (pgd_bad(*src_dir
)) {
246 printk("Bad source page dir entry %08lx\n", pgd_val(*src_dir
));
249 src_middle
= pmd_offset(src_dir
, stmp
);
250 if (pmd_none(*src_middle
))
252 if (pmd_bad(*src_middle
)) {
253 printk("Bad source page middle entry %08lx\n", pmd_val(*src_middle
));
256 src_table
= pte_offset(src_middle
, stmp
);
257 if (pte_none(*src_table
))
/* Address below the VMA start: only OK for a growable stack,
 * and only within the caller's stack rlimit. */
260 if (stmp
< src_vma
->vm_start
) {
261 if (!(src_vma
->vm_flags
& VM_GROWSDOWN
))
263 if (src_vma
->vm_end
- stmp
> current
->rlim
[RLIMIT_STACK
].rlim_cur
)
/* --- Pass 2: copy the ptes.  Restart the walk from the top. --- */
269 src_vma
= tsk
->mm
->mmap
;
270 stmp
= vma
->vm_offset
;
271 dtmp
= vma
->vm_start
;
/* Flush caches on both sides before touching the page tables. */
273 flush_cache_range(vma
->vm_mm
, vma
->vm_start
, vma
->vm_end
);
274 flush_cache_range(src_vma
->vm_mm
, src_vma
->vm_start
, src_vma
->vm_end
);
275 while (dtmp
< vma
->vm_end
) {
276 while (src_vma
&& stmp
> src_vma
->vm_end
)
277 src_vma
= src_vma
->vm_next
;
/* Locate the source pte (already validated in pass 1). */
279 src_dir
= pgd_offset(tsk
->mm
, stmp
);
280 src_middle
= pmd_offset(src_dir
, stmp
);
281 src_table
= pte_offset(src_middle
, stmp
);
/* Allocate the destination page-table path in the caller's mm. */
283 dest_dir
= pgd_offset(current
->mm
, dtmp
);
284 dest_middle
= pmd_alloc(dest_dir
, dtmp
);
287 dest_table
= pte_alloc(dest_middle
, dtmp
);
/* Fault the source page in if it is not present... */
291 if (!pte_present(*src_table
))
292 handle_mm_fault(tsk
, src_vma
, stmp
, 1);
/* ...and force it writable if the mapping wants write access. */
294 if ((vma
->vm_flags
& VM_WRITE
) && !pte_write(*src_table
))
295 handle_mm_fault(tsk
, src_vma
, stmp
, 1);
/* Mark the source dirty, then share the pte with the caller. */
297 set_pte(src_table
, pte_mkdirty(*src_table
));
298 set_pte(dest_table
, *src_table
);
/* Take a reference on the page if it lives inside mem_map. */
299 mapnr
= MAP_NR(pte_page(*src_table
));
300 if (mapnr
< max_mapnr
)
301 get_page(mem_map
+ MAP_NR(pte_page(*src_table
)));
/* Flush stale TLB entries for both ranges now that ptes changed. */
307 flush_tlb_range(vma
->vm_mm
, vma
->vm_start
, vma
->vm_end
);
308 flush_tlb_range(src_vma
->vm_mm
, src_vma
->vm_start
, src_vma
->vm_end
);
/*
 * File operations for /proc/<pid>/mem.  Positional (pre-designated-
 * initializer) layout; unsupported operations are NULL.
 * NOTE(review): fragmentary listing -- the slots for mem_lseek,
 * mem_read, mem_write (#defined NULL above) and mem_mmap are missing
 * from this extraction.
 */
312 static struct file_operations proc_mem_operations
= {
316 NULL
, /* mem_readdir */
318 NULL
, /* mem_ioctl */
320 NULL
, /* no special open code */
322 NULL
, /* no special release code */
323 NULL
/* can't fsync */
/*
 * Inode operations for /proc/<pid>/mem: only default_file_ops and a
 * permission hook (proc_permission) are set; everything else is NULL.
 * NOTE(review): fragmentary listing -- most intermediate NULL slots
 * are missing from this extraction.
 */
326 struct inode_operations proc_mem_inode_operations
= {
327 &proc_mem_operations
, /* default base directory file-ops */
338 NULL
, /* follow_link */
340 NULL
, /* writepage */
343 proc_permission
/* permission */