/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/pgtable.h>
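
/*
 * Simplest case: the lock state of the whole vma changes, so no
 * split is needed -- just rewrite the flags in place.
 */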
static inline int mlock_fixup_all(struct vm_area_struct * vma, int newflags)
{
	vma->vm_flags = newflags;
	return 0;
}
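
/*
 * The lock state changes at the head of the vma: clip the vma so it
 * starts at "end", and insert a new vma "n" covering the head
 * [old vm_start, end) with the new flags.  The extra reference on
 * the backing file and the vm_ops->open() call keep the mapping's
 * bookkeeping consistent across the split.
 */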
static inline int mlock_fixup_start(struct vm_area_struct * vma,
	unsigned long end, int newflags)
{
	struct vm_area_struct * n;

	n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!n)
		return -EAGAIN;
	*n = *vma;
	vma->vm_start = end;
	n->vm_end = end;
	vma->vm_offset += vma->vm_start - n->vm_start;
	n->vm_flags = newflags;
	if (n->vm_file)
		n->vm_file->f_count++;
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	insert_vm_struct(current->mm, n);
	return 0;
}
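
/*
 * Mirror image of mlock_fixup_start(): the lock state changes at
 * the tail, so clip the vma to end at "start" and insert a new
 * vma "n" covering [start, old vm_end) with the new flags.
 */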
static inline int mlock_fixup_end(struct vm_area_struct * vma,
	unsigned long start, int newflags)
{
	struct vm_area_struct * n;

	n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!n)
		return -EAGAIN;
	*n = *vma;
	vma->vm_end = start;
	n->vm_start = start;
	n->vm_offset += n->vm_start - vma->vm_start;
	n->vm_flags = newflags;
	if (n->vm_file)
		n->vm_file->f_count++;
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	insert_vm_struct(current->mm, n);
	return 0;
}
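
/*
 * The lock state changes in the interior of the vma: split it into
 * three.  "left" keeps the old flags for [old vm_start, start),
 * the original vma is narrowed to [start, end) with the new flags,
 * and "right" keeps the old flags for [end, old vm_end).  Two new
 * vmas means two extra file references and two open() calls.
 */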
static inline int mlock_fixup_middle(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, int newflags)
{
	struct vm_area_struct * left, * right;

	left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!left)
		return -EAGAIN;
	right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!right) {
		kmem_cache_free(vm_area_cachep, left);
		return -EAGAIN;
	}
	*left = *vma;
	*right = *vma;
	left->vm_end = start;
	vma->vm_start = start;
	vma->vm_end = end;
	right->vm_start = end;
	vma->vm_offset += vma->vm_start - left->vm_start;
	right->vm_offset += right->vm_start - left->vm_start;
	vma->vm_flags = newflags;
	if (vma->vm_file)
		vma->vm_file->f_count += 2;
	if (vma->vm_ops && vma->vm_ops->open) {
		vma->vm_ops->open(left);
		vma->vm_ops->open(right);
	}
	insert_vm_struct(current->mm, left);
	insert_vm_struct(current->mm, right);
	return 0;
}
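
/*
 * Dispatch to the appropriate fixup depending on how [start, end)
 * lines up with the vma, then update the locked-page accounting.
 * When locking, touch one byte of each page with get_user() so the
 * pages are faulted in immediately; the empty asm keeps the
 * compiler from optimizing the dead read away.
 */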
static int mlock_fixup(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	int pages, retval;

	if (newflags == vma->vm_flags)
		return 0;

	if (start == vma->vm_start) {
		if (end == vma->vm_end)
			retval = mlock_fixup_all(vma, newflags);
		else
			retval = mlock_fixup_start(vma, end, newflags);
	} else {
		if (end == vma->vm_end)
			retval = mlock_fixup_end(vma, start, newflags);
		else
			retval = mlock_fixup_middle(vma, start, end, newflags);
	}
	if (!retval) {
		/* keep track of amount of locked VM */
		pages = (end - start) >> PAGE_SHIFT;
		if (!(newflags & VM_LOCKED))
			pages = -pages;
		vma->vm_mm->locked_vm += pages;

		if (newflags & VM_LOCKED)
			while (start < end) {
				char c;
				get_user(c,(char *) start);
				__asm__ __volatile__("": :"r" (c));
				start += PAGE_SIZE;
			}
	}
	return retval;
}
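
/*
 * Walk every vma in [start, start+len) and turn VM_LOCKED on or
 * off.  The range must be fully mapped: a hole between vmas makes
 * the walk fail with -ENOMEM.  Afterwards, merge_segments() glues
 * back together any adjacent vmas that ended up with equal flags.
 */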
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * next;
	int error;

	if (!capable(CAP_IPC_LOCK))
		return -EPERM;
	len = (len + ~PAGE_MASK) & PAGE_MASK;
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		if (vma->vm_end >= end) {
			error = mlock_fixup(vma, nstart, end, newflags);
			break;
		}

		tmp = vma->vm_end;
		next = vma->vm_next;
		error = mlock_fixup(vma, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		vma = next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	merge_segments(current->mm, start, end);
	return error;
}
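
/*
 * mlock(2): lock a range of the caller's address space in RAM.
 * The range is rounded outward to page boundaries and checked
 * against RLIMIT_MEMLOCK and against half of physical memory
 * before do_mlock() does the real work.  From userspace (through
 * the usual libc wrapper) this is simply:
 *
 *	if (mlock(addr, length) == 0)
 *		... pages are resident and will stay resident ...
 */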
asmlinkage int sys_mlock(unsigned long start, size_t len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	lock_kernel();
	len = (len + (start & ~PAGE_MASK) + ~PAGE_MASK) & PAGE_MASK;
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if (locked > lock_limit)
		goto out;

	/* we may lock at most half of physical memory... */
	/* (this check is pretty bogus, but doesn't hurt) */
	if (locked > num_physpages/2)
		goto out;

	error = do_mlock(start, len, 1);
out:
	unlock_kernel();
	return error;
}
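
/*
 * munlock(2): the converse of sys_mlock().  No limit checks are
 * needed to unlock, so just round the range to page boundaries and
 * call do_mlock() with on == 0.
 */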
asmlinkage int sys_munlock(unsigned long start, size_t len)
{
	int ret;

	lock_kernel();
	len = (len + (start & ~PAGE_MASK) + ~PAGE_MASK) & PAGE_MASK;
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	unlock_kernel();
	return ret;
}
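
/*
 * Lock or unlock every vma in the address space.  MCL_FUTURE is
 * recorded in mm->def_flags so that mappings created later are
 * born locked; MCL_CURRENT decides whether the existing vmas get
 * VM_LOCKED set or cleared.
 */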
static int do_mlockall(int flags)
{
	int error;
	unsigned int def_flags;
	struct vm_area_struct * vma;

	if (!capable(CAP_IPC_LOCK))
		return -EPERM;

	def_flags = 0;
	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;

	error = 0;
	for (vma = current->mm->mmap; vma ; vma = vma->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;
		error = mlock_fixup(vma, vma->vm_start, vma->vm_end, newflags);
		if (error)
			break;
	}
	merge_segments(current->mm, 0, TASK_SIZE);
	return error;
}
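
/*
 * mlockall(2): lock down the whole address space.  The flags must
 * name at least one of MCL_CURRENT and MCL_FUTURE and nothing
 * else, and the process's total VM (not just the already-locked
 * part) is checked against the limits, since all of it is about
 * to be locked.
 */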
asmlinkage int sys_mlockall(int flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	lock_kernel();
	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (current->mm->total_vm > lock_limit)
		goto out;

	/* we may lock at most half of physical memory... */
	/* (this check is pretty bogus, but doesn't hurt) */
	if (current->mm->total_vm > num_physpages/2)
		goto out;

	ret = do_mlockall(flags);
out:
	unlock_kernel();
	return ret;
}
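
/*
 * munlockall(2): undo mlockall() by clearing VM_LOCKED everywhere,
 * including mm->def_flags, via do_mlockall(0).
 */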
asmlinkage int sys_munlockall(void)
{
	int ret;

	lock_kernel();
	ret = do_mlockall(0);
	unlock_kernel();
	return ret;
}