/* mm/util.c */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <asm/uaccess.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

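/*
 * Illustrative usage sketch, not part of the original file; all
 * identifiers below are hypothetical. kstrdup() is typically used to
 * take ownership of a caller-supplied string in process context:
 *
 *	static int example_set_label(struct example_dev *dev,
 *				     const char *name)
 *	{
 *		char *copy = kstrdup(name, GFP_KERNEL);
 *
 *		if (!copy)
 *			return -ENOMEM;
 *		kfree(dev->label);
 *		dev->label = copy;
 *		return 0;
 *	}
 */
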
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

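/*
 * Illustrative sketch with hypothetical identifiers: kstrndup() suits
 * sources that may not be NUL-terminated within their bounds, since at
 * most @max characters are read and the copy is always terminated:
 *
 *	char raw[16];	(fixed-size field, termination not guaranteed)
 *	char *copy = kstrndup(raw, sizeof(raw), GFP_KERNEL);
 *
 *	if (!copy)
 *		return -ENOMEM;
 */
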
/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

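/*
 * Illustrative sketch with hypothetical identifiers: kmemdup() is the
 * usual way to take a private copy of a fixed-size object, e.g. a
 * template structure:
 *
 *	struct example_cfg *cfg = kmemdup(&default_cfg,
 *					  sizeof(default_cfg), GFP_KERNEL);
 *
 *	if (!cfg)
 *		return -ENOMEM;
 */
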
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

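/*
 * Illustrative sketch with hypothetical identifiers: a common pattern
 * is pulling an ioctl argument buffer into the kernel. Note the
 * ERR_PTR() convention: check with IS_ERR()/PTR_ERR(), not for NULL:
 *
 *	static long example_ioctl(void __user *arg, size_t len)
 *	{
 *		void *buf = memdup_user(arg, len);
 *
 *		if (IS_ERR(buf))
 *			return PTR_ERR(buf);
 *		(... use buf ...)
 *		kfree(buf);
 *		return 0;
 *	}
 */
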
static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);

	if (ks >= new_size)
		return (void *)p;

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer immediately
 * like, for example, with RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);
}
EXPORT_SYMBOL(__krealloc);

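/*
 * Illustrative sketch with hypothetical identifiers, following the RCU
 * use case mentioned above: the old buffer must stay valid until
 * readers are done, so it is freed only after a grace period:
 *
 *	new = __krealloc(old, new_size, GFP_KERNEL);
 *	if (!new)
 *		return -ENOMEM;
 *	if (new != old) {
 *		rcu_assign_pointer(example_table, new);
 *		synchronize_rcu();
 *		kfree(old);
 *	}
 */
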
/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);

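/*
 * Illustrative sketch with hypothetical identifiers: assign through a
 * temporary so the original pointer is not lost on failure, since
 * krealloc() returns NULL and leaves the old buffer allocated:
 *
 *	tmp = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!tmp)
 *		return -ENOMEM;		(buf is still valid here)
 *	buf = tmp;
 */
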
/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);

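/*
 * Illustrative sketch with hypothetical identifiers: kzfree() is meant
 * for sensitive data such as key material, so it does not linger in
 * freed memory:
 *
 *	key = kmemdup(user_key, key_len, GFP_KERNEL);
 *	(... use key ...)
 *	kzfree(key);
 */
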
/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

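/*
 * Illustrative sketch with hypothetical identifiers: copying a
 * user-supplied name with an upper bound, as a syscall might:
 *
 *	char *name = strndup_user(uname, PATH_MAX);
 *
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);	(-EFAULT or -EINVAL)
 *	(... use name ...)
 *	kfree(name);
 */
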
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
static int vm_is_stack_for_task(struct task_struct *t,
				struct vm_area_struct *vma)
{
	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Check if the vma is being used as a stack.
 * If in_group is non-zero, check in the entire thread group or else
 * just check in the current task. Returns the pid of the task that
 * the vma is stack for.
 */
pid_t vm_is_stack(struct task_struct *task,
		  struct vm_area_struct *vma, int in_group)
{
	pid_t ret = 0;

	if (vm_is_stack_for_task(task, vma))
		return task->pid;

	if (in_group) {
		struct task_struct *t;
		rcu_read_lock();
		if (!pid_alive(task))
			goto done;

		t = task;
		do {
			if (vm_is_stack_for_task(t, vma)) {
				ret = t->pid;
				goto done;
			}
		} while_each_thread(task, t);
done:
		rcu_read_unlock();
	}

	return ret;
}

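/*
 * Illustrative sketch with hypothetical context: this helper backs the
 * "[stack]" annotation in the /proc/<pid>/maps code, along the lines
 * of:
 *
 *	pid_t tid = vm_is_stack(task, vma, 1);
 *
 *	if (tid)
 *		seq_printf(m, " [stack:%d]", tid);
 */
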
#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
	mm->unmap_area = arch_unmap_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, it simply returns
 * with no pages pinned.
 */
int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __attribute__((weak)) get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, start, nr_pages,
					write, 0, pages, NULL);
	up_read(&mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);

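/*
 * Illustrative sketch with hypothetical identifiers: pinning a single
 * user page for writing. Every page actually pinned must later be
 * released with put_page():
 *
 *	struct page *page;
 *	int ret = get_user_pages_fast(uaddr, 1, 1, &page);
 *
 *	if (ret != 1)
 *		return ret < 0 ? ret : -EFAULT;
 *	(... access the page ...)
 *	put_page(page);
 */
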
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		down_write(&mm->mmap_sem);
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff);
		up_write(&mm->mmap_sem);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset & ~PAGE_MASK))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

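/*
 * Illustrative sketch with hypothetical identifiers: an in-kernel file
 * mapping, as a binary loader might create. The offset must be
 * page-aligned or -EINVAL is returned:
 *
 *	unsigned long addr = vm_mmap(file, 0, size, PROT_READ,
 *				     MAP_PRIVATE, 0);
 *
 *	if (IS_ERR_VALUE(addr))
 *		return (int)addr;
 */
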
/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);