mm/util.c

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <asm/uaccess.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>
/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);
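
/*
 * Illustrative sketch (not part of the original mm/util.c): a typical
 * kstrdup() caller copies a caller-owned string into a freshly allocated
 * buffer and checks for allocation failure.  The set_label() helper and
 * the "label" variable are invented for the example; GFP_KERNEL is assumed
 * because the caller may sleep.
 *
 *	static char *label;
 *
 *	static int set_label(const char *name)
 *	{
 *		char *copy = kstrdup(name, GFP_KERNEL);
 *
 *		if (!copy)
 *			return -ENOMEM;
 *		kfree(label);
 *		label = copy;
 *		return 0;
 *	}
 */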
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);
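
/*
 * Illustrative sketch (not part of the original mm/util.c): kstrndup() is
 * useful when the source may not be NUL-terminated, e.g. a fixed-size name
 * field.  The struct and helper below are hypothetical.
 *
 *	struct fw_record {
 *		char name[16];		// not guaranteed to be NUL-terminated
 *	};
 *
 *	static char *record_name(const struct fw_record *rec)
 *	{
 *		// reads at most sizeof(rec->name) bytes, always terminates
 *		return kstrndup(rec->name, sizeof(rec->name), GFP_KERNEL);
 *	}
 */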
/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
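
/*
 * Illustrative sketch (not part of the original mm/util.c): kmemdup() copies
 * an arbitrary (possibly binary, non-string) buffer.  The names below are
 * invented for the example.
 *
 *	static void *save_config(const void *blob, size_t blob_len)
 *	{
 *		// returns NULL on allocation failure, like kmalloc()
 *		return kmemdup(blob, blob_len, GFP_KERNEL);
 *	}
 */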
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
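
/*
 * Illustrative sketch (not part of the original mm/util.c): memdup_user()
 * is a common way to pull a fixed-size argument block out of an ioctl.
 * Note that failure is reported via ERR_PTR(), not NULL.  struct foo_args
 * and foo_ioctl_set() are hypothetical.
 *
 *	struct foo_args {
 *		u32 flags;
 *		u64 addr;
 *	};
 *
 *	static long foo_ioctl_set(const void __user *uptr)
 *	{
 *		struct foo_args *args = memdup_user(uptr, sizeof(*args));
 *
 *		if (IS_ERR(args))
 *			return PTR_ERR(args);
 *		// ... use args ...
 *		kfree(args);
 *		return 0;
 *	}
 */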
/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer immediately
 * like, for example, with RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	if (p)
		ks = ksize(p);

	if (ks >= new_size)
		return (void *)p;

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}
EXPORT_SYMBOL(__krealloc);
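
/*
 * Illustrative sketch (not part of the original mm/util.c): the point of
 * __krealloc() is that the old buffer stays valid, so RCU readers that still
 * hold a pointer to it remain safe until a grace period has elapsed.  The
 * table, table_lock and grow_table() names below are hypothetical, and
 * "struct entry" is assumed to be defined elsewhere.
 *
 *	static struct entry *table;	// read under rcu_read_lock()
 *	static DEFINE_MUTEX(table_lock);
 *
 *	static int grow_table(size_t new_size)
 *	{
 *		struct entry *old, *new;
 *
 *		mutex_lock(&table_lock);
 *		old = table;
 *		new = __krealloc(old, new_size, GFP_KERNEL);
 *		if (!new) {
 *			mutex_unlock(&table_lock);
 *			return -ENOMEM;
 *		}
 *		if (new != old) {
 *			rcu_assign_pointer(table, new);
 *			synchronize_rcu();	// wait out readers of "old"
 *			kfree(old);
 *		}
 *		mutex_unlock(&table_lock);
 *		return 0;
 *	}
 */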
/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);
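
/*
 * Illustrative sketch (not part of the original mm/util.c): krealloc() is
 * typically used to grow a dynamically sized array.  On failure the old
 * buffer is left untouched, so it must not be overwritten blindly.  The
 * push_item() helper and its parameters are invented.
 *
 *	static int push_item(int **items, size_t *capacity, size_t used)
 *	{
 *		if (used == *capacity) {
 *			size_t new_cap = *capacity ? *capacity * 2 : 8;
 *			int *tmp = krealloc(*items, new_cap * sizeof(int),
 *					    GFP_KERNEL);
 *
 *			if (!tmp)
 *				return -ENOMEM;	// *items is still valid
 *			*items = tmp;
 *			*capacity = new_cap;
 *		}
 *		return 0;
 *	}
 */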
/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);
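
/*
 * Illustrative sketch (not part of the original mm/util.c): kzfree() suits
 * buffers holding key material or other secrets, where the contents should
 * not linger in freed memory.  The handle_key() helper is invented.
 *
 *	static int handle_key(const u8 *key, size_t keylen)
 *	{
 *		u8 *tmp = kmemdup(key, keylen, GFP_KERNEL);
 *
 *		if (!tmp)
 *			return -ENOMEM;
 *		// ... program the key into hardware ...
 *		kzfree(tmp);	// zeroes the whole allocation, then frees it
 *		return 0;
 *	}
 */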
int kern_ptr_validate(const void *ptr, unsigned long size)
{
	unsigned long addr = (unsigned long)ptr;
	unsigned long min_addr = PAGE_OFFSET;
	unsigned long align_mask = sizeof(void *) - 1;

	if (unlikely(addr < min_addr))
		goto out;
	if (unlikely(addr > (unsigned long)high_memory - size))
		goto out;
	if (unlikely(addr & align_mask))
		goto out;
	if (unlikely(!kern_addr_valid(addr)))
		goto out;
	if (unlikely(!kern_addr_valid(addr + size - 1)))
		goto out;
	return 1;
out:
	return 0;
}
/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = kmalloc(length, GFP_KERNEL);

	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, s, length)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
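
/*
 * Illustrative sketch (not part of the original mm/util.c): strndup_user()
 * copies a NUL-terminated string from user space with an upper bound and
 * reports failure via ERR_PTR().  The set_name_from_user() helper below is
 * hypothetical.
 *
 *	static long set_name_from_user(const char __user *uname)
 *	{
 *		char *name = strndup_user(uname, PAGE_SIZE);
 *
 *		if (IS_ERR(name))
 *			return PTR_ERR(name);
 *		// ... use name ...
 *		kfree(name);
 *		return 0;
 *	}
 */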
#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
	mm->unmap_area = arch_unmap_area;
}
#endif
/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __attribute__((weak)) get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, start, nr_pages,
					write, 0, pages, NULL);
	up_read(&mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
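
/*
 * Illustrative sketch (not part of the original mm/util.c): a typical
 * get_user_pages_fast() caller pins a user buffer (e.g. for DMA), uses the
 * pages, and then drops the references with put_page().  It must run in
 * process context without mmap_sem held.  The pin_user_buffer() helper is
 * invented for the example.
 *
 *	static int pin_user_buffer(unsigned long uaddr, int nr_pages,
 *				   struct page **pages)
 *	{
 *		int i, pinned;
 *
 *		pinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);
 *		if (pinned < 0)
 *			return pinned;		// -errno, nothing pinned
 *		if (pinned < nr_pages) {
 *			// partial pin: release what we got and bail out
 *			for (i = 0; i < pinned; i++)
 *				put_page(pages[i]);
 *			return -EFAULT;
 *		}
 *
 *		// ... set up DMA to/from the pinned pages ...
 *
 *		for (i = 0; i < nr_pages; i++)
 *			put_page(pages[i]);
 *		return 0;
 *	}
 */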
/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);