[glibc/pb-stable.git] / nptl / allocatestack.c
/* Copyright (C) 2002 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <dl-sysdep.h>
#include <tls.h>


/* Most architectures have exactly one stack pointer.  Some have more.  */
#define STACK_VARIABLES void *stackaddr

/* How to pass the values to the 'create_thread' function.  */
#define STACK_VARIABLES_ARGS stackaddr

/* How to declare the function which gets these parameters.  */
#define STACK_VARIABLES_PARMS void *stackaddr


/* Default alignment of stack.  */
#ifndef STACK_ALIGN
# define STACK_ALIGN __alignof__ (long double)
#endif

/* Default value for minimal stack size after allocating thread
   descriptor and guard.  */
#ifndef MINIMAL_REST_STACK
# define MINIMAL_REST_STACK 4096
#endif


/* Cache handling for not-yet-freed stacks.  */

/* Maximum size of the stack cache, in bytes.  */
static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40 MiB by default.  */
static size_t stack_cache_actsize;

/* Mutex protecting the stack cache and the lists of stacks.  */
static lll_lock_t stack_cache_lock = LLL_LOCK_INITIALIZER;

/* List of queued stack frames.  */
static LIST_HEAD (stack_cache);

/* List of the stacks in use.  */
static LIST_HEAD (stack_used);

/* List of the threads with user provided stacks in use.  No need to
   initialize this, since it's done in __pthread_initialize_minimal.  */
list_t __stack_user __attribute__ ((nocommon));
hidden_def (__stack_user)

/* Number of threads running.  */
static unsigned int nptl_nthreads = 1;


/* Check whether the stack is still used or not.  */
#define FREE_P(descr) ((descr)->tid == 0)
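/* The kernel clears the 'tid' field when the thread has exited (see
   the comment in __deallocate_stack below), so a zero value means the
   stack is no longer in use.  */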


/* We create a doubly linked list of all cache entries.  Doubly linked
   because this allows removing entries from the end.  */


/* Get a stack frame from the cache.  We have to match by size since
   some blocks might be too small or far too large.  */
static struct pthread *
get_cached_stack (size_t *sizep, void **memp)
{
  size_t size = *sizep;
  struct pthread *result = NULL;
  list_t *entry;

  lll_lock (stack_cache_lock);

  /* Search the cache for a matching entry.  We search for the
     smallest stack which has at least the required size.  Note that
     in normal situations the size of all allocated stacks is the
     same.  At the very least there are only a few different sizes.
     Therefore this loop will exit early most of the time with an
     exact match.  */
  list_for_each (entry, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, header.data.list);
      if (FREE_P (curr) && curr->stackblock_size >= size)
        {
          if (curr->stackblock_size == size)
            {
              result = curr;
              break;
            }

          if (result == NULL
              || result->stackblock_size > curr->stackblock_size)
            result = curr;
        }
    }

  if (__builtin_expect (result == NULL, 0)
      /* Make sure the size difference is not too excessive.  In that
         case we do not use the block.  */
      || __builtin_expect (result->stackblock_size > 4 * size, 0))
    {
      /* Release the lock.  */
      lll_unlock (stack_cache_lock);

      return NULL;
    }

  /* Dequeue the entry.  */
  list_del (&result->header.data.list);

  /* And add to the list of stacks in use.  */
  list_add (&result->header.data.list, &stack_used);

  /* One more thread.  */
  ++nptl_nthreads;

  /* And decrease the cache size.  */
  stack_cache_actsize -= result->stackblock_size;

  /* Release the lock early.  */
  lll_unlock (stack_cache_lock);

  *sizep = result->stackblock_size;
  *memp = result->stackblock;

  /* Cancellation handling is back to the default.  */
  result->cancelhandling = 0;
  result->cleanup = NULL;

  /* No pending event.  */
  result->nextevent = NULL;

  /* Clear the DTV.  */
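  /* dtv[-1].counter holds the number of entries allocated in the
     vector; counter + 1 elements starting at dtv[0] therefore cover
     the whole array.  */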
  dtv_t *dtv = GET_DTV (result);
  memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t));

  /* Re-initialize the TLS.  */
  return _dl_allocate_tls_init (result);
}


/* Add a stack frame which is not used anymore to the stack cache.
   Must be called with the cache lock held.  */
static void
queue_stack (struct pthread *stack)
{
  /* We unconditionally add the stack to the list.  The memory may
     still be in use but it will not be reused until the kernel marks
     the stack as not used anymore.  */
  list_add (&stack->header.data.list, &stack_cache);

  stack_cache_actsize += stack->stackblock_size;
  if (__builtin_expect (stack_cache_actsize > stack_cache_maxsize, 0))
    {
      /* We reduce the size of the cache.  Remove the last entries
         until the size is below the limit.  */
      list_t *entry;
      list_t *prev;

      /* Search from the end of the list.  */
      list_for_each_prev_safe (entry, prev, &stack_cache)
        {
          struct pthread *curr;

          curr = list_entry (entry, struct pthread, header.data.list);
          if (FREE_P (curr))
            {
              /* Unlink the block.  */
              list_del (entry);

              /* Account for the freed memory.  */
              stack_cache_actsize -= curr->stackblock_size;

              /* Free the memory associated with the ELF TLS.  */
              _dl_deallocate_tls (curr, false);

              /* Remove this block.  This should never fail.  If it
                 does, something is really wrong.  */
              if (munmap (curr->stackblock, curr->stackblock_size) != 0)
                abort ();

              /* Maybe we have freed enough.  */
              if (stack_cache_actsize <= stack_cache_maxsize)
                break;
            }
        }
    }
}
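

/* Allocate a stack block and a thread descriptor for a new thread.
   ATTR describes the requested stack (size, guard size, or a
   user-provided stack block).  On success *PDP is set to the new
   descriptor, *STACK to the address to be used as the thread's stack
   pointer, and zero is returned.  Otherwise an errno value is
   returned.  */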
static int
allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
                void **stack)
{
  struct pthread *pd;
  size_t size;
  size_t pagesize = __sysconf (_SC_PAGESIZE);

  assert (attr != NULL);
  assert (powerof2 (pagesize));
  assert (TCB_ALIGNMENT >= STACK_ALIGN);

  /* Get the stack size from the attribute if it is set.  Otherwise we
     use the default we determined at start time.  */
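  /* ('x ?: y' is the GNU conditional-expression extension: it
     evaluates to x unless x is zero, in which case it evaluates
     to y.)  */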
  size = attr->stacksize ?: __default_stacksize;

  /* Get memory for the stack.  */
  if (__builtin_expect (attr->flags & ATTR_FLAG_STACKADDR, 0))
    {
      uintptr_t adj;

      /* If the user also specified the size of the stack make sure it
         is large enough.  */
      if (attr->stacksize != 0
          && attr->stacksize < (__static_tls_size + MINIMAL_REST_STACK))
        return EINVAL;

      /* Adjust stack size for alignment of the TLS block.  */
      adj = ((uintptr_t) attr->stackaddr) & (__static_tls_align - 1);
      assert (size > adj);

      /* The user provided some memory.  Let's hope it matches the
         size...  We do not allocate guard pages if the user provided
         the stack.  It is the user's responsibility to do this if it
         is wanted.  */
      pd = (struct pthread *) (((uintptr_t) attr->stackaddr - adj)
                               & ~(__alignof (struct pthread) - 1)) - 1;
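      /* That is: take the top of the user-supplied stack, subtract the
         TLS alignment adjustment, round down to the alignment of
         struct pthread, and place the descriptor immediately below
         that address.  */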

      /* The user provided stack memory needs to be cleared.  */
      memset (pd, '\0', sizeof (struct pthread));

      /* The first TSD block is included in the TCB.  */
      pd->specific[0] = pd->specific_1stblock;

      /* Initialize the lock.  */
      pd->lock = LLL_LOCK_INITIALIZER;

      /* Remember the stack-related values.  Signal that this stack
         must not be put into the stack cache.  */
      pd->stackblock = (char *) attr->stackaddr - size;
      pd->stackblock_size = size - adj;

      /* This is a user-provided stack.  */
      pd->user_stack = true;

      /* There is at least one more thread.  */
      pd->header.data.multiple_threads = 1;

#ifdef NEED_DL_SYSINFO
      /* Copy the sysinfo value from the parent.  */
      pd->header.data.sysinfo
        = THREAD_GETMEM (THREAD_SELF, header.data.sysinfo);
#endif

      /* Allocate the DTV for this thread.  */
      if (_dl_allocate_tls (pd) == NULL)
        /* Something went wrong.  */
        return errno;

      lll_lock (stack_cache_lock);

      /* And add to the list of stacks in use.  */
      list_add (&pd->header.data.list, &__stack_user);

      /* One more thread.  */
      ++nptl_nthreads;

      lll_unlock (stack_cache_lock);
    }
  else
    {
      /* Allocate some anonymous memory.  If possible use the
         cache.  */
      size_t guardsize;
      size_t reqsize;
      void *mem;

      /* Adjust the stack size for alignment.  */
      size &= ~(__static_tls_align - 1);
      assert (size != 0);

      /* Make sure the size of the stack is large enough for the guard
         and the thread descriptor.  */
      guardsize = (attr->guardsize + pagesize - 1) & ~(pagesize - 1);
      if (__builtin_expect (size < (guardsize + __static_tls_size
                                    + MINIMAL_REST_STACK), 0))
        /* The stack is too small (or the guard too large).  */
        return EINVAL;
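
      /* Remember the requested size, then try to reuse a stack from
         the cache; get_cached_stack updates SIZE and MEM when it
         succeeds.  */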
      reqsize = size;
      pd = get_cached_stack (&size, &mem);
      if (pd == NULL)
        {
          mem = mmap (NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

          if (__builtin_expect (mem == MAP_FAILED, 0))
            return errno;

          /* 'size' is guaranteed to be greater than zero.  So we can
             never get a NULL pointer back from mmap.  */
          assert (mem != NULL);

          /* Place the thread descriptor at the end of the stack.  */
          pd = (struct pthread *) ((char *) mem + size) - 1;

          /* Remember the stack-related values.  */
          pd->stackblock = mem;
          pd->stackblock_size = size;

          /* We allocated the first block of the thread-specific data
             array.  This address will not change for the lifetime of
             this descriptor.  */
          pd->specific[0] = pd->specific_1stblock;

          /* Initialize the lock.  */
          pd->lock = LLL_LOCK_INITIALIZER;

          /* There is at least one more thread.  */
          pd->header.data.multiple_threads = 1;

#ifdef NEED_DL_SYSINFO
          /* Copy the sysinfo value from the parent.  */
          pd->header.data.sysinfo
            = THREAD_GETMEM (THREAD_SELF, header.data.sysinfo);
#endif

          /* Allocate the DTV for this thread.  */
          if (_dl_allocate_tls (pd) == NULL)
            {
              /* Something went wrong.  */
              int err = errno;

              /* Free the stack memory we just allocated.  */
              munmap (mem, size);

              return err;
            }

          lll_lock (stack_cache_lock);

          /* And add to the list of stacks in use.  */
          list_add (&pd->header.data.list, &stack_used);

          /* One more thread.  */
          ++nptl_nthreads;

          lll_unlock (stack_cache_lock);

          /* Note that all of the stack and the thread descriptor is
             zeroed.  This means we do not have to initialize fields
             with initial value zero.  This is specifically true for
             the 'tid' field which is always set back to zero once the
             stack is not used anymore and for the 'guardsize' field
             which will be read next.  */
        }

      /* Create or resize the guard area if necessary.  */
      if (__builtin_expect (guardsize > pd->guardsize, 0))
        {
          if (mprotect (mem, guardsize, PROT_NONE) != 0)
            {
              int err;
            mprot_error:
              err = errno;

              lll_lock (stack_cache_lock);

              /* Remove the thread from the list.  */
              list_del (&pd->header.data.list);

              /* The thread is gone.  */
              --nptl_nthreads;

              lll_unlock (stack_cache_lock);

              /* Free the memory regardless of whether the size of the
                 cache is over the limit or not.  If this piece of
                 memory caused problems we had better not use it
                 anymore.  Uh, and we ignore possible errors.  There
                 is nothing we could do.  */
              (void) munmap (mem, size);

              return err;
            }

          pd->guardsize = guardsize;
        }
      else if (__builtin_expect (pd->guardsize - guardsize > size - reqsize,
                                 0))
        {
          /* The old guard area is too large.  */
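          /* This can only happen when the stack was taken from the
             cache and its previous owner used a larger guard; make the
             excess pages accessible again so they can be used for the
             stack.  */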
          if (mprotect ((char *) mem + guardsize,
                        pd->guardsize - guardsize,
                        PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
            goto mprot_error;

          pd->guardsize = guardsize;
        }
    }

  /* We place the thread descriptor at the end of the stack.  */
  *pdp = pd;

#if TLS_TCB_AT_TP
  /* The stack begins below the TCB and the static TLS block.  */
  *stack = ((char *) (pd + 1) - __static_tls_size);
#else
# error "Implement me"
#endif

  return 0;
}


/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
#define ALLOCATE_STACK(attr, pd) allocate_stack (attr, pd, &stackaddr)
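

/* Give a stack block back.  PD is removed from the list it is on;
   stacks allocated by the library are queued in the stack cache, while
   for user-provided stacks only the associated TLS memory is freed.  */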
void
__deallocate_stack (struct pthread *pd)
{
  lll_lock (stack_cache_lock);

  /* Remove the thread from the list of threads with user defined
     stacks.  */
  list_del (&pd->header.data.list);

  /* Not much to do.  Just free the mmap()ed memory.  Note that we do
     not reset the 'used' flag in the 'tid' field.  This is done by
     the kernel.  If no thread has been created yet this field is
     still zero.  */
  if (__builtin_expect (! pd->user_stack, 1))
    (void) queue_stack (pd);
  else
    /* Free the memory associated with the ELF TLS.  */
    _dl_deallocate_tls (pd, false);

  /* One less thread.  */
  --nptl_nthreads;

  lll_unlock (stack_cache_lock);
}


/* In case of a fork() call the memory allocation in the child will be
   the same but only one thread is running.  All stacks except that of
   the one running thread are not used anymore.  We have to recycle
   them.  */
void
__reclaim_stacks (void)
{
  struct pthread *self = (struct pthread *) THREAD_SELF;

  /* No locking necessary.  The caller's stack is the only one in use.  */

  /* Mark all stacks except the still running one as free.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (runp, struct pthread, header.data.list);
      if (curp != self)
        {
          /* This marks the stack as free.  */
          curp->tid = 0;

          /* Account for the size of the stack.  */
          stack_cache_actsize += curp->stackblock_size;
        }
    }

  /* Add the stacks of all running threads to the cache.  */
  list_splice (&stack_used, &stack_cache);

  /* Remove the entry for the current thread from the cache list
     and add it to the list of running threads.  Which of the two
     lists is decided by the user_stack flag.  */
  list_del (&self->header.data.list);

  /* Re-initialize the lists for all the threads.  */
  INIT_LIST_HEAD (&stack_used);
  INIT_LIST_HEAD (&__stack_user);

  if (__builtin_expect (THREAD_GETMEM (self, user_stack), 0))
    list_add (&self->header.data.list, &__stack_user);
  else
    list_add (&self->header.data.list, &stack_used);

  /* There is one thread running.  */
  nptl_nthreads = 1;

  /* Initialize the lock.  */
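  /* The fork may have happened while another thread held the lock;
     only the forking thread survives in the child, so simply resetting
     it is safe.  */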
  stack_cache_lock = LLL_LOCK_INITIALIZER;
}