/* glibc: nptl/pthread_getattr_np.c  */
/* Copyright (C) 2002-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <ldsodefs.h>
32 int
33 pthread_getattr_np (pthread_t thread_id, pthread_attr_t *attr)
35 struct pthread *thread = (struct pthread *) thread_id;
36 struct pthread_attr *iattr = (struct pthread_attr *) attr;
37 int ret = 0;
39 lll_lock (thread->lock, LLL_PRIVATE);
41 /* The thread library is responsible for keeping the values in the
42 thread desriptor up-to-date in case the user changes them. */
43 memcpy (&iattr->schedparam, &thread->schedparam,
44 sizeof (struct sched_param));
45 iattr->schedpolicy = thread->schedpolicy;
47 /* Clear the flags work. */
48 iattr->flags = thread->flags;
50 /* The thread might be detached by now. */
51 if (IS_DETACHED (thread))
52 iattr->flags |= ATTR_FLAG_DETACHSTATE;
54 /* This is the guardsize after adjusting it. */
55 iattr->guardsize = thread->reported_guardsize;
57 /* The sizes are subject to alignment. */
58 if (__glibc_likely (thread->stackblock != NULL))
60 iattr->stacksize = thread->stackblock_size;
61 #if _STACK_GROWS_DOWN
62 iattr->stackaddr = (char *) thread->stackblock + iattr->stacksize;
63 #else
64 iattr->stackaddr = (char *) thread->stackblock;
65 #endif
67 else
69 /* No stack information available. This must be for the initial
70 thread. Get the info in some magical way. */
71 assert (abs (thread->pid) == thread->tid);
73 /* Stack size limit. */
74 struct rlimit rl;
76 /* The safest way to get the top of the stack is to read
77 /proc/self/maps and locate the line into which
78 __libc_stack_end falls. */
79 FILE *fp = fopen ("/proc/self/maps", "rce");
80 if (fp == NULL)
81 ret = errno;
82 /* We need the limit of the stack in any case. */
83 else
85 if (getrlimit (RLIMIT_STACK, &rl) != 0)
86 ret = errno;
87 else
89 /* We consider the main process stack to have ended with
90 the page containing __libc_stack_end. There is stuff below
91 it in the stack too, like the program arguments, environment
92 variables and auxv info, but we ignore those pages when
93 returning size so that the output is consistent when the
94 stack is marked executable due to a loaded DSO requiring
95 it. */
96 void *stack_end = (void *) ((uintptr_t) __libc_stack_end
97 & -(uintptr_t) GLRO(dl_pagesize));
98 #if _STACK_GROWS_DOWN
99 stack_end += GLRO(dl_pagesize);
100 #endif
101 /* We need no locking. */
102 __fsetlocking (fp, FSETLOCKING_BYCALLER);
104 /* Until we found an entry (which should always be the case)
105 mark the result as a failure. */
106 ret = ENOENT;
108 char *line = NULL;
109 size_t linelen = 0;
110 #if _STACK_GROWS_DOWN
111 uintptr_t last_to = 0;
112 #endif
114 while (! feof_unlocked (fp))
116 if (__getdelim (&line, &linelen, '\n', fp) <= 0)
117 break;
119 uintptr_t from;
120 uintptr_t to;
121 if (sscanf (line, "%" SCNxPTR "-%" SCNxPTR, &from, &to) != 2)
122 continue;
123 if (from <= (uintptr_t) __libc_stack_end
124 && (uintptr_t) __libc_stack_end < to)
126 /* Found the entry. Now we have the info we need. */
127 iattr->stackaddr = stack_end;
128 iattr->stacksize =
129 rl.rlim_cur - (size_t) (to - (uintptr_t) stack_end);
131 /* Cut it down to align it to page size since otherwise we
132 risk going beyond rlimit when the kernel rounds up the
133 stack extension request. */
134 iattr->stacksize = (iattr->stacksize
135 & -(intptr_t) GLRO(dl_pagesize));
136 #if _STACK_GROWS_DOWN
137 /* The limit might be too high. */
138 if ((size_t) iattr->stacksize
139 > (size_t) iattr->stackaddr - last_to)
140 iattr->stacksize = (size_t) iattr->stackaddr - last_to;
141 #else
142 /* The limit might be too high. */
143 if ((size_t) iattr->stacksize
144 > to - (size_t) iattr->stackaddr)
145 iattr->stacksize = to - (size_t) iattr->stackaddr;
146 #endif
147 /* We succeed and no need to look further. */
148 ret = 0;
149 break;
151 #if _STACK_GROWS_DOWN
152 last_to = to;
153 #endif
156 free (line);
159 fclose (fp);
163 iattr->flags |= ATTR_FLAG_STACKADDR;
165 if (ret == 0)
167 size_t size = 16;
168 cpu_set_t *cpuset = NULL;
172 size <<= 1;
174 void *newp = realloc (cpuset, size);
175 if (newp == NULL)
177 ret = ENOMEM;
178 break;
180 cpuset = (cpu_set_t *) newp;
182 ret = __pthread_getaffinity_np (thread_id, size, cpuset);
184 /* Pick some ridiculous upper limit. Is 8 million CPUs enough? */
185 while (ret == EINVAL && size < 1024 * 1024);
187 if (ret == 0)
189 iattr->cpuset = cpuset;
190 iattr->cpusetsize = size;
192 else
194 free (cpuset);
195 if (ret == ENOSYS)
197 /* There is no such functionality. */
198 ret = 0;
199 iattr->cpuset = NULL;
200 iattr->cpusetsize = 0;
205 lll_unlock (thread->lock, LLL_PRIVATE);
207 return ret;