/* Source: glibc (glibc.git), nptl/pthread_getattr_np.c
   blob 06093b3d9270c2fcaa5af191852c9eca25dd506f
   (Scrape artifact: the line "Further harden glibc malloc metadata against
   1-byte overflows" is an unrelated commit-message caption from the blob
   viewer, not part of this file.)  */
/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
19 #include <assert.h>
20 #include <errno.h>
21 #include <inttypes.h>
22 #include <stdio.h>
23 #include <stdio_ext.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <sys/resource.h>
27 #include "pthreadP.h"
28 #include <lowlevellock.h>
29 #include <ldsodefs.h>
32 int
33 pthread_getattr_np (pthread_t thread_id, pthread_attr_t *attr)
35 struct pthread *thread = (struct pthread *) thread_id;
36 struct pthread_attr *iattr = (struct pthread_attr *) attr;
37 int ret = 0;
39 lll_lock (thread->lock, LLL_PRIVATE);
41 /* The thread library is responsible for keeping the values in the
42 thread desriptor up-to-date in case the user changes them. */
43 memcpy (&iattr->schedparam, &thread->schedparam,
44 sizeof (struct sched_param));
45 iattr->schedpolicy = thread->schedpolicy;
47 /* Clear the flags work. */
48 iattr->flags = thread->flags;
50 /* The thread might be detached by now. */
51 if (IS_DETACHED (thread))
52 iattr->flags |= ATTR_FLAG_DETACHSTATE;
54 /* This is the guardsize after adjusting it. */
55 iattr->guardsize = thread->reported_guardsize;
57 /* The sizes are subject to alignment. */
58 if (__glibc_likely (thread->stackblock != NULL))
60 iattr->stacksize = thread->stackblock_size;
61 #if _STACK_GROWS_DOWN
62 iattr->stackaddr = (char *) thread->stackblock + iattr->stacksize;
63 #else
64 iattr->stackaddr = (char *) thread->stackblock;
65 #endif
67 else
69 /* No stack information available. This must be for the initial
70 thread. Get the info in some magical way. */
72 /* Stack size limit. */
73 struct rlimit rl;
75 /* The safest way to get the top of the stack is to read
76 /proc/self/maps and locate the line into which
77 __libc_stack_end falls. */
78 FILE *fp = fopen ("/proc/self/maps", "rce");
79 if (fp == NULL)
80 ret = errno;
81 /* We need the limit of the stack in any case. */
82 else
84 if (getrlimit (RLIMIT_STACK, &rl) != 0)
85 ret = errno;
86 else
88 /* We consider the main process stack to have ended with
89 the page containing __libc_stack_end. There is stuff below
90 it in the stack too, like the program arguments, environment
91 variables and auxv info, but we ignore those pages when
92 returning size so that the output is consistent when the
93 stack is marked executable due to a loaded DSO requiring
94 it. */
95 void *stack_end = (void *) ((uintptr_t) __libc_stack_end
96 & -(uintptr_t) GLRO(dl_pagesize));
97 #if _STACK_GROWS_DOWN
98 stack_end += GLRO(dl_pagesize);
99 #endif
100 /* We need no locking. */
101 __fsetlocking (fp, FSETLOCKING_BYCALLER);
103 /* Until we found an entry (which should always be the case)
104 mark the result as a failure. */
105 ret = ENOENT;
107 char *line = NULL;
108 size_t linelen = 0;
109 #if _STACK_GROWS_DOWN
110 uintptr_t last_to = 0;
111 #endif
113 while (! feof_unlocked (fp))
115 if (__getdelim (&line, &linelen, '\n', fp) <= 0)
116 break;
118 uintptr_t from;
119 uintptr_t to;
120 if (sscanf (line, "%" SCNxPTR "-%" SCNxPTR, &from, &to) != 2)
121 continue;
122 if (from <= (uintptr_t) __libc_stack_end
123 && (uintptr_t) __libc_stack_end < to)
125 /* Found the entry. Now we have the info we need. */
126 iattr->stackaddr = stack_end;
127 iattr->stacksize =
128 rl.rlim_cur - (size_t) (to - (uintptr_t) stack_end);
130 /* Cut it down to align it to page size since otherwise we
131 risk going beyond rlimit when the kernel rounds up the
132 stack extension request. */
133 iattr->stacksize = (iattr->stacksize
134 & -(intptr_t) GLRO(dl_pagesize));
135 #if _STACK_GROWS_DOWN
136 /* The limit might be too high. */
137 if ((size_t) iattr->stacksize
138 > (size_t) iattr->stackaddr - last_to)
139 iattr->stacksize = (size_t) iattr->stackaddr - last_to;
140 #else
141 /* The limit might be too high. */
142 if ((size_t) iattr->stacksize
143 > to - (size_t) iattr->stackaddr)
144 iattr->stacksize = to - (size_t) iattr->stackaddr;
145 #endif
146 /* We succeed and no need to look further. */
147 ret = 0;
148 break;
150 #if _STACK_GROWS_DOWN
151 last_to = to;
152 #endif
155 free (line);
158 fclose (fp);
162 iattr->flags |= ATTR_FLAG_STACKADDR;
164 if (ret == 0)
166 size_t size = 16;
167 cpu_set_t *cpuset = NULL;
171 size <<= 1;
173 void *newp = realloc (cpuset, size);
174 if (newp == NULL)
176 ret = ENOMEM;
177 break;
179 cpuset = (cpu_set_t *) newp;
181 ret = __pthread_getaffinity_np (thread_id, size, cpuset);
183 /* Pick some ridiculous upper limit. Is 8 million CPUs enough? */
184 while (ret == EINVAL && size < 1024 * 1024);
186 if (ret == 0)
188 iattr->cpuset = cpuset;
189 iattr->cpusetsize = size;
191 else
193 free (cpuset);
194 if (ret == ENOSYS)
196 /* There is no such functionality. */
197 ret = 0;
198 iattr->cpuset = NULL;
199 iattr->cpusetsize = 0;
204 lll_unlock (thread->lock, LLL_PRIVATE);
206 return ret;