[glibc.git] / nptl / pthread_getattr_np.c
/* Copyright (C) 2002-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <ldsodefs.h>

int
pthread_getattr_np (thread_id, attr)
     pthread_t thread_id;
     pthread_attr_t *attr;
{
  struct pthread *thread = (struct pthread *) thread_id;
  struct pthread_attr *iattr = (struct pthread_attr *) attr;
  int ret = 0;
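
  /* Serialize against concurrent changes to the thread's descriptor
     while its state is copied below.  */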
  lll_lock (thread->lock, LLL_PRIVATE);

  /* The thread library is responsible for keeping the values in the
     thread descriptor up-to-date in case the user changes them.  */
  memcpy (&iattr->schedparam, &thread->schedparam,
          sizeof (struct sched_param));
  iattr->schedpolicy = thread->schedpolicy;

  /* Copy the flags word.  */
  iattr->flags = thread->flags;

  /* The thread might be detached by now.  */
  if (IS_DETACHED (thread))
    iattr->flags |= ATTR_FLAG_DETACHSTATE;

  /* This is the guardsize after adjusting it.  */
  iattr->guardsize = thread->reported_guardsize;

  /* The sizes are subject to alignment.  */
  if (__glibc_likely (thread->stackblock != NULL))
    {
      iattr->stacksize = thread->stackblock_size;
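      /* The reported address is the end of the allocated stack block,
         which is where a downward-growing stack starts.  */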
      iattr->stackaddr = (char *) thread->stackblock + iattr->stacksize;
    }
  else
    {
      /* No stack information available.  This must be for the initial
         thread.  Get the info in some magical way.  */
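      /* Only the initial thread has a TID equal to the process ID, so
         the assertion below checks that this really is the main thread.  */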
      assert (abs (thread->pid) == thread->tid);

      /* Stack size limit.  */
      struct rlimit rl;

      /* The safest way to get the top of the stack is to read
         /proc/self/maps and locate the line into which
         __libc_stack_end falls.  */
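      /* "e" sets O_CLOEXEC on the underlying descriptor and "c" keeps the
         stream from becoming a thread cancellation point (glibc fopen
         extensions).  */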
      FILE *fp = fopen ("/proc/self/maps", "rce");
      if (fp == NULL)
        ret = errno;
      /* We need the limit of the stack in any case.  */
      else
        {
          if (getrlimit (RLIMIT_STACK, &rl) != 0)
            ret = errno;
          else
            {
              /* We consider the main process stack to have ended with
                 the page containing __libc_stack_end.  There is stuff below
                 it in the stack too, like the program arguments, environment
                 variables and auxv info, but we ignore those pages when
                 returning size so that the output is consistent when the
                 stack is marked executable due to a loaded DSO requiring
                 it.  */
              void *stack_end = (void *) ((uintptr_t) __libc_stack_end
                                          & -(uintptr_t) GLRO(dl_pagesize));
#if _STACK_GROWS_DOWN
              stack_end += GLRO(dl_pagesize);
#endif
              /* We need no locking.  */
              __fsetlocking (fp, FSETLOCKING_BYCALLER);

              /* Until we find an entry (which should always be the case),
                 mark the result as a failure.  */
              ret = ENOENT;

              char *line = NULL;
              size_t linelen = 0;
              uintptr_t last_to = 0;

              while (! feof_unlocked (fp))
                {
                  if (__getdelim (&line, &linelen, '\n', fp) <= 0)
                    break;

                  uintptr_t from;
                  uintptr_t to;
                  if (sscanf (line, "%" SCNxPTR "-%" SCNxPTR, &from, &to) != 2)
                    continue;
                  if (from <= (uintptr_t) __libc_stack_end
                      && (uintptr_t) __libc_stack_end < to)
                    {
                      /* Found the entry.  Now we have the info we need.  */
                      iattr->stackaddr = stack_end;
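                      /* Everything from STACK_END up to TO already counts
                         against RLIMIT_STACK, so what remains for downward
                         growth is the limit minus that slice.  */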
                      iattr->stacksize =
                        rl.rlim_cur - (size_t) (to - (uintptr_t) stack_end);

                      /* Cut it down to align it to page size since otherwise we
                         risk going beyond rlimit when the kernel rounds up the
                         stack extension request.  */
                      iattr->stacksize = (iattr->stacksize
                                          & -(intptr_t) GLRO(dl_pagesize));

                      /* The limit might be too high: do not let the reported
                         stack extend into the preceding mapping.  */
                      if ((size_t) iattr->stacksize
                          > (size_t) iattr->stackaddr - last_to)
                        iattr->stacksize = (size_t) iattr->stackaddr - last_to;

                      /* We succeeded; no need to look further.  */
                      ret = 0;
                      break;
                    }
                  last_to = to;
                }

              free (line);
            }

          fclose (fp);
        }
    }

  iattr->flags |= ATTR_FLAG_STACKADDR;

  if (ret == 0)
    {
      size_t size = 16;
      cpu_set_t *cpuset = NULL;
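
      /* The affinity call fails with EINVAL while the buffer is smaller
         than the kernel's affinity mask, so keep doubling it until the
         call succeeds or the size becomes implausibly large.  */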
      do
        {
          size <<= 1;

          void *newp = realloc (cpuset, size);
          if (newp == NULL)
            {
              ret = ENOMEM;
              break;
            }
          cpuset = (cpu_set_t *) newp;

          ret = __pthread_getaffinity_np (thread_id, size, cpuset);
        }
      /* Pick some ridiculous upper limit.  Is 8 million CPUs enough?  */
      while (ret == EINVAL && size < 1024 * 1024);

      if (ret == 0)
        {
          iattr->cpuset = cpuset;
          iattr->cpusetsize = size;
        }
      else
        {
          free (cpuset);
          if (ret == ENOSYS)
            {
              /* There is no such functionality.  */
              ret = 0;
              iattr->cpuset = NULL;
              iattr->cpusetsize = 0;
            }
        }
    }

  lll_unlock (thread->lock, LLL_PRIVATE);

  return ret;
}
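
/* Illustrative usage sketch (assumes _GNU_SOURCE, <pthread.h> and
   <stdio.h>; link with -pthread): retrieve the calling thread's
   attributes, query the stack through the standard accessors, and
   destroy the attribute object, which releases the affinity mask
   allocated above.

     #define _GNU_SOURCE
     #include <pthread.h>
     #include <stdio.h>

     int
     main (void)
     {
       pthread_attr_t attr;
       void *stackaddr;
       size_t stacksize;

       if (pthread_getattr_np (pthread_self (), &attr) == 0)
         {
           pthread_attr_getstack (&attr, &stackaddr, &stacksize);
           printf ("stack %p, size %zu\n", stackaddr, stacksize);
           pthread_attr_destroy (&attr);
         }
       return 0;
     }
*/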