nptl/tpp.c
/* Thread Priority Protect helpers.
   Copyright (C) 2006-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <atomic.h>
#include <errno.h>
#include <pthreadP.h>
#include <sched.h>
#include <stdlib.h>

int __sched_fifo_min_prio = -1;
int __sched_fifo_max_prio = -1;

/* We only want to initialize __sched_fifo_min_prio and __sched_fifo_max_prio
   once.  The standard solution would be similar to pthread_once, but then
   readers would need to use an acquire fence.  In this specific case,
   initialization is comprised of just idempotent writes to two variables
   that have an initial value of -1.  Therefore, we can treat each variable as
   a separate, at-least-once initialized value.  This enables using just
   relaxed MO loads and stores, but requires that consumers check for
   initialization of each value that is to be used; see
   __pthread_tpp_change_priority for an example.  */
void
__init_sched_fifo_prio (void)
{
  atomic_store_relaxed (&__sched_fifo_max_prio,
                        sched_get_priority_max (SCHED_FIFO));
  atomic_store_relaxed (&__sched_fifo_min_prio,
                        sched_get_priority_min (SCHED_FIFO));
}
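
/* Update this thread's priority ceiling bookkeeping: NEW_PRIO is a
   ceiling being acquired and PREVIOUS_PRIO one being released, each -1
   if not applicable.  Adjusts the thread's scheduling priority when the
   effective ceiling changes.  Returns 0 on success or an errno value
   (ENOMEM, EAGAIN, or an error from the scheduler calls) on failure.  */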
int
__pthread_tpp_change_priority (int previous_prio, int new_prio)
{
  struct pthread *self = THREAD_SELF;
  struct priority_protection_data *tpp = THREAD_GETMEM (self, tpp);
  int fifo_min_prio = atomic_load_relaxed (&__sched_fifo_min_prio);
  int fifo_max_prio = atomic_load_relaxed (&__sched_fifo_max_prio);
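
  /* On first use, allocate the per-thread priority protection data,
     sized to hold one counter for each priority in the SCHED_FIFO
     range.  */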
  if (tpp == NULL)
    {
      /* See __init_sched_fifo_prio.  We need both the min and max prio,
         so need to check both, and run initialization if either one is
         not initialized.  The memory model's write-read coherence rule
         makes this work.  */
      if (fifo_min_prio == -1 || fifo_max_prio == -1)
        {
          __init_sched_fifo_prio ();
          fifo_min_prio = atomic_load_relaxed (&__sched_fifo_min_prio);
          fifo_max_prio = atomic_load_relaxed (&__sched_fifo_max_prio);
        }

      size_t size = sizeof *tpp;
      size += (fifo_max_prio - fifo_min_prio + 1)
              * sizeof (tpp->priomap[0]);
      tpp = calloc (size, 1);
      if (tpp == NULL)
        return ENOMEM;
      tpp->priomax = fifo_min_prio - 1;
      THREAD_SETMEM (self, tpp, tpp);
    }
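
  /* Both priorities must be -1 (no ceiling) or lie within the
     SCHED_FIFO priority range.  */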
  assert (new_prio == -1
          || (new_prio >= fifo_min_prio
              && new_prio <= fifo_max_prio));
  assert (previous_prio == -1
          || (previous_prio >= fifo_min_prio
              && previous_prio <= fifo_max_prio));
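
  /* priomap counts, per priority level, how many ceilings at that level
     this thread currently holds; priomax caches the highest level with a
     nonzero count.  */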
  int priomax = tpp->priomax;
  int newpriomax = priomax;
  if (new_prio != -1)
    {
      if (tpp->priomap[new_prio - fifo_min_prio] + 1 == 0)
        return EAGAIN;
      ++tpp->priomap[new_prio - fifo_min_prio];
      if (new_prio > priomax)
        newpriomax = new_prio;
    }
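
  /* When a ceiling is released, drop its count; if the count reaches
     zero, it was the current maximum, and the newly acquired ceiling
     (if any) is lower, scan downwards for the next priority level still
     in use.  */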
  if (previous_prio != -1)
    {
      if (--tpp->priomap[previous_prio - fifo_min_prio] == 0
          && priomax == previous_prio
          && previous_prio > new_prio)
        {
          int i;
          for (i = previous_prio - 1; i >= fifo_min_prio; --i)
            if (tpp->priomap[i - fifo_min_prio])
              break;
          newpriomax = i;
        }
    }
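
  /* If the effective ceiling is unchanged, there is nothing to tell the
     scheduler.  */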
  if (priomax == newpriomax)
    return 0;

  lll_lock (self->lock, LLL_PRIVATE);
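
  /* Record the new ceiling, then make sure the cached scheduling
     parameters and policy are valid before adjusting the thread's
     priority.  */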
  tpp->priomax = newpriomax;

  int result = 0;

  if ((self->flags & ATTR_FLAG_SCHED_SET) == 0)
    {
      if (__sched_getparam (self->tid, &self->schedparam) != 0)
        result = errno;
      else
        self->flags |= ATTR_FLAG_SCHED_SET;
    }

  if ((self->flags & ATTR_FLAG_POLICY_SET) == 0)
    {
      self->schedpolicy = __sched_getscheduler (self->tid);
      if (self->schedpolicy == -1)
        result = errno;
      else
        self->flags |= ATTR_FLAG_POLICY_SET;
    }
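
  /* Adjust the kernel priority only when the old or new ceiling exceeds
     the thread's own priority: boost to the new ceiling, or restore the
     thread's own priority when the ceiling drops below it.  */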
  if (result == 0)
    {
      struct sched_param sp = self->schedparam;
      if (sp.sched_priority < newpriomax || sp.sched_priority < priomax)
        {
          if (sp.sched_priority < newpriomax)
            sp.sched_priority = newpriomax;

          if (__sched_setscheduler (self->tid, self->schedpolicy, &sp) < 0)
            result = errno;
        }
    }

  lll_unlock (self->lock, LLL_PRIVATE);

  return result;
}
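
/* Return the calling thread's current scheduling priority, or -1 if it
   cannot be determined.  */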
int
__pthread_current_priority (void)
{
  struct pthread *self = THREAD_SELF;
  if ((self->flags & (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET))
      == (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET))
    return self->schedparam.sched_priority;
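
  /* Slow path: the scheduling parameters are not fully cached yet; fetch
     them under the thread's lock and record them for next time.  */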
  int result = 0;

  lll_lock (self->lock, LLL_PRIVATE);

  if ((self->flags & ATTR_FLAG_SCHED_SET) == 0)
    {
      if (__sched_getparam (self->tid, &self->schedparam) != 0)
        result = -1;
      else
        self->flags |= ATTR_FLAG_SCHED_SET;
    }

  if ((self->flags & ATTR_FLAG_POLICY_SET) == 0)
    {
      self->schedpolicy = __sched_getscheduler (self->tid);
      if (self->schedpolicy == -1)
        result = -1;
      else
        self->flags |= ATTR_FLAG_POLICY_SET;
    }

  if (result != -1)
    result = self->schedparam.sched_priority;

  lll_unlock (self->lock, LLL_PRIVATE);
  return result;
}