/*
 *  kernel/sched_cpupri.c
 *
 *  CPU priority management
 *
 *  Copyright (C) 2007-2008 Novell
 *
 *  Author: Gregory Haskins <ghaskins@novell.com>
 *
 *  This code tracks the priority of each CPU so that global migration
 *  decisions are easy to calculate.  Each CPU can be in a state as follows:
 *
 *                 (INVALID), IDLE, NORMAL, RT1, ... RT99
 *
 *  going from the lowest priority to the highest.  CPUs in the INVALID state
 *  are not eligible for routing.  The system maintains this state with
 *  a 2 dimensional bitmap (the first for priority class, the second for cpus
 *  in that class).  Therefore a typical application without affinity
 *  restrictions can find a suitable CPU with O(1) complexity (e.g. two bit
 *  searches).  For tasks with affinity restrictions, the algorithm has a
 *  worst case complexity of O(min(102, nr_domcpus)), though the scenario that
 *  yields the worst case search is fairly contrived.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; version 2
 *  of the License.
 */
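
/*
 * Layout sketch (illustrative; the authoritative definitions live in
 * sched_cpupri.h): cp->pri_active is a bitmap with one bit per priority
 * class, and cp->pri_to_cpu[] holds a cpumask of the CPUs currently in
 * each class.  The "two bit searches" above are: find a set bit in
 * pri_active, then find a set bit in that class's cpumask.
 */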

#include <linux/gfp.h>
#include "sched_cpupri.h"

/* Convert between a 140 based task->prio, and our 102 based cpupri */
static int convert_prio(int prio)
{
	int cpupri;

	if (prio == CPUPRI_INVALID)
		cpupri = CPUPRI_INVALID;
	else if (prio == MAX_PRIO)
		cpupri = CPUPRI_IDLE;
	else if (prio >= MAX_RT_PRIO)
		cpupri = CPUPRI_NORMAL;
	else
		cpupri = MAX_RT_PRIO - prio + 1;

	return cpupri;
}
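
/*
 * Worked example (illustrative, assuming the usual MAX_RT_PRIO == 100
 * and MAX_PRIO == 140):
 *
 *	convert_prio(140)  ->   0   (CPUPRI_IDLE)
 *	convert_prio(120)  ->   1   (CPUPRI_NORMAL, any non-RT prio)
 *	convert_prio(99)   ->   2   (lowest RT priority)
 *	convert_prio(0)    -> 101   (highest RT priority)
 *
 * which is how the 140 task priorities collapse into the 102 cpupri
 * values bounded by CPUPRI_NR_PRIORITIES.
 */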

#define for_each_cpupri_active(array, idx)                    \
	for_each_set_bit(idx, array, CPUPRI_NR_PRIORITIES)

/**
 * cpupri_find - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation.  By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times.  While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Returns: (int)bool - CPUs were found
 */
int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask)
{
	int idx      = 0;
	int task_pri = convert_prio(p->prio);

	for_each_cpupri_active(cp->pri_active, idx) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[idx];

		if (idx >= task_pri)
			break;

		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
			continue;

		if (lowest_mask) {
			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);

			/*
			 * We have to ensure that we have at least one bit
			 * still set in the array, since the map could have
			 * been concurrently emptied between the first and
			 * second reads of vec->mask.  If we hit this
			 * condition, simply act as though we never hit this
			 * priority level and continue on.
			 */
			if (cpumask_any(lowest_mask) >= nr_cpu_ids)
				continue;
		}

		return 1;
	}

	return 0;
}
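
/*
 * Usage sketch (illustrative; the real caller is the RT push/pull logic
 * in sched_rt.c, e.g. find_lowest_rq()):
 *
 *	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
 *		return -1;	(no CPU currently runs at a lower prio)
 *
 * A zero return simply means "nothing suitable right now"; per the note
 * above, the caller must tolerate the answer going stale immediately.
 */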

/**
 * cpupri_set - update the cpu priority setting
 * @cp: The cpupri context
 * @cpu: The target cpu
 * @newpri: The priority (INVALID-RT99) to assign to this CPU
 *
 * Note: Assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
	int *currpri = &cp->cpu_to_pri[cpu];
	int oldpri   = *currpri;
	unsigned long flags;

	newpri = convert_prio(newpri);

	BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);

	if (newpri == oldpri)
		return;

	/*
	 * If the cpu was currently mapped to a different value, we
	 * need to map it to the new value then remove the old value.
	 * Note, we must add the new value first, otherwise we risk the
	 * cpu being cleared from pri_active, and this cpu could be
	 * missed for a push or pull.
	 */
	if (likely(newpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

		raw_spin_lock_irqsave(&vec->lock, flags);

		cpumask_set_cpu(cpu, vec->mask);
		vec->count++;
		if (vec->count == 1)
			set_bit(newpri, cp->pri_active);

		raw_spin_unlock_irqrestore(&vec->lock, flags);
	}
	if (likely(oldpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];

		raw_spin_lock_irqsave(&vec->lock, flags);

		vec->count--;
		if (!vec->count)
			clear_bit(oldpri, cp->pri_active);
		cpumask_clear_cpu(cpu, vec->mask);

		raw_spin_unlock_irqrestore(&vec->lock, flags);
	}

	*currpri = newpri;
}
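
/*
 * Ordering example (illustrative): if CPU 3 moves from the RT50 class to
 * the RT70 class, the code above sets CPU 3 in pri_to_cpu for RT70 (and
 * its pri_active bit) before clearing it from RT50.  A concurrent
 * cpupri_find() may briefly see CPU 3 in both classes, which is harmless,
 * but never in neither, which could hide it from a push or pull.
 */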

/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 *
 * Returns: -ENOMEM if memory fails.
 */
int cpupri_init(struct cpupri *cp)
{
	int i;

	memset(cp, 0, sizeof(*cp));

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[i];

		raw_spin_lock_init(&vec->lock);

		if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
			goto cleanup;
	}

	for_each_possible_cpu(i)
		cp->cpu_to_pri[i] = CPUPRI_INVALID;

	return 0;

cleanup:
	for (i--; i >= 0; i--)
		free_cpumask_var(cp->pri_to_cpu[i].mask);

	return -ENOMEM;
}
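
/*
 * Pairing sketch (illustrative): callers init and clean up symmetrically,
 * roughly as the root-domain setup in sched.c does:
 *
 *	if (cpupri_init(&rd->cpupri) != 0)
 *		goto out;		(propagate the -ENOMEM)
 *	...
 *	cpupri_cleanup(&rd->cpupri);	(on root-domain teardown)
 */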

/**
 * cpupri_cleanup - clean up the cpupri structure
 * @cp: The cpupri context
 */
void cpupri_cleanup(struct cpupri *cp)
{
	int i;

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
}