sched: put back some stack hog changes that were undone in kernel/sched.c
[linux-2.6/mini2440.git] / fs / ioprio.c
blob 3569e0ad86a2e0ccc96cef6862b93b1e19f7b14e
/*
 * fs/ioprio.c
 *
 * Copyright (C) 2004 Jens Axboe <axboe@kernel.dk>
 *
 * Helper functions for setting/querying io priorities of processes. The
 * system calls closely mimic getpriority/setpriority, see the man page for
 * those. The prio argument is a composite of prio class and prio data, where
 * the data argument has meaning within that class. The standard scheduling
 * classes have 8 distinct prio levels, with 0 being the highest prio and 7
 * being the lowest.
 *
 * IOW, setting BE scheduling class with prio 2 is done ala:
 *
 * unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;
 *
 * ioprio_set(PRIO_PROCESS, pid, prio);
 *
 * See also Documentation/block/ioprio.txt
 */
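
/*
 * (Equivalent sketch: the same composite value can also be built with the
 * IOPRIO_PRIO_VALUE() helper used by get_task_ioprio() below, e.g.
 * IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 2).)
 */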
#include <linux/kernel.h>
#include <linux/ioprio.h>
#include <linux/blkdev.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/pid_namespace.h>
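
/*
 * set_task_ioprio() - apply a new io priority to one task: check that the
 * caller owns the task or has CAP_SYS_NICE, run the security hook, then
 * store the priority in the task's io_context (allocating one if needed).
 */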
static int set_task_ioprio(struct task_struct *task, int ioprio)
{
	int err;
	struct io_context *ioc;
	const struct cred *cred = current_cred(), *tcred;

	rcu_read_lock();
	tcred = __task_cred(task);
	if (tcred->uid != cred->euid &&
	    tcred->uid != cred->uid && !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	err = security_task_setioprio(task, ioprio);
	if (err)
		return err;

	task_lock(task);
	do {
		ioc = task->io_context;
		/* see wmb() in current_io_context() */
		smp_read_barrier_depends();

		if (ioc)
			break;

		ioc = alloc_io_context(GFP_ATOMIC, -1);
		if (!ioc) {
			err = -ENOMEM;
			break;
		}
		task->io_context = ioc;
	} while (1);

	if (!err) {
		ioc->ioprio = ioprio;
		ioc->ioprio_changed = 1;
	}

	task_unlock(task);
	return err;
}
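
/*
 * sys_ioprio_set() - validate the requested class/data pair (only a task
 * with CAP_SYS_ADMIN may select the RT class), then apply it to a single
 * process, a process group or all tasks of a user, as selected by
 * 'which'/'who'.
 */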
asmlinkage long sys_ioprio_set(int which, int who, int ioprio)
{
	int class = IOPRIO_PRIO_CLASS(ioprio);
	int data = IOPRIO_PRIO_DATA(ioprio);
	struct task_struct *p, *g;
	struct user_struct *user;
	struct pid *pgrp;
	int ret;

	switch (class) {
		case IOPRIO_CLASS_RT:
			if (!capable(CAP_SYS_ADMIN))
				return -EPERM;
			/* fall through, rt has prio field too */
		case IOPRIO_CLASS_BE:
			if (data >= IOPRIO_BE_NR || data < 0)
				return -EINVAL;

			break;
		case IOPRIO_CLASS_IDLE:
			break;
		case IOPRIO_CLASS_NONE:
			if (data)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
	}

	ret = -ESRCH;
	/*
	 * We want IOPRIO_WHO_PGRP/IOPRIO_WHO_USER to be "atomic",
	 * so we can't use rcu_read_lock(). See re-copy of ->ioprio
	 * in copy_process().
	 */
	read_lock(&tasklist_lock);
	switch (which) {
		case IOPRIO_WHO_PROCESS:
			if (!who)
				p = current;
			else
				p = find_task_by_vpid(who);
			if (p)
				ret = set_task_ioprio(p, ioprio);
			break;
		case IOPRIO_WHO_PGRP:
			if (!who)
				pgrp = task_pgrp(current);
			else
				pgrp = find_vpid(who);
			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
				ret = set_task_ioprio(p, ioprio);
				if (ret)
					break;
			} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
			break;
		case IOPRIO_WHO_USER:
			if (!who)
				user = current_user();
			else
				user = find_user(who);

			if (!user)
				break;

			do_each_thread(g, p) {
				if (__task_cred(p)->uid != who)
					continue;
				ret = set_task_ioprio(p, ioprio);
				if (ret)
					goto free_uid;
			} while_each_thread(g, p);
free_uid:
			if (who)
				free_uid(user);
			break;
		default:
			ret = -EINVAL;
	}

	read_unlock(&tasklist_lock);
	return ret;
}
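
/*
 * get_task_ioprio() - read one task's io priority, subject to the security
 * hook; tasks without an io_context report the default
 * IOPRIO_CLASS_NONE/IOPRIO_NORM value.
 */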
static int get_task_ioprio(struct task_struct *p)
{
	int ret;

	ret = security_task_getioprio(p);
	if (ret)
		goto out;
	ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
	if (p->io_context)
		ret = p->io_context->ioprio;
out:
	return ret;
}
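
/*
 * ioprio_best() - return the stronger of two io priorities, treating
 * IOPRIO_CLASS_NONE as best-effort: the numerically lower class wins, and
 * within the same class the lower (higher-priority) value wins.
 */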
int ioprio_best(unsigned short aprio, unsigned short bprio)
{
	unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
	unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);

	if (aclass == IOPRIO_CLASS_NONE)
		aclass = IOPRIO_CLASS_BE;
	if (bclass == IOPRIO_CLASS_NONE)
		bclass = IOPRIO_CLASS_BE;

	if (aclass == bclass)
		return min(aprio, bprio);
	if (aclass > bclass)
		return bprio;
	else
		return aprio;
}
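
/*
 * sys_ioprio_get() - report the io priority of a single process, or the
 * strongest priority found among a process group or a user's tasks
 * (combined via ioprio_best()); returns -ESRCH if no matching task exists.
 */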
asmlinkage long sys_ioprio_get(int which, int who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	struct pid *pgrp;
	int ret = -ESRCH;
	int tmpio;

	read_lock(&tasklist_lock);
	switch (which) {
		case IOPRIO_WHO_PROCESS:
			if (!who)
				p = current;
			else
				p = find_task_by_vpid(who);
			if (p)
				ret = get_task_ioprio(p);
			break;
		case IOPRIO_WHO_PGRP:
			if (!who)
				pgrp = task_pgrp(current);
			else
				pgrp = find_vpid(who);
			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
				tmpio = get_task_ioprio(p);
				if (tmpio < 0)
					continue;
				if (ret == -ESRCH)
					ret = tmpio;
				else
					ret = ioprio_best(ret, tmpio);
			} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
			break;
		case IOPRIO_WHO_USER:
			if (!who)
				user = current_user();
			else
				user = find_user(who);

			if (!user)
				break;

			do_each_thread(g, p) {
				if (__task_cred(p)->uid != user->uid)
					continue;
				tmpio = get_task_ioprio(p);
				if (tmpio < 0)
					continue;
				if (ret == -ESRCH)
					ret = tmpio;
				else
					ret = ioprio_best(ret, tmpio);
			} while_each_thread(g, p);

			if (who)
				free_uid(user);
			break;
		default:
			ret = -EINVAL;
	}

	read_unlock(&tasklist_lock);
	return ret;
}
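
A minimal userspace sketch of exercising these syscalls follows. glibc does not
normally ship ioprio_set()/ioprio_get() wrappers, so the calls go through
syscall(2); the IOPRIO_* constants are defined locally to mirror
include/linux/ioprio.h and are assumptions of this sketch, not part of the file
above.

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Assumed values, mirroring include/linux/ioprio.h */
#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_CLASS_BE		2
#define IOPRIO_WHO_PROCESS	1

int main(void)
{
	/* best-effort class, level 2, for the calling process (who == 0) */
	int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;

	if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio) < 0) {
		perror("ioprio_set");
		return 1;
	}

	prio = syscall(SYS_ioprio_get, IOPRIO_WHO_PROCESS, 0);
	if (prio < 0) {
		perror("ioprio_get");
		return 1;
	}
	printf("class %d, data %d\n", prio >> IOPRIO_CLASS_SHIFT,
	       prio & ((1 << IOPRIO_CLASS_SHIFT) - 1));
	return 0;
}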