/*
 * fs/ioprio.c
 *
 * Copyright (C) 2004 Jens Axboe <axboe@kernel.dk>
 *
 * Helper functions for setting/querying io priorities of processes. The
 * system calls closely mimic getpriority/setpriority, see the man page for
 * those. The prio argument is a composite of prio class and prio data, where
 * the data argument has meaning within that class. The standard scheduling
 * classes have 8 distinct prio levels, with 0 being the highest prio and 7
 * being the lowest.
 *
 * IOW, setting BE scheduling class with prio 2 is done a la:
 *
 * unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;
 *
 * ioprio_set(PRIO_PROCESS, pid, prio);
 *
 * See also Documentation/block/ioprio.txt
 */
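
/*
 * Illustrative user-space sketch (not part of this file; treat the names
 * and constants as assumptions to check against your own headers): glibc
 * does not ship wrappers for these syscalls, so a program typically goes
 * through syscall(2). The constant values mirror include/linux/ioprio.h.
 *
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *
 *      #define IOPRIO_CLASS_SHIFT      13
 *      #define IOPRIO_CLASS_BE         2
 *      #define IOPRIO_WHO_PROCESS      1
 *
 *      static int set_be_ioprio(pid_t pid, int level)
 *      {
 *              int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | level;
 *              return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, pid, prio);
 *      }
 */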

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/ioprio.h>
#include <linux/blkdev.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/pid_namespace.h>
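
/*
 * Apply @ioprio to @task: the caller must share uid/euid with the target
 * or hold CAP_SYS_NICE, and must pass the security hook. An io_context
 * is allocated on the fly if the task does not have one yet.
 */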
int set_task_ioprio(struct task_struct *task, int ioprio)
{
        int err;
        struct io_context *ioc;
        const struct cred *cred = current_cred(), *tcred;

        rcu_read_lock();
        tcred = __task_cred(task);
        if (tcred->uid != cred->euid &&
            tcred->uid != cred->uid && !capable(CAP_SYS_NICE)) {
                rcu_read_unlock();
                return -EPERM;
        }
        rcu_read_unlock();

        err = security_task_setioprio(task, ioprio);
        if (err)
                return err;

        task_lock(task);
        do {
                ioc = task->io_context;
                /* see wmb() in current_io_context() */
                smp_read_barrier_depends();
                if (ioc)
                        break;

                ioc = alloc_io_context(GFP_ATOMIC, -1);
                if (!ioc) {
                        err = -ENOMEM;
                        break;
                }
                task->io_context = ioc;
        } while (1);

        if (!err) {
                ioc->ioprio = ioprio;
                ioc->ioprio_changed = 1;
        }

        task_unlock(task);
        return err;
}
EXPORT_SYMBOL_GPL(set_task_ioprio);
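
/*
 * ioprio_set(2): validate the requested class and level, then apply the
 * priority to one task, every task in a process group, or every task
 * owned by a user, depending on @which.
 */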
SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
{
        int class = IOPRIO_PRIO_CLASS(ioprio);
        int data = IOPRIO_PRIO_DATA(ioprio);
        struct task_struct *p, *g;
        struct user_struct *user;
        struct pid *pgrp;
        int ret;

        switch (class) {
                case IOPRIO_CLASS_RT:
                        if (!capable(CAP_SYS_ADMIN))
                                return -EPERM;
                        /* fall through, rt has prio field too */
                case IOPRIO_CLASS_BE:
                        if (data >= IOPRIO_BE_NR || data < 0)
                                return -EINVAL;

                        break;
                case IOPRIO_CLASS_IDLE:
                        break;
                case IOPRIO_CLASS_NONE:
                        if (data)
                                return -EINVAL;
                        break;
                default:
                        return -EINVAL;
        }

        ret = -ESRCH;
        /*
         * We want IOPRIO_WHO_PGRP/IOPRIO_WHO_USER to be "atomic",
         * so we can't use rcu_read_lock(). See re-copy of ->ioprio
         * in copy_process().
         */
        read_lock(&tasklist_lock);
        switch (which) {
                case IOPRIO_WHO_PROCESS:
                        if (!who)
                                p = current;
                        else
                                p = find_task_by_vpid(who);
                        if (p)
                                ret = set_task_ioprio(p, ioprio);
                        break;
                case IOPRIO_WHO_PGRP:
                        if (!who)
                                pgrp = task_pgrp(current);
                        else
                                pgrp = find_vpid(who);
                        do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
                                ret = set_task_ioprio(p, ioprio);
                                if (ret)
                                        break;
                        } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
                        break;
                case IOPRIO_WHO_USER:
                        if (!who)
                                user = current_user();
                        else
                                user = find_user(who);

                        if (!user)
                                break;

                        do_each_thread(g, p) {
                                if (__task_cred(p)->uid != who)
                                        continue;
                                ret = set_task_ioprio(p, ioprio);
                                if (ret)
                                        goto free_uid;
                        } while_each_thread(g, p);
free_uid:
                        if (who)
                                free_uid(user);
                        break;
                default:
                        ret = -EINVAL;
        }

        read_unlock(&tasklist_lock);
        return ret;
}
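
/*
 * Read back a task's io priority; without an io_context the task reports
 * IOPRIO_CLASS_NONE at the normal level.
 */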
static int get_task_ioprio(struct task_struct *p)
{
        int ret;

        ret = security_task_getioprio(p);
        if (ret)
                goto out;
        ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
        if (p->io_context)
                ret = p->io_context->ioprio;
out:
        return ret;
}
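
/*
 * Combine two io priorities into the more aggressive one: unset values
 * count as best-effort, the numerically lower class (RT < BE < IDLE)
 * wins, and within the same class the lower (stronger) value wins.
 */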
int ioprio_best(unsigned short aprio, unsigned short bprio)
{
        unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
        unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);

        if (aclass == IOPRIO_CLASS_NONE)
                aclass = IOPRIO_CLASS_BE;
        if (bclass == IOPRIO_CLASS_NONE)
                bclass = IOPRIO_CLASS_BE;

        if (aclass == bclass)
                return min(aprio, bprio);
        if (aclass > bclass)
                return bprio;
        else
                return aprio;
}
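
/*
 * ioprio_get(2): report the io priority of one task, or the strongest
 * priority found across a process group or a user's tasks, using
 * ioprio_best() to merge the candidates.
 */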
SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
{
        struct task_struct *g, *p;
        struct user_struct *user;
        struct pid *pgrp;
        int ret = -ESRCH;
        int tmpio;

        read_lock(&tasklist_lock);
        switch (which) {
                case IOPRIO_WHO_PROCESS:
                        if (!who)
                                p = current;
                        else
                                p = find_task_by_vpid(who);
                        if (p)
                                ret = get_task_ioprio(p);
                        break;
                case IOPRIO_WHO_PGRP:
                        if (!who)
                                pgrp = task_pgrp(current);
                        else
                                pgrp = find_vpid(who);
                        do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
                                tmpio = get_task_ioprio(p);
                                if (tmpio < 0)
                                        continue;
                                if (ret == -ESRCH)
                                        ret = tmpio;
                                else
                                        ret = ioprio_best(ret, tmpio);
                        } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
                        break;
                case IOPRIO_WHO_USER:
                        if (!who)
                                user = current_user();
                        else
                                user = find_user(who);

                        if (!user)
                                break;

                        do_each_thread(g, p) {
                                if (__task_cred(p)->uid != user->uid)
                                        continue;
                                tmpio = get_task_ioprio(p);
                                if (tmpio < 0)
                                        continue;
                                if (ret == -ESRCH)
                                        ret = tmpio;
                                else
                                        ret = ioprio_best(ret, tmpio);
                        } while_each_thread(g, p);

                        if (who)
                                free_uid(user);
                        break;
                default:
                        ret = -EINVAL;
        }

        read_unlock(&tasklist_lock);
        return ret;
}