[linux-2.6/mini2440.git] / kernel/taskstats.c
blob 5d6a8c54ee85f56f9a3640480d5560ed35f2b4e5

/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh,   IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <net/genetlink.h>
#include <asm/atomic.h>

/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN (100+6*NR_CPUS)
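
/*
 * Illustrative note: the REGISTER/DEREGISTER attribute carries an ASCII
 * cpulist string, as accepted by cpulist_parse() below, e.g. "0-3,8" to
 * listen for exit events from CPUs 0,1,2,3 and 8.
 */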

static DEFINE_PER_CPU(__u32, taskstats_seqnum) = { 0 };
static int family_registered;
kmem_cache_t *taskstats_cache;

static struct genl_family family = {
        .id      = GENL_ID_GENERATE,
        .name    = TASKSTATS_GENL_NAME,
        .version = TASKSTATS_GENL_VERSION,
        .maxattr = TASKSTATS_CMD_ATTR_MAX,
};

static struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1]
__read_mostly = {
        [TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
        [TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
        [TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
        [TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
};
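
/*
 * A TASKSTATS_CMD_GET request supplies one of the attributes above: a
 * PID/TGID to fetch stats for a single task or thread group, or a
 * REGISTER/DEREGISTER cpumask to start or stop receiving per-CPU exit
 * events (see taskstats_user_cmd() below).
 */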

struct listener {
        struct list_head list;
        pid_t pid;
        char valid;
};

struct listener_list {
        struct rw_semaphore sem;
        struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
        REGISTER,
        DEREGISTER,
        CPU_DONT_CARE
};
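
/*
 * Allocate a reply skb and start a generic netlink message for @cmd.
 * When called without request @info (kernel-generated exit events), a
 * per-cpu sequence number is used instead of the requester's pid/seq.
 */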
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
                         void **replyp, size_t size)
{
        struct sk_buff *skb;
        void *reply;

        /*
         * If new attributes are added, please revisit this allocation
         */
        skb = nlmsg_new(genlmsg_total_size(size), GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        if (!info) {
                int seq = get_cpu_var(taskstats_seqnum)++;
                put_cpu_var(taskstats_seqnum);

                reply = genlmsg_put(skb, 0, seq,
                                    family.id, 0, 0,
                                    cmd, family.version);
        } else
                reply = genlmsg_put(skb, info->snd_pid, info->snd_seq,
                                    family.id, 0, 0,
                                    cmd, family.version);
        if (reply == NULL) {
                nlmsg_free(skb);
                return -EINVAL;
        }

        *skbp = skb;
        *replyp = reply;
        return 0;
}

/*
 * Send taskstats data in @skb to listener with nl_pid @pid
 */
static int send_reply(struct sk_buff *skb, pid_t pid)
{
        struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
        void *reply = genlmsg_data(genlhdr);
        int rc;

        rc = genlmsg_end(skb, reply);
        if (rc < 0) {
                nlmsg_free(skb);
                return rc;
        }

        return genlmsg_unicast(skb, pid);
}

/*
 * Send taskstats data in @skb to listeners registered for @cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb, unsigned int cpu)
{
        struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
        struct listener_list *listeners;
        struct listener *s, *tmp;
        struct sk_buff *skb_next, *skb_cur = skb;
        void *reply = genlmsg_data(genlhdr);
        int rc, delcount = 0;

        rc = genlmsg_end(skb, reply);
        if (rc < 0) {
                nlmsg_free(skb);
                return;
        }

        rc = 0;
        listeners = &per_cpu(listener_array, cpu);
        down_read(&listeners->sem);
        list_for_each_entry(s, &listeners->list, list) {
                skb_next = NULL;
                if (!list_is_last(&s->list, &listeners->list)) {
                        skb_next = skb_clone(skb_cur, GFP_KERNEL);
                        if (!skb_next)
                                break;
                }
                rc = genlmsg_unicast(skb_cur, s->pid);
                if (rc == -ECONNREFUSED) {
                        s->valid = 0;
                        delcount++;
                }
                skb_cur = skb_next;
        }
        up_read(&listeners->sem);

        if (skb_cur)
                nlmsg_free(skb_cur);

        if (!delcount)
                return;

        /* Delete invalidated entries */
        down_write(&listeners->sem);
        list_for_each_entry_safe(s, tmp, &listeners->list, list) {
                if (!s->valid) {
                        list_del(&s->list);
                        kfree(s);
                }
        }
        up_write(&listeners->sem);
}
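
/*
 * Fill @stats for a single task: look up @pid if @pidtsk is not supplied,
 * then let each accounting subsystem (delay accounting, basic and extended
 * accounting) add its fields.
 */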
static int fill_pid(pid_t pid, struct task_struct *pidtsk,
                    struct taskstats *stats)
{
        int rc = 0;
        struct task_struct *tsk = pidtsk;

        if (!pidtsk) {
                read_lock(&tasklist_lock);
                tsk = find_task_by_pid(pid);
                if (!tsk) {
                        read_unlock(&tasklist_lock);
                        return -ESRCH;
                }
                get_task_struct(tsk);
                read_unlock(&tasklist_lock);
        } else
                get_task_struct(tsk);

        /*
         * Each accounting subsystem adds calls to its functions to
         * fill in relevant parts of struct taskstats as follows
         *
         *      per-task-foo(stats, tsk);
         */
        delayacct_add_tsk(stats, tsk);

        /* fill in basic acct fields */
        stats->version = TASKSTATS_VERSION;
        bacct_add_tsk(stats, tsk);

        /* fill in extended acct fields */
        xacct_add_tsk(stats, tsk);

        /* Define err: label here if needed */
        put_task_struct(tsk);
        return rc;
}
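
/*
 * Fill @stats for a whole thread group: start from the stats already
 * accumulated for dead tasks in signal->stats, then add delay-accounting
 * data from the remaining live threads.
 */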
static int fill_tgid(pid_t tgid, struct task_struct *tgidtsk,
                     struct taskstats *stats)
{
        struct task_struct *tsk, *first;
        unsigned long flags;

        /*
         * Add additional stats from live tasks except zombie thread group
         * leaders who are already counted with the dead tasks
         */
        first = tgidtsk;
        if (!first) {
                read_lock(&tasklist_lock);
                first = find_task_by_pid(tgid);
                if (!first) {
                        read_unlock(&tasklist_lock);
                        return -ESRCH;
                }
                get_task_struct(first);
                read_unlock(&tasklist_lock);
        } else
                get_task_struct(first);

        /* Start with stats from dead tasks */
        spin_lock_irqsave(&first->signal->stats_lock, flags);
        if (first->signal->stats)
                memcpy(stats, first->signal->stats, sizeof(*stats));
        spin_unlock_irqrestore(&first->signal->stats_lock, flags);

        tsk = first;
        read_lock(&tasklist_lock);
        do {
                if (tsk->exit_state == EXIT_ZOMBIE && thread_group_leader(tsk))
                        continue;
                /*
                 * Accounting subsystem can call its functions here to
                 * fill in relevant parts of struct taskstats as follows
                 *
                 *      per-task-foo(stats, tsk);
                 */
                delayacct_add_tsk(stats, tsk);

        } while_each_thread(first, tsk);
        read_unlock(&tasklist_lock);
        stats->version = TASKSTATS_VERSION;

        /*
         * Accounting subsystems can also add calls here to modify
         * fields of taskstats.
         */
        return 0;
}
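
/*
 * Fold the exiting task's stats into its thread group's signal->stats,
 * if the group is being accounted.
 */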
static void fill_tgid_exit(struct task_struct *tsk)
{
        unsigned long flags;

        spin_lock_irqsave(&tsk->signal->stats_lock, flags);
        if (!tsk->signal->stats)
                goto ret;

        /*
         * Each accounting subsystem calls its functions here to
         * accumulate its per-task stats for tsk, into the per-tgid structure
         *
         *      per-task-foo(tsk->signal->stats, tsk);
         */
        delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
        spin_unlock_irqrestore(&tsk->signal->stats_lock, flags);
        return;
}
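
/*
 * Register or deregister @pid as an exit-event listener on every CPU in
 * @maskp. If an allocation fails during REGISTER, fall through to the
 * cleanup path and drop this pid's entries on the CPUs in the mask.
 */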
static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
{
        struct listener_list *listeners;
        struct listener *s, *tmp;
        unsigned int cpu;
        cpumask_t mask = *maskp;

        if (!cpus_subset(mask, cpu_possible_map))
                return -EINVAL;

        if (isadd == REGISTER) {
                for_each_cpu_mask(cpu, mask) {
                        s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
                                         cpu_to_node(cpu));
                        if (!s)
                                goto cleanup;
                        s->pid = pid;
                        INIT_LIST_HEAD(&s->list);
                        s->valid = 1;

                        listeners = &per_cpu(listener_array, cpu);
                        down_write(&listeners->sem);
                        list_add(&s->list, &listeners->list);
                        up_write(&listeners->sem);
                }
                return 0;
        }

        /* Deregister or cleanup */
cleanup:
        for_each_cpu_mask(cpu, mask) {
                listeners = &per_cpu(listener_array, cpu);
                down_write(&listeners->sem);
                list_for_each_entry_safe(s, tmp, &listeners->list, list) {
                        if (s->pid == pid) {
                                list_del(&s->list);
                                kfree(s);
                                break;
                        }
                }
                up_write(&listeners->sem);
        }
        return 0;
}
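
/*
 * Parse a cpumask attribute into @mask. Returns 1 if the attribute is
 * absent, 0 on success, or a negative errno.
 */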
static int parse(struct nlattr *na, cpumask_t *mask)
{
        char *data;
        int len;
        int ret;

        if (na == NULL)
                return 1;
        len = nla_len(na);
        if (len > TASKSTATS_CPUMASK_MAXLEN)
                return -E2BIG;
        if (len < 1)
                return -EINVAL;
        data = kmalloc(len, GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        nla_strlcpy(data, na, len);
        ret = cpulist_parse(data, *mask);
        kfree(data);
        return ret;
}
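
/*
 * Handler for TASKSTATS_CMD_GET from userspace: either updates the
 * sender's listener registration, or builds a TASKSTATS_CMD_NEW reply
 * containing a nested AGGR_PID/AGGR_TGID attribute (the pid/tgid followed
 * by the struct taskstats payload).
 */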
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
        int rc = 0;
        struct sk_buff *rep_skb;
        struct taskstats stats;
        void *reply;
        size_t size;
        struct nlattr *na;
        cpumask_t mask;

        rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], &mask);
        if (rc < 0)
                return rc;
        if (rc == 0)
                return add_del_listener(info->snd_pid, &mask, REGISTER);

        rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], &mask);
        if (rc < 0)
                return rc;
        if (rc == 0)
                return add_del_listener(info->snd_pid, &mask, DEREGISTER);

        /*
         * Size includes space for nested attributes
         */
        size = nla_total_size(sizeof(u32)) +
                nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);

        memset(&stats, 0, sizeof(stats));
        rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, &reply, size);
        if (rc < 0)
                return rc;

        if (info->attrs[TASKSTATS_CMD_ATTR_PID]) {
                u32 pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
                rc = fill_pid(pid, NULL, &stats);
                if (rc < 0)
                        goto err;

                na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID);
                NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, pid);
                NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
                             stats);
        } else if (info->attrs[TASKSTATS_CMD_ATTR_TGID]) {
                u32 tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
                rc = fill_tgid(tgid, NULL, &stats);
                if (rc < 0)
                        goto err;

                na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID);
                NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, tgid);
                NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
                             stats);
        } else {
                rc = -EINVAL;
                goto err;
        }

        nla_nest_end(rep_skb, na);

        return send_reply(rep_skb, info->snd_pid);

nla_put_failure:
        return genlmsg_cancel(rep_skb, reply);
err:
        nlmsg_free(rep_skb);
        return rc;
}
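
/*
 * Called on the exit path: allocate a zeroed struct taskstats for the
 * exiting task, but only if at least one listener is registered on the
 * CPU recorded in *mycpu.
 */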
void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu)
{
        struct listener_list *listeners;
        struct taskstats *tmp;

        /*
         * This is the cpu on which the task is exiting currently and will
         * be the one for which the exit event is sent, even if the cpu
         * on which this function is running changes later.
         */
        *mycpu = raw_smp_processor_id();

        *ptidstats = NULL;
        tmp = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
        if (!tmp)
                return;

        listeners = &per_cpu(listener_array, *mycpu);
        down_read(&listeners->sem);
        if (!list_empty(&listeners->list)) {
                *ptidstats = tmp;
                tmp = NULL;
        }
        up_read(&listeners->sem);
        kfree(tmp);
}

/* Send pid data out on exit */
void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats,
                         int group_dead, unsigned int mycpu)
{
        int rc;
        struct sk_buff *rep_skb;
        void *reply;
        size_t size;
        int is_thread_group;
        struct nlattr *na;
        unsigned long flags;

        if (!family_registered || !tidstats)
                return;

        spin_lock_irqsave(&tsk->signal->stats_lock, flags);
        is_thread_group = tsk->signal->stats ? 1 : 0;
        spin_unlock_irqrestore(&tsk->signal->stats_lock, flags);

        rc = 0;
        /*
         * Size includes space for nested attributes
         */
        size = nla_total_size(sizeof(u32)) +
                nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);

        if (is_thread_group)
                size = 2 * size;        /* PID + STATS + TGID + STATS */

        rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, &reply, size);
        if (rc < 0)
                goto ret;

        rc = fill_pid(tsk->pid, tsk, tidstats);
        if (rc < 0)
                goto err_skb;

        na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID);
        NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, (u32)tsk->pid);
        NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
                     *tidstats);
        nla_nest_end(rep_skb, na);

        if (!is_thread_group)
                goto send;

        /*
         * tsk has/had a thread group so fill the tsk->signal->stats structure
         * Doesn't matter if tsk is the leader or the last group member leaving
         */
        fill_tgid_exit(tsk);
        if (!group_dead)
                goto send;

        na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID);
        NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, (u32)tsk->tgid);
        /* No locking needed for tsk->signal->stats since group is dead */
        NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
                     *tsk->signal->stats);
        nla_nest_end(rep_skb, na);

send:
        send_cpu_listeners(rep_skb, mycpu);
        return;

nla_put_failure:
        genlmsg_cancel(rep_skb, reply);
        goto ret;
err_skb:
        nlmsg_free(rep_skb);
ret:
        return;
}

static struct genl_ops taskstats_ops = {
        .cmd    = TASKSTATS_CMD_GET,
        .doit   = taskstats_user_cmd,
        .policy = taskstats_cmd_get_policy,
};

/* Needed early in initialization */
void __init taskstats_init_early(void)
{
        unsigned int i;

        taskstats_cache = kmem_cache_create("taskstats_cache",
                                            sizeof(struct taskstats),
                                            0, SLAB_PANIC, NULL, NULL);
        for_each_possible_cpu(i) {
                INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
                init_rwsem(&(per_cpu(listener_array, i).sem));
        }
}

static int __init taskstats_init(void)
{
        int rc;

        rc = genl_register_family(&family);
        if (rc)
                return rc;

        rc = genl_register_ops(&family, &taskstats_ops);
        if (rc < 0)
                goto err;

        family_registered = 1;
        return 0;
err:
        genl_unregister_family(&family);
        return rc;
}

/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);